author     S. Solomon Darnell  2025-03-28 21:52:21 -0500
committer  S. Solomon Darnell  2025-03-28 21:52:21 -0500
commit     4a52a71956a8d46fcb7294ac71734504bb09bcc2 (patch)
tree       ee3dc5af3b6313e921cd920906356f5d4febc4ed /.venv/lib/python3.12/site-packages/azure/storage/fileshare/aio
parent     cc961e04ba734dd72309fb548a2f97d67d578813 (diff)
Two versions of R2R are here (HEAD, master)
Diffstat (limited to '.venv/lib/python3.12/site-packages/azure/storage/fileshare/aio')
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/fileshare/aio/__init__.py                       20
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/fileshare/aio/_directory_client_async.py       988
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/fileshare/aio/_download_async.py               502
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/fileshare/aio/_file_client_async.py           1740
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/fileshare/aio/_lease_async.py                  249
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/fileshare/aio/_models.py                       208
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/fileshare/aio/_share_client_async.py           991
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/fileshare/aio/_share_service_client_async.py   490
8 files changed, 5188 insertions(+), 0 deletions(-)
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/fileshare/aio/__init__.py b/.venv/lib/python3.12/site-packages/azure/storage/fileshare/aio/__init__.py
new file mode 100644
index 00000000..73393b81
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/fileshare/aio/__init__.py
@@ -0,0 +1,20 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+
+from ._file_client_async import ShareFileClient
+from ._directory_client_async import ShareDirectoryClient
+from ._share_client_async import ShareClient
+from ._share_service_client_async import ShareServiceClient
+from ._lease_async import ShareLeaseClient
+
+
+__all__ = [
+    'ShareFileClient',
+    'ShareDirectoryClient',
+    'ShareClient',
+    'ShareServiceClient',
+    'ShareLeaseClient',
+]
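
These five async clients mirror the synchronous surface of azure.storage.fileshare. A minimal consumption sketch, assuming a placeholder connection string, share name, and file path:

import asyncio
from azure.storage.fileshare.aio import ShareFileClient

async def main() -> None:
    # "<connection-string>", "myshare", and "docs/readme.txt" are placeholders.
    client = ShareFileClient.from_connection_string(
        "<connection-string>", share_name="myshare", file_path="docs/readme.txt")
    async with client:  # closes the underlying transport on exit
        props = await client.get_file_properties()
        print(props.size)

asyncio.run(main())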
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/fileshare/aio/_directory_client_async.py b/.venv/lib/python3.12/site-packages/azure/storage/fileshare/aio/_directory_client_async.py
new file mode 100644
index 00000000..8673362c
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/fileshare/aio/_directory_client_async.py
@@ -0,0 +1,988 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+# pylint: disable=docstring-keyword-should-match-keyword-only
+
+import functools
+import sys
+import time
+import warnings
+from datetime import datetime
+from typing import (
+    Any, AnyStr, AsyncIterable, cast, Dict, IO, Iterable, Optional, Union,
+    TYPE_CHECKING
+)
+from typing_extensions import Self
+
+from azure.core.async_paging import AsyncItemPaged
+from azure.core.exceptions import HttpResponseError, ResourceNotFoundError
+from azure.core.pipeline import AsyncPipeline
+from azure.core.tracing.decorator import distributed_trace
+from azure.core.tracing.decorator_async import distributed_trace_async
+from .._deserialize import deserialize_directory_properties
+from .._directory_client_helpers import (
+    _format_url,
+    _from_directory_url,
+    _parse_url
+)
+from .._generated.aio import AzureFileStorage
+from .._parser import _datetime_to_str, _get_file_permission, _parse_snapshot
+from .._serialize import get_api_version, get_dest_access_conditions, get_rename_smb_properties
+from .._shared.base_client import parse_query, StorageAccountHostsMixin
+from .._shared.base_client_async import parse_connection_str, AsyncStorageAccountHostsMixin, AsyncTransportWrapper
+from .._shared.policies_async import ExponentialRetry
+from .._shared.request_handlers import add_metadata_headers
+from .._shared.response_handlers import process_storage_error, return_response_headers
+from ._file_client_async import ShareFileClient
+from ._models import DirectoryPropertiesPaged, Handle, HandlesPaged
+
+if sys.version_info >= (3, 8):
+    from typing import Literal
+else:
+    from typing_extensions import Literal
+
+if TYPE_CHECKING:
+    from azure.core.credentials import AzureNamedKeyCredential, AzureSasCredential
+    from azure.core.credentials_async import AsyncTokenCredential
+    from .._models import DirectoryProperties, FileProperties, NTFSAttributes
+
+
+class ShareDirectoryClient(AsyncStorageAccountHostsMixin, StorageAccountHostsMixin):  # type: ignore [misc]
+    """A client to interact with a specific directory, although it may not yet exist.
+
+    For operations relating to a specific subdirectory or file in this share, the clients for those
+    entities can also be retrieved using the :func:`get_subdirectory_client` and :func:`get_file_client` functions.
+
+    :param str account_url:
+        The URI to the storage account. In order to create a client given the full URI to the directory,
+        use the :func:`from_directory_url` classmethod.
+    :param share_name:
+        The name of the share for the directory.
+    :type share_name: str
+    :param str directory_path:
+        The directory path for the directory with which to interact.
+        If specified, this value will override a directory value specified in the directory URL.
+    :param str snapshot:
+        An optional share snapshot on which to operate. This can be the snapshot ID string
+        or the response returned from :func:`ShareClient.create_snapshot`.
+    :param credential:
+        The credentials with which to authenticate. This is optional if the
+        account URL already has a SAS token. The value can be a SAS token string,
+        an instance of an AzureSasCredential or AzureNamedKeyCredential from azure.core.credentials,
+        an account shared access key, or an instance of an AsyncTokenCredential class from azure.identity.
+        If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential
+        - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError.
+        If using an instance of AzureNamedKeyCredential, "name" should be the storage account name, and "key"
+        should be the storage account key.
+    :type credential:
+        ~azure.core.credentials.AzureNamedKeyCredential or
+        ~azure.core.credentials.AzureSasCredential or
+        ~azure.core.credentials_async.AsyncTokenCredential or
+        str or dict[str, str] or None
+    :keyword token_intent:
+        Required when using `AsyncTokenCredential` for authentication and ignored for other forms of authentication.
+        Specifies the intent for all requests when using `AsyncTokenCredential` authentication. Possible values are:
+
+        backup - Specifies requests are intended for backup/admin type operations, meaning that all file/directory
+                 ACLs are bypassed and full permissions are granted. User must also have required RBAC permission.
+
+    :paramtype token_intent: Literal['backup']
+    :keyword bool allow_trailing_dot: If true, the trailing dot will not be trimmed from the target URI.
+    :keyword bool allow_source_trailing_dot: If true, the trailing dot will not be trimmed from the source URI.
+    :keyword str api_version:
+        The Storage API version to use for requests. Default value is the most recent service version that is
+        compatible with the current SDK. Setting to an older version may result in reduced feature compatibility.
+
+        .. versionadded:: 12.1.0
+
+    :keyword str secondary_hostname:
+        The hostname of the secondary endpoint.
+    :keyword int max_range_size: The maximum range size used for a file upload. Defaults to 4*1024*1024.
+    :keyword str audience: The audience to use when requesting tokens for Azure Active Directory
+        authentication. Only has an effect when credential is of type AsyncTokenCredential. The value could be
+        https://storage.azure.com/ (default) or https://<account>.file.core.windows.net.
+    """
+    def __init__(
+        self, account_url: str,
+        share_name: str,
+        directory_path: str,
+        snapshot: Optional[Union[str, Dict[str, Any]]] = None,
+        credential: Optional[Union[str, Dict[str, str], "AzureNamedKeyCredential", "AzureSasCredential", "AsyncTokenCredential"]] = None,  # pylint: disable=line-too-long
+        *,
+        token_intent: Optional[Literal['backup']] = None,
+        **kwargs: Any
+    ) -> None:
+        kwargs['retry_policy'] = kwargs.get('retry_policy') or ExponentialRetry(**kwargs)
+        loop = kwargs.pop('loop', None)
+        if loop and sys.version_info >= (3, 8):
+            warnings.warn("The 'loop' parameter was deprecated from asyncio's high-level "
+                          "APIs in Python 3.8 and is no longer supported.", DeprecationWarning)
+        if hasattr(credential, 'get_token') and not token_intent:
+            raise ValueError("'token_intent' keyword is required when 'credential' is an AsyncTokenCredential.")
+        parsed_url = _parse_url(account_url, share_name)
+        path_snapshot, sas_token = parse_query(parsed_url.query)
+        if not sas_token and not credential:
+            raise ValueError(
+                'You need to provide either an account shared key or SAS token when creating a storage service.')
+        self.snapshot = _parse_snapshot(snapshot, path_snapshot)
+        self.share_name = share_name
+        self.directory_path = directory_path
+
+        self._query_str, credential = self._format_query_string(
+            sas_token, credential, share_snapshot=self.snapshot)
+        super(ShareDirectoryClient, self).__init__(
+            parsed_url, service='file-share', credential=credential, **kwargs)
+        self.allow_trailing_dot = kwargs.pop('allow_trailing_dot', None)
+        self.allow_source_trailing_dot = kwargs.pop('allow_source_trailing_dot', None)
+        self.file_request_intent = token_intent
+        self._client = AzureFileStorage(url=self.url, base_url=self.url, pipeline=self._pipeline,
+                                        allow_trailing_dot=self.allow_trailing_dot,
+                                        allow_source_trailing_dot=self.allow_source_trailing_dot,
+                                        file_request_intent=self.file_request_intent)
+        self._client._config.version = get_api_version(kwargs)  # type: ignore [assignment]
+
+    @classmethod
+    def from_directory_url(
+        cls, directory_url: str,
+        snapshot: Optional[Union[str, Dict[str, Any]]] = None,
+        credential: Optional[Union[str, Dict[str, str], "AzureNamedKeyCredential", "AzureSasCredential", "AsyncTokenCredential"]] = None,  # pylint: disable=line-too-long
+        **kwargs: Any
+    ) -> Self:
+        """Create a ShareDirectoryClient from a directory url.
+
+        :param str directory_url:
+            The full URI to the directory.
+        :param str snapshot:
+            An optional share snapshot on which to operate. This can be the snapshot ID string
+            or the response returned from :func:`ShareClient.create_snapshot`.
+        :param credential:
+            The credentials with which to authenticate. This is optional if the
+            account URL already has a SAS token. The value can be a SAS token string,
+            an instance of an AzureSasCredential or AzureNamedKeyCredential from azure.core.credentials,
+            an account shared access key, or an instance of an AsyncTokenCredential class from azure.identity.
+            If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential
+            - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError.
+            If using an instance of AzureNamedKeyCredential, "name" should be the storage account name, and "key"
+            should be the storage account key.
+        :type credential:
+            ~azure.core.credentials.AzureNamedKeyCredential or
+            ~azure.core.credentials.AzureSasCredential or
+            ~azure.core.credentials_async.AsyncTokenCredential or
+            str or dict[str, str] or None
+        :keyword str audience: The audience to use when requesting tokens for Azure Active Directory authentication.
+            Only has an effect when credential is of type AsyncTokenCredential. The value could be
+            https://storage.azure.com/ (default) or https://<account>.file.core.windows.net.
+        :returns: A directory client.
+        :rtype: ~azure.storage.fileshare.aio.ShareDirectoryClient
+        """
+        account_url, share_name, directory_path, snapshot = _from_directory_url(directory_url, snapshot)
+        return cls(
+            account_url=account_url, share_name=share_name, directory_path=directory_path,
+            snapshot=snapshot, credential=credential, **kwargs)
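
A sketch of building the client from a full directory URL; the account, share, path, and SAS token below are placeholder assumptions:

import asyncio
from azure.storage.fileshare.aio import ShareDirectoryClient

async def main() -> None:
    url = "https://<account>.file.core.windows.net/myshare/reports/2025"  # placeholder
    client = ShareDirectoryClient.from_directory_url(url, credential="<sas-token>")
    async with client:
        print(await client.exists())

asyncio.run(main())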
+
+    def _format_url(self, hostname: str) -> str:
+        """Format the endpoint URL according to the current location mode hostname.
+
+        :param str hostname:
+            The hostname of the current location mode.
+        :returns: A formatted endpoint URL including the current location mode hostname.
+        :rtype: str
+        """
+        return _format_url(self.scheme, hostname, self.share_name, self.directory_path, self._query_str)
+
+    @classmethod
+    def from_connection_string(
+        cls, conn_str: str,
+        share_name: str,
+        directory_path: str,
+        credential: Optional[Union[str, Dict[str, str], "AzureNamedKeyCredential", "AzureSasCredential", "AsyncTokenCredential"]] = None,  # pylint: disable=line-too-long
+        **kwargs: Any
+    ) -> Self:
+        """Create ShareDirectoryClient from a Connection String.
+
+        :param str conn_str:
+            A connection string to an Azure Storage account.
+        :param share_name: The name of the share.
+        :type share_name: str
+        :param str directory_path:
+            The directory path.
+        :param credential:
+            The credentials with which to authenticate. This is optional if the
+            account URL already has a SAS token. The value can be a SAS token string,
+            an instance of an AzureSasCredential or AzureNamedKeyCredential from azure.core.credentials,
+            an account shared access key, or an instance of an AsyncTokenCredential class from azure.identity.
+            If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential
+            - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError.
+            If using an instance of AzureNamedKeyCredential, "name" should be the storage account name, and "key"
+            should be the storage account key.
+        :type credential:
+            Optional[Union[str, dict[str, str], AzureNamedKeyCredential, AzureSasCredential, "AsyncTokenCredential"]]
+        :keyword str audience: The audience to use when requesting tokens for Azure Active Directory authentication.
+            Only has an effect when credential is of type AsyncTokenCredential. The value could be
+            https://storage.azure.com/ (default) or https://<account>.file.core.windows.net.
+        :returns: A directory client.
+        :rtype: ~azure.storage.fileshare.aio.ShareDirectoryClient
+        """
+        account_url, secondary, credential = parse_connection_str(conn_str, credential, 'file')
+        if 'secondary_hostname' not in kwargs:
+            kwargs['secondary_hostname'] = secondary
+        return cls(
+            account_url, share_name=share_name, directory_path=directory_path, credential=credential, **kwargs)
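
The connection-string form resolves the account endpoints for you; a short sketch with placeholder values:

from azure.storage.fileshare.aio import ShareDirectoryClient

# The secondary hostname parsed from the connection string is used
# automatically unless 'secondary_hostname' is passed explicitly.
client = ShareDirectoryClient.from_connection_string(
    "<connection-string>", share_name="myshare", directory_path="reports")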
+
+    def get_file_client(self, file_name: str, **kwargs: Any) -> ShareFileClient:
+        """Get a client to interact with a specific file.
+
+        The file need not already exist.
+
+        :param str file_name:
+            The name of the file.
+        :returns: A File Client.
+        :rtype: ~azure.storage.fileshare.aio.ShareFileClient
+        """
+        if self.directory_path:
+            file_name = self.directory_path.rstrip('/') + "/" + file_name
+
+        _pipeline = AsyncPipeline(
+            transport=AsyncTransportWrapper(self._pipeline._transport),  # pylint: disable=protected-access
+            policies=self._pipeline._impl_policies  # type: ignore [arg-type] # pylint: disable=protected-access
+        )
+        return ShareFileClient(
+            self.url, file_path=file_name, share_name=self.share_name, snapshot=self.snapshot,
+            credential=self.credential, api_version=self.api_version, _hosts=self._hosts, _configuration=self._config,
+            _pipeline=_pipeline, _location_mode=self._location_mode, allow_trailing_dot=self.allow_trailing_dot,
+            allow_source_trailing_dot=self.allow_source_trailing_dot, token_intent=self.file_request_intent,
+            **kwargs)
+
+    def get_subdirectory_client(self, directory_name: str, **kwargs) -> "ShareDirectoryClient":
+        """Get a client to interact with a specific subdirectory.
+
+        The subdirectory need not already exist.
+
+        :param str directory_name:
+            The name of the subdirectory.
+        :returns: A Directory Client.
+        :rtype: ~azure.storage.fileshare.aio.ShareDirectoryClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/file_samples_directory_async.py
+                :start-after: [START get_subdirectory_client]
+                :end-before: [END get_subdirectory_client]
+                :language: python
+                :dedent: 16
+                :caption: Gets the subdirectory client.
+        """
+        directory_path = directory_name
+        if self.directory_path:
+            directory_path = self.directory_path.rstrip('/') + "/" + directory_name
+
+        _pipeline = AsyncPipeline(
+            transport=AsyncTransportWrapper(self._pipeline._transport),  # pylint: disable=protected-access
+            policies=self._pipeline._impl_policies  # type: ignore [arg-type] # pylint: disable=protected-access
+        )
+        return ShareDirectoryClient(
+            self.url, share_name=self.share_name, directory_path=directory_path, snapshot=self.snapshot,
+            credential=self.credential, api_version=self.api_version, _hosts=self._hosts, _configuration=self._config,
+            _pipeline=_pipeline, _location_mode=self._location_mode, allow_trailing_dot=self.allow_trailing_dot,
+            allow_source_trailing_dot=self.allow_source_trailing_dot, token_intent=self.file_request_intent,
+            **kwargs)
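
Both accessors compose paths purely client-side and reuse the parent client's pipeline via AsyncTransportWrapper, so no request is issued until an operation is awaited. A sketch with placeholder names:

from azure.storage.fileshare.aio import ShareDirectoryClient

parent = ShareDirectoryClient.from_connection_string(
    "<connection-string>", share_name="myshare", directory_path="reports")
sub = parent.get_subdirectory_client("2025")        # path: reports/2025
file_client = sub.get_file_client("summary.csv")    # path: reports/2025/summary.csv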
+
+    @distributed_trace_async
+    async def create_directory(self, **kwargs: Any) -> Dict[str, Any]:
+        """Creates a new directory under the directory referenced by the client.
+
+        :keyword file_attributes:
+            The file system attributes for files and directories.
+            If not set, the default value is "none" and the attributes will be set to "Archive".
+            An example str value: 'Temporary|Archive'.
+            The file_attributes value is not case sensitive.
+        :paramtype file_attributes: str or ~azure.storage.fileshare.NTFSAttributes or None
+        :keyword file_creation_time: Creation time for the directory.
+        :paramtype file_creation_time: str or ~datetime.datetime or None
+        :keyword file_last_write_time: Last write time for the directory.
+        :paramtype file_last_write_time: str or ~datetime.datetime or None
+        :keyword str file_permission:
+            If specified the permission (security descriptor) shall be set
+            for the directory/file. This header can be used if Permission size is
+            <= 8KB, else file-permission-key header shall be used.
+            Default value: Inherit. If SDDL is specified as input, it must have owner, group and dacl.
+            Note: Only one of the file-permission or file-permission-key should be specified.
+        :keyword str file_permission_key:
+            Key of the permission to be set for the directory/file.
+            Note: Only one of the file-permission or file-permission-key should be specified.
+        :keyword file_permission_format:
+            Specifies the format in which the permission is returned. If not specified, SDDL will be the default.
+        :paramtype file_permission_format: Literal['sddl', 'binary']
+        :keyword file_change_time:
+            Change time for the directory. If not specified, change time will be set to the current date/time.
+
+            .. versionadded:: 12.8.0
+
+                This parameter was introduced in API version '2021-06-08'.
+
+        :paramtype file_change_time: str or ~datetime.datetime
+        :keyword metadata:
+            Name-value pairs associated with the directory as metadata.
+        :paramtype metadata: Optional[dict[str, str]]
+        :keyword str owner:
+            NFS only. The owner of the directory.
+        :keyword str group:
+            NFS only. The owning group of the directory.
+        :keyword str file_mode:
+            NFS only. The file mode of the directory.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :returns: Directory-updated property dict (Etag and last modified).
+        :rtype: dict[str, Any]
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/file_samples_directory_async.py
+                :start-after: [START create_directory]
+                :end-before: [END create_directory]
+                :language: python
+                :dedent: 16
+                :caption: Creates a directory.
+        """
+        metadata = kwargs.pop('metadata', None)
+        timeout = kwargs.pop('timeout', None)
+        headers = kwargs.pop('headers', {})
+        headers.update(add_metadata_headers(metadata))
+
+        file_attributes = kwargs.pop('file_attributes', None)
+        file_creation_time = kwargs.pop('file_creation_time', None)
+        file_last_write_time = kwargs.pop('file_last_write_time', None)
+        file_change_time = kwargs.pop('file_change_time', None)
+        file_permission = kwargs.pop('file_permission', None)
+        file_permission_key = kwargs.pop('file_permission_key', None)
+        file_permission = _get_file_permission(file_permission, file_permission_key, None)
+
+        try:
+            return cast(Dict[str, Any], await self._client.directory.create(
+                file_attributes=str(file_attributes) if file_attributes is not None else file_attributes,
+                file_creation_time=_datetime_to_str(file_creation_time),
+                file_last_write_time=_datetime_to_str(file_last_write_time),
+                file_change_time=_datetime_to_str(file_change_time),
+                file_permission=file_permission,
+                file_permission_key=file_permission_key,
+                timeout=timeout,
+                cls=return_response_headers,
+                headers=headers,
+                **kwargs))
+        except HttpResponseError as error:
+            process_storage_error(error)
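
A sketch of creating the directory with metadata and SMB properties; the values shown are illustrative placeholders:

import asyncio
from datetime import datetime, timezone
from azure.storage.fileshare.aio import ShareDirectoryClient

async def main() -> None:
    client = ShareDirectoryClient.from_connection_string(
        "<connection-string>", share_name="myshare", directory_path="reports/2025")
    async with client:
        result = await client.create_directory(
            metadata={"project": "demo"},
            file_attributes="Directory|Archive",
            file_creation_time=datetime.now(timezone.utc))
        print(result["etag"], result["last_modified"])

asyncio.run(main())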
+
+    @distributed_trace_async
+    async def delete_directory(self, **kwargs: Any) -> None:
+        """Marks the directory for deletion. The directory is
+        later deleted during garbage collection.
+
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :rtype: None
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/file_samples_directory_async.py
+                :start-after: [START delete_directory]
+                :end-before: [END delete_directory]
+                :language: python
+                :dedent: 16
+                :caption: Deletes a directory.
+        """
+        timeout = kwargs.pop('timeout', None)
+        try:
+            await self._client.directory.delete(timeout=timeout, **kwargs)
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    @distributed_trace_async
+    async def rename_directory(self, new_name: str, **kwargs: Any) -> "ShareDirectoryClient":
+        """
+        Rename the source directory.
+
+        :param str new_name:
+            The new directory name.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :keyword bool overwrite:
+            A boolean value specifying whether the request should overwrite the destination
+            file if it already exists. If true, the rename will succeed and overwrite the
+            destination file. If not provided, or if false and the destination file exists,
+            the request will not overwrite the destination file. If the destination file
+            does not exist, the rename will succeed either way.
+        :keyword bool ignore_read_only:
+            A boolean value that specifies whether the ReadOnly attribute on a preexisting destination
+            file should be respected. If true, the rename will succeed, otherwise, a previous file at the
+            destination with the ReadOnly attribute set will cause the rename to fail.
+        :keyword str file_permission:
+            If specified the permission (security descriptor) shall be set for the directory. This header
+            can be used if Permission size is <= 8KB, else file_permission_key shall be used.
+            If SDDL is specified as input, it must have owner, group and dacl.
+            A value of 'preserve' can be passed to preserve source permissions.
+            Note: Only one of the file_permission or file_permission_key should be specified.
+        :keyword str file_permission_key:
+            Key of the permission to be set for the directory.
+            Note: Only one of the file-permission or file-permission-key should be specified.
+        :keyword file_permission_format:
+            Specifies the format in which the permission is returned. If not specified, SDDL will be the default.
+        :paramtype file_permission_format: Literal['sddl', 'binary']
+        :keyword file_attributes:
+            The file system attributes for the directory.
+        :paramtype file_attributes: ~azure.storage.fileshare.NTFSAttributes or str
+        :keyword file_creation_time:
+            Creation time for the directory.
+        :paramtype file_creation_time: ~datetime.datetime or str
+        :keyword file_last_write_time:
+            Last write time for the file.
+        :paramtype file_last_write_time: ~datetime.datetime or str
+        :keyword file_change_time:
+            Change time for the directory. If not specified, change time will be set to the current date/time.
+
+            .. versionadded:: 12.8.0
+
+                This parameter was introduced in API version '2021-06-08'.
+
+        :paramtype file_change_time: str or ~datetime.datetime
+        :keyword Dict[str,str] metadata:
+            A name-value pair to associate with a file storage object.
+        :keyword destination_lease:
+            Required if the destination file has an active lease. Value can be a ShareLeaseClient object
+            or the lease ID as a string.
+        :paramtype destination_lease: ~azure.storage.fileshare.ShareLeaseClient or str
+        :returns: The new Directory Client.
+        :rtype: ~azure.storage.fileshare.aio.ShareDirectoryClient
+        """
+        if not new_name:
+            raise ValueError("Please specify a new directory name.")
+
+        new_name = new_name.strip('/')
+        new_path_and_query = new_name.split('?')
+        new_dir_path = new_path_and_query[0]
+        if len(new_path_and_query) == 2:
+            new_dir_sas = new_path_and_query[1] or self._query_str.strip('?')
+        else:
+            new_dir_sas = self._query_str.strip('?')
+
+        new_directory_client = ShareDirectoryClient(
+            f'{self.scheme}://{self.primary_hostname}', self.share_name, new_dir_path,
+            credential=new_dir_sas or self.credential, api_version=self.api_version,
+            _hosts=self._hosts, _configuration=self._config, _pipeline=self._pipeline,
+            _location_mode=self._location_mode, allow_trailing_dot=self.allow_trailing_dot,
+            allow_source_trailing_dot=self.allow_source_trailing_dot, token_intent=self.file_request_intent
+        )
+
+        kwargs.update(get_rename_smb_properties(kwargs))
+
+        timeout = kwargs.pop('timeout', None)
+        overwrite = kwargs.pop('overwrite', None)
+        metadata = kwargs.pop('metadata', None)
+        headers = kwargs.pop('headers', {})
+        headers.update(add_metadata_headers(metadata))
+
+        destination_access_conditions = get_dest_access_conditions(kwargs.pop('destination_lease', None))
+
+        try:
+            await new_directory_client._client.directory.rename(  # pylint: disable=protected-access
+                self.url,
+                timeout=timeout,
+                replace_if_exists=overwrite,
+                destination_lease_access_conditions=destination_access_conditions,
+                headers=headers,
+                **kwargs)
+
+            return new_directory_client
+        except HttpResponseError as error:
+            process_storage_error(error)
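
Note that rename returns a client bound to the destination path; the original client still points at the old, now nonexistent, path. A sketch, assuming `client` is a ShareDirectoryClient for an existing directory:

async def archive(client):
    # "archive/2025" is a placeholder destination path within the same share.
    new_client = await client.rename_directory("archive/2025", overwrite=False)
    return new_client  # use this for further operations on the renamed directory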
+
+    @distributed_trace
+    def list_directories_and_files(
+        self,
+        name_starts_with: Optional[str] = None,
+        **kwargs: Any
+    ) -> AsyncItemPaged[Union["DirectoryProperties", "FileProperties"]]:
+        """Lists all the directories and files under the directory.
+
+        :param str name_starts_with:
+            Filters the results to return only entities whose names
+            begin with the specified prefix.
+        :keyword List[str] include:
+            Include this parameter to specify one or more datasets to include in the response.
+            Possible str values are "timestamps", "Etag", "Attributes", "PermissionKey".
+
+            .. versionadded:: 12.6.0
+
+            This keyword argument was introduced in API version '2020-10-02'.
+
+        :keyword bool include_extended_info:
+            If this is set to true, file id will be returned in listed results.
+
+            .. versionadded:: 12.6.0
+
+            This keyword argument was introduced in API version '2020-10-02'.
+
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :returns: An auto-paging iterable of dict-like DirectoryProperties and FileProperties.
+        :rtype: ~azure.core.async_paging.AsyncItemPaged[Union[DirectoryProperties, FileProperties]]
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/file_samples_directory_async.py
+                :start-after: [START lists_directory]
+                :end-before: [END lists_directory]
+                :language: python
+                :dedent: 16
+                :caption: List directories and files.
+        """
+        timeout = kwargs.pop('timeout', None)
+        results_per_page = kwargs.pop('results_per_page', None)
+        command = functools.partial(
+            self._client.directory.list_files_and_directories_segment,
+            sharesnapshot=self.snapshot,
+            timeout=timeout,
+            **kwargs)
+        return AsyncItemPaged(
+            command, prefix=name_starts_with, results_per_page=results_per_page,
+            page_iterator_class=DirectoryPropertiesPaged)
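
Pages are fetched lazily as the loop advances; each item is a dict-like DirectoryProperties or FileProperties. A sketch, assuming `client` is an open ShareDirectoryClient:

async def walk_one_level(client):
    async for item in client.list_directories_and_files(name_starts_with="report"):
        # Both model types expose .name; FileProperties also carries a size.
        print(item.name)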
+
+    @distributed_trace
+    def list_handles(self, recursive: bool = False, **kwargs: Any) -> AsyncItemPaged["Handle"]:
+        """Lists opened handles on a directory or a file under the directory.
+
+        :param bool recursive:
+            Boolean that specifies if operation should apply to the directory specified by the client,
+            its files, its subdirectories and their files. Default value is False.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :returns: An auto-paging iterable of Handle
+        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.storage.fileshare.Handle]
+        """
+        timeout = kwargs.pop('timeout', None)
+        results_per_page = kwargs.pop('results_per_page', None)
+        command = functools.partial(
+            self._client.directory.list_handles,
+            sharesnapshot=self.snapshot,
+            timeout=timeout,
+            recursive=recursive,
+            **kwargs)
+        return AsyncItemPaged(
+            command, results_per_page=results_per_page,
+            page_iterator_class=HandlesPaged)
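
A sketch of enumerating open handles, assuming `client` is an open ShareDirectoryClient:

async def show_handles(client):
    async for handle in client.list_handles(recursive=True):
        print(handle.id, handle.path, handle.client_ip)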
+
+    @distributed_trace_async
+    async def exists(self, **kwargs: Any) -> bool:
+        """
+        Returns True if the directory exists; False otherwise.
+
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :returns: True if the directory exists, False otherwise.
+        :rtype: bool
+        """
+        try:
+            await self._client.directory.get_properties(**kwargs)
+            return True
+        except HttpResponseError as error:
+            try:
+                process_storage_error(error)
+            except ResourceNotFoundError:
+                return False
+
+    @distributed_trace_async
+    async def close_handle(self, handle: Union[str, "Handle"], **kwargs: Any) -> Dict[str, int]:
+        """Close an open file handle.
+
+        :param handle:
+            A specific handle to close.
+        :type handle: str or ~azure.storage.fileshare.Handle
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :returns: The number of handles closed (this may be 0 if the specified handle was not found)
+            and the number of handles that failed to close, returned in a dict.
+        :rtype: dict[str, int]
+        """
+        if isinstance(handle, Handle):
+            handle_id = handle.id
+        else:
+            handle_id = handle
+        if handle_id == '*':
+            raise ValueError("Handle ID '*' is not supported. Use 'close_all_handles' instead.")
+        try:
+            response = await self._client.directory.force_close_handles(
+                handle_id,
+                marker=None,
+                recursive=None,
+                sharesnapshot=self.snapshot,
+                cls=return_response_headers,
+                **kwargs
+            )
+            return {
+                'closed_handles_count': response.get('number_of_handles_closed', 0),
+                'failed_handles_count': response.get('number_of_handles_failed', 0)
+            }
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    @distributed_trace_async
+    async def close_all_handles(self, recursive: bool = False, **kwargs: Any) -> Dict[str, int]:
+        """Close any open file handles.
+
+        This operation will block until the service has closed all open handles.
+
+        :param bool recursive:
+            Boolean that specifies if operation should apply to the directory specified by the client,
+            its files, its subdirectories and their files. Default value is False.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :returns: The number of handles closed (this may be 0 if the specified handle was not found)
+            and the number of handles that failed to close, returned in a dict.
+        :rtype: dict[str, int]
+        """
+        timeout = kwargs.pop('timeout', None)
+        start_time = time.time()
+
+        try_close = True
+        continuation_token = None
+        total_closed = 0
+        total_failed = 0
+        while try_close:
+            try:
+                response = await self._client.directory.force_close_handles(
+                    handle_id='*',
+                    timeout=timeout,
+                    marker=continuation_token,
+                    recursive=recursive,
+                    sharesnapshot=self.snapshot,
+                    cls=return_response_headers,
+                    **kwargs
+                )
+            except HttpResponseError as error:
+                process_storage_error(error)
+            continuation_token = response.get('marker')
+            try_close = bool(continuation_token)
+            total_closed += response.get('number_of_handles_closed', 0)
+            total_failed += response.get('number_of_handles_failed', 0)
+            if timeout:
+                timeout = max(0, timeout - (time.time() - start_time))
+        return {
+            'closed_handles_count': total_closed,
+            'failed_handles_count': total_failed
+        }
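
Because the service closes handles in batches and returns a continuation marker, this loop may issue several requests; the aggregated counts come back in one dict. A usage sketch, assuming `client` is a ShareDirectoryClient:

async def force_close(client):
    result = await client.close_all_handles(recursive=True)
    print(f"closed={result['closed_handles_count']} "
          f"failed={result['failed_handles_count']}")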
+
+    @distributed_trace_async
+    async def get_directory_properties(self, **kwargs: Any) -> "DirectoryProperties":
+        """Returns all user-defined metadata and system properties for the
+        specified directory. The data returned does not include the directory's
+        list of files.
+
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :returns: DirectoryProperties
+        :rtype: ~azure.storage.fileshare.DirectoryProperties
+        """
+        timeout = kwargs.pop('timeout', None)
+        try:
+            response = cast("DirectoryProperties", await self._client.directory.get_properties(
+                timeout=timeout,
+                cls=deserialize_directory_properties,
+                **kwargs))
+        except HttpResponseError as error:
+            process_storage_error(error)
+        return response
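
A sketch, assuming `client` is an open ShareDirectoryClient:

async def show_props(client):
    props = await client.get_directory_properties()
    print(props.etag, props.last_modified, props.metadata)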
+
+    @distributed_trace_async
+    async def set_directory_metadata(self, metadata: Dict[str, Any], **kwargs: Any) -> Dict[str, Any]:
+        """Sets the metadata for the directory.
+
+        Each call to this operation replaces all existing metadata
+        attached to the directory. To remove all metadata from the directory,
+        call this operation with an empty metadata dict.
+
+        :param metadata:
+            Name-value pairs associated with the directory as metadata.
+        :type metadata: dict[str, str]
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :returns: Directory-updated property dict (Etag and last modified).
+        :rtype: dict[str, Any]
+        """
+        timeout = kwargs.pop('timeout', None)
+        headers = kwargs.pop('headers', {})
+        headers.update(add_metadata_headers(metadata))
+        try:
+            return cast(Dict[str, Any], await self._client.directory.set_metadata(
+                timeout=timeout,
+                cls=return_response_headers,
+                headers=headers,
+                **kwargs))
+        except HttpResponseError as error:
+            process_storage_error(error)
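
Since each call replaces the full metadata set, clearing it is just a call with an empty dict. A sketch, assuming `client` is a ShareDirectoryClient:

async def retag(client):
    await client.set_directory_metadata({"owner-team": "data"})  # replaces everything
    await client.set_directory_metadata({})                      # removes all metadata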
+
+    @distributed_trace_async
+    async def set_http_headers(
+        self, file_attributes: Optional[Union[str, "NTFSAttributes"]] = None,
+        file_creation_time: Optional[Union[str, datetime]] = None,
+        file_last_write_time: Optional[Union[str, datetime]] = None,
+        file_permission: Optional[str] = None,
+        permission_key: Optional[str] = None,
+        **kwargs: Any
+    ) -> Dict[str, Any]:
+        """Sets HTTP headers on the directory.
+
+        :param file_attributes:
+            The file system attributes for files and directories.
+            If not set, indicates preservation of existing values.
+            An example str value: 'Temporary|Archive'.
+        :type file_attributes: str or ~azure.storage.fileshare.NTFSAttributes or None
+        :param file_creation_time: Creation time for the file
+        :type file_creation_time: str or ~datetime.datetime or None
+        :param file_last_write_time: Last write time for the file
+        :type file_last_write_time: str or ~datetime.datetime or None
+        :param file_permission: If specified the permission (security
+            descriptor) shall be set for the directory/file. This header can be
+            used if Permission size is <= 8KB, else x-ms-file-permission-key
+            header shall be used. Default value: Inherit. If SDDL is specified as
+            input, it must have owner, group and dacl. Note: Only one of the
+            x-ms-file-permission or x-ms-file-permission-key should be specified.
+        :type file_permission: str
+        :param permission_key: Key of the permission to be set for the
+            directory/file. Note: Only one of the x-ms-file-permission or
+            x-ms-file-permission-key should be specified.
+        :type permission_key: str
+        :keyword file_permission_format:
+            Specifies the format in which the permission is returned. If not specified, SDDL will be the default.
+        :paramtype file_permission_format: Literal['sddl', 'binary']
+        :keyword file_change_time:
+            Change time for the directory. If not specified, change time will be set to the current date/time.
+
+            .. versionadded:: 12.8.0
+
+                This parameter was introduced in API version '2021-06-08'.
+
+        :paramtype file_change_time: str or ~datetime.datetime
+        :keyword str owner:
+            NFS only. The owner of the directory.
+        :keyword str group:
+            NFS only. The owning group of the directory.
+        :keyword str file_mode:
+            NFS only. The file mode of the directory.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :returns: File-updated property dict (Etag and last modified).
+        :rtype: dict[str, Any]
+        """
+        timeout = kwargs.pop('timeout', None)
+        file_permission = _get_file_permission(file_permission, permission_key, None)
+        file_change_time = kwargs.pop('file_change_time', None)
+        try:
+            return cast(Dict[str, Any], await self._client.directory.set_properties(
+                file_attributes=str(file_attributes) if file_attributes is not None else file_attributes,
+                file_creation_time=_datetime_to_str(file_creation_time),
+                file_last_write_time=_datetime_to_str(file_last_write_time),
+                file_change_time=_datetime_to_str(file_change_time),
+                file_permission=file_permission,
+                file_permission_key=permission_key,
+                timeout=timeout,
+                cls=return_response_headers,
+                **kwargs))
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    @distributed_trace_async
+    async def create_subdirectory(self, directory_name: str, **kwargs: Any) -> "ShareDirectoryClient":
+        """Creates a new subdirectory and returns a client to interact
+        with the subdirectory.
+
+        :param str directory_name:
+            The name of the subdirectory.
+        :keyword metadata:
+            Name-value pairs associated with the subdirectory as metadata.
+        :paramtype metadata: Optional[dict[str, str]]
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :returns: ShareDirectoryClient
+        :rtype: ~azure.storage.fileshare.aio.ShareDirectoryClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/file_samples_directory_async.py
+                :start-after: [START create_subdirectory]
+                :end-before: [END create_subdirectory]
+                :language: python
+                :dedent: 16
+                :caption: Create a subdirectory.
+        """
+        metadata = kwargs.pop('metadata', None)
+        timeout = kwargs.pop('timeout', None)
+        subdir = self.get_subdirectory_client(directory_name)
+        await subdir.create_directory(metadata=metadata, timeout=timeout, **kwargs)
+        return subdir
+
+    @distributed_trace_async
+    async def delete_subdirectory(self, directory_name: str, **kwargs: Any) -> None:
+        """Deletes a subdirectory.
+
+        :param str directory_name:
+            The name of the subdirectory.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :rtype: None
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/file_samples_directory_async.py
+                :start-after: [START delete_subdirectory]
+                :end-before: [END delete_subdirectory]
+                :language: python
+                :dedent: 16
+                :caption: Delete a subdirectory.
+        """
+        timeout = kwargs.pop('timeout', None)
+        subdir = self.get_subdirectory_client(directory_name)
+        await subdir.delete_directory(timeout=timeout, **kwargs)
+
+    @distributed_trace_async
+    async def upload_file(
+        self, file_name: str,
+        data: Union[bytes, str, Iterable[AnyStr], AsyncIterable[AnyStr], IO[AnyStr]],
+        length: Optional[int] = None,
+        **kwargs: Any
+    ) -> ShareFileClient:
+        """Creates a new file in the directory and returns a ShareFileClient
+        to interact with the file.
+
+        :param str file_name:
+            The name of the file.
+        :param data:
+            Content of the file.
+        :type data: Union[bytes, str, Iterable[AnyStr], AsyncIterable[AnyStr], IO[AnyStr]]
+        :param int length:
+            Length of the file in bytes. Specify its maximum size, up to 1 TiB.
+        :keyword metadata:
+            Name-value pairs associated with the file as metadata.
+        :paramtype metadata: Optional[dict[str, str]]
+        :keyword ~azure.storage.fileshare.ContentSettings content_settings:
+            ContentSettings object used to set file properties. Used to set content type, encoding,
+            language, disposition, md5, and cache control.
+        :keyword bool validate_content:
+            If true, calculates an MD5 hash for each range of the file. The storage
+            service checks the hash of the content that has arrived with the hash
+            that was sent. This is primarily valuable for detecting bitflips on
+            the wire if using http instead of https as https (the default) will
+            already validate. Note that this MD5 hash is not stored with the
+            file.
+        :keyword int max_concurrency:
+            Maximum number of parallel connections to use.
+        :keyword progress_hook:
+            An async callback to track the progress of a long running upload. The signature is
+            function(current: int, total: Optional[int]) where current is the number of bytes transferred
+            so far, and total is the size of the file or None if the size is unknown.
+        :paramtype progress_hook: Callable[[int, Optional[int]], Awaitable[None]]
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :keyword str encoding:
+            Defaults to UTF-8.
+        :returns: ShareFileClient
+        :rtype: ~azure.storage.fileshare.aio.ShareFileClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/file_samples_directory_async.py
+                :start-after: [START upload_file_to_directory]
+                :end-before: [END upload_file_to_directory]
+                :language: python
+                :dedent: 16
+                :caption: Upload a file to a directory.
+        """
+        file_client = self.get_file_client(file_name)
+        await file_client.upload_file(
+            data,
+            length=length,
+            **kwargs)
+        return file_client
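
A sketch of the convenience path: the directory client derives the file path, then delegates to ShareFileClient.upload_file. The file name and content are placeholders; `client` is assumed to be a ShareDirectoryClient:

async def publish(client):
    file_client = await client.upload_file(
        "notes.txt", b"hello from asyncio", metadata={"kind": "demo"})
    return file_client  # already scoped to <directory>/notes.txt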
+
+    @distributed_trace_async
+    async def delete_file(self, file_name: str, **kwargs: Any) -> None:
+        """Marks the specified file for deletion. The file is later
+        deleted during garbage collection.
+
+        :param str file_name:
+            The name of the file to delete.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :rtype: None
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/file_samples_directory_async.py
+                :start-after: [START delete_file_in_directory]
+                :end-before: [END delete_file_in_directory]
+                :language: python
+                :dedent: 16
+                :caption: Delete a file in a directory.
+        """
+        file_client = self.get_file_client(file_name)
+        await file_client.delete_file(**kwargs)
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/fileshare/aio/_download_async.py b/.venv/lib/python3.12/site-packages/azure/storage/fileshare/aio/_download_async.py
new file mode 100644
index 00000000..278c5e01
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/fileshare/aio/_download_async.py
@@ -0,0 +1,502 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+# pylint: disable=invalid-overridden-method
+# mypy: disable-error-code=override
+
+import asyncio
+import sys
+import warnings
+from io import BytesIO
+from itertools import islice
+from typing import (
+    Any, AsyncIterator, Awaitable, Callable,
+    cast, Generator, IO, Optional, Tuple,
+    TYPE_CHECKING
+)
+
+from azure.core.exceptions import HttpResponseError, ResourceModifiedError
+from .._download import _ChunkDownloader
+from .._shared.request_handlers import validate_and_format_range_headers
+from .._shared.response_handlers import parse_length_from_content_range, process_storage_error
+
+if TYPE_CHECKING:
+    from .._generated.aio.operations import FileOperations
+    from .._models import FileProperties
+    from .._shared.models import StorageConfiguration
+
+
+async def process_content(data: Any) -> bytes:
+    if data is None:
+        raise ValueError("Response cannot be None.")
+
+    try:
+        await data.response.load_body()
+        return cast(bytes, data.response.body())
+    except Exception as error:
+        raise HttpResponseError(message="Download stream interrupted.", response=data.response, error=error) from error
+
+
+class _AsyncChunkDownloader(_ChunkDownloader):
+    def __init__(self, **kwargs: Any) -> None:
+        super(_AsyncChunkDownloader, self).__init__(**kwargs)
+        self.stream_lock_async = asyncio.Lock() if kwargs.get('parallel') else None
+        self.progress_lock_async = asyncio.Lock() if kwargs.get('parallel') else None
+
+    async def process_chunk(self, chunk_start: int) -> None:
+        chunk_start, chunk_end = self._calculate_range(chunk_start)
+        chunk_data = await self._download_chunk(chunk_start, chunk_end - 1)
+        length = chunk_end - chunk_start
+        if length > 0:
+            await self._write_to_stream(chunk_data, chunk_start)
+            await self._update_progress(length)
+
+    async def yield_chunk(self, chunk_start: int) -> bytes:
+        chunk_start, chunk_end = self._calculate_range(chunk_start)
+        return await self._download_chunk(chunk_start, chunk_end - 1)
+
+    async def _update_progress(self, length: int) -> None:
+        if self.progress_lock_async:
+            async with self.progress_lock_async:
+                self.progress_total += length
+        else:
+            self.progress_total += length
+
+        if self.progress_hook:
+            await cast(Callable[[int, Optional[int]], Awaitable[Any]], self.progress_hook)(
+                self.progress_total, self.total_size)
+
+    async def _write_to_stream(self, chunk_data: bytes, chunk_start: int) -> None:
+        if self.stream_lock_async:
+            async with self.stream_lock_async:
+                self.stream.seek(self.stream_start + (chunk_start - self.start_index))
+                self.stream.write(chunk_data)
+        else:
+            self.stream.write(chunk_data)
+
+    async def _download_chunk(self, chunk_start: int, chunk_end: int) -> bytes:
+        range_header, range_validation = validate_and_format_range_headers(
+            chunk_start,
+            chunk_end,
+            check_content_md5=self.validate_content
+        )
+        try:
+            _, response = await cast(Awaitable[Any], self.client.download(
+                range=range_header,
+                range_get_content_md5=range_validation,
+                validate_content=self.validate_content,
+                data_stream_total=self.total_size,
+                download_stream_current=self.progress_total,
+                **self.request_options
+            ))
+            if response.properties.etag != self.etag:
+                raise ResourceModifiedError(message="The file has been modified while downloading.")
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+        chunk_data = await process_content(response)
+        return chunk_data
+
+
+class _AsyncChunkIterator(object):
+    """Async iterator for chunks in file download stream."""
+
+    def __init__(self, size: int, content: bytes, downloader: Optional[_AsyncChunkDownloader], chunk_size: int) -> None:
+        self.size = size
+        self._chunk_size = chunk_size
+        self._current_content = content
+        self._iter_downloader = downloader
+        self._iter_chunks: Optional[Generator[int, None, None]] = None
+        self._complete = size == 0
+
+    def __len__(self) -> int:
+        return self.size
+
+    def __iter__(self) -> None:
+        raise TypeError("Async stream must be iterated asynchronously.")
+
+    def __aiter__(self) -> AsyncIterator[bytes]:
+        return self
+
+    async def __anext__(self) -> bytes:
+        if self._complete:
+            raise StopAsyncIteration("Download complete")
+        if not self._iter_downloader:
+            # cut the data obtained from initial GET into chunks
+            if len(self._current_content) > self._chunk_size:
+                return self._get_chunk_data()
+            self._complete = True
+            return self._current_content
+
+        if not self._iter_chunks:
+            self._iter_chunks = self._iter_downloader.get_chunk_offsets()
+
+        # initial GET result still has more than _chunk_size bytes of data
+        if len(self._current_content) >= self._chunk_size:
+            return self._get_chunk_data()
+
+        try:
+            chunk = next(self._iter_chunks)
+            self._current_content += await self._iter_downloader.yield_chunk(chunk)
+        except StopIteration as exc:
+            self._complete = True
+            # there is likely still some data left in self._current_content
+            if self._current_content:
+                return self._current_content
+            raise StopAsyncIteration("Download complete") from exc
+
+        return self._get_chunk_data()
+
+    def _get_chunk_data(self) -> bytes:
+        chunk_data = self._current_content[: self._chunk_size]
+        self._current_content = self._current_content[self._chunk_size:]
+        return chunk_data
+
+
+class StorageStreamDownloader(object):  # pylint: disable=too-many-instance-attributes
+    """A streaming object to download from Azure Storage."""
+
+    name: str
+    """The name of the file being downloaded."""
+    path: str
+    """The full path of the file."""
+    share: str
+    """The name of the share where the file is."""
+    properties: "FileProperties"
+    """The properties of the file being downloaded. If only a range of the data is being
+        downloaded, this will be reflected in the properties."""
+    size: int
+    """The size of the total data in the stream. This will be the byte range if specified,
+        otherwise the total size of the file."""
+
+    def __init__(
+        self, client: "FileOperations" = None,  # type: ignore [assignment]
+        config: "StorageConfiguration" = None,  # type: ignore [assignment]
+        start_range: Optional[int] = None,
+        end_range: Optional[int] = None,
+        validate_content: bool = None,  # type: ignore [assignment]
+        max_concurrency: int = 1,
+        name: str = None,  # type: ignore [assignment]
+        path: str = None,  # type: ignore [assignment]
+        share: str = None,  # type: ignore [assignment]
+        encoding: Optional[str] = None,
+        **kwargs: Any
+    ) -> None:
+        self.name = name
+        self.path = path
+        self.share = share
+        self.size = 0
+
+        self._client = client
+        self._config = config
+        self._start_range = start_range
+        self._end_range = end_range
+        self._max_concurrency = max_concurrency
+        self._encoding = encoding
+        self._validate_content = validate_content
+        self._progress_hook = kwargs.pop('progress_hook', None)
+        self._request_options = kwargs
+        self._location_mode = None
+        self._download_complete = False
+        self._current_content = b""
+        self._file_size = 0
+        self._response = None
+        self._etag = ""
+
+        # The service only provides transactional MD5s for chunks under 4MB.
+        # If validate_content is on, get only self.MAX_CHUNK_GET_SIZE for the first
+        # chunk so a transactional MD5 can be retrieved.
+        self._first_get_size = self._config.max_single_get_size if not self._validate_content \
+            else self._config.max_chunk_get_size
+        initial_request_start = self._start_range or 0
+        if self._end_range is not None and self._end_range - initial_request_start < self._first_get_size:
+            initial_request_end = self._end_range
+        else:
+            initial_request_end = initial_request_start + self._first_get_size - 1
+
+        self._initial_range = (initial_request_start, initial_request_end)
+
+    def __len__(self) -> int:
+        return self.size
+
+    async def _setup(self) -> None:
+        self._response = await self._initial_request()
+        self.properties = self._response.properties  # type: ignore [attr-defined]
+        self.properties.name = self.name
+        self.properties.path = self.path
+        self.properties.share = self.share
+
+        # Set the content length to the download size instead of the size of
+        # the last range
+        self.properties.size = self.size
+
+        # Overwrite the content range to the user requested range
+        self.properties.content_range = f'bytes {self._start_range}-{self._end_range}/{self._file_size}'
+
+        # Overwrite the content MD5 as it is the MD5 for the last range instead
+        # of the stored MD5
+        # TODO: Set to the stored MD5 when the service returns this
+        self.properties.content_md5 = None  # type: ignore [attr-defined]
+
+        if self.size == 0:
+            self._current_content = b""
+        else:
+            self._current_content = await process_content(self._response)
+
+    async def _initial_request(self):
+        range_header, range_validation = validate_and_format_range_headers(
+            self._initial_range[0],
+            self._initial_range[1],
+            start_range_required=False,
+            end_range_required=False,
+            check_content_md5=self._validate_content)
+
+        try:
+            location_mode, response = cast(Tuple[Optional[str], Any], await self._client.download(
+                range=range_header,
+                range_get_content_md5=range_validation,
+                validate_content=self._validate_content,
+                data_stream_total=None,
+                download_stream_current=0,
+                **self._request_options
+            ))
+
+            # Check the location we read from to ensure we use the same one
+            # for subsequent requests.
+            self._location_mode = location_mode
+
+            # Parse the total file size and adjust the download size if ranges
+            # were specified
+            self._file_size = parse_length_from_content_range(response.properties.content_range)
+            if self._file_size is None:
+                raise ValueError("Required Content-Range response header is missing or malformed.")
+
+            if self._end_range is not None:
+                # Use the length unless it is over the end of the file
+                self.size = min(self._file_size, self._end_range - self._start_range + 1)
+            elif self._start_range is not None:
+                self.size = self._file_size - self._start_range
+            else:
+                self.size = self._file_size
+
+        except HttpResponseError as error:
+            if self._start_range is None and error.response and error.response.status_code == 416:
+                # Get range will fail on an empty file. If the user did not
+                # request a range, do a regular get request in order to get
+                # any properties.
+                try:
+                    _, response = cast(Tuple[Optional[Any], Any], await self._client.download(
+                        validate_content=self._validate_content,
+                        data_stream_total=0,
+                        download_stream_current=0,
+                        **self._request_options
+                    ))
+                except HttpResponseError as e:
+                    process_storage_error(e)
+
+                # Set the download size to empty
+                self.size = 0
+                self._file_size = 0
+            else:
+                process_storage_error(error)
+
+        # If the file is small, the download is complete at this point.
+        # If file size is large, download the rest of the file in chunks.
+        if response.properties.size == self.size:
+            self._download_complete = True
+        self._etag = response.properties.etag
+        return response
+
+    def chunks(self) -> AsyncIterator[bytes]:
+        """
+        Iterate over chunks in the download stream.
+
+        :return: An iterator of the chunks in the download stream.
+        :rtype: AsyncIterator[bytes]
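+
+        Example (illustrative sketch, assuming ``downloader`` was returned by an
+        awaited ``ShareFileClient.download_file`` call)::
+
+            total = 0
+            async for chunk in downloader.chunks():
+                total += len(chunk)  # each chunk is a bytes object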
+        """
+        if self.size == 0 or self._download_complete:
+            iter_downloader = None
+        else:
+            data_end = self._file_size
+            if self._end_range is not None:
+                # Use the length unless it is over the end of the file
+                data_end = min(self._file_size, self._end_range + 1)
+            iter_downloader = _AsyncChunkDownloader(
+                client=self._client,
+                total_size=self.size,
+                chunk_size=self._config.max_chunk_get_size,
+                current_progress=self._first_get_size,
+                start_range=self._initial_range[1] + 1,  # Start where the first download ended
+                end_range=data_end,
+                stream=None,
+                parallel=False,
+                validate_content=self._validate_content,
+                use_location=self._location_mode,
+                etag=self._etag,
+                **self._request_options)
+        return _AsyncChunkIterator(
+            size=self.size,
+            content=self._current_content,
+            downloader=iter_downloader,
+            chunk_size=self._config.max_chunk_get_size
+        )
+
+    async def readall(self) -> bytes:
+        """Download the contents of this file.
+
+        This operation is blocking until all data is downloaded.
+
+        :return: The entire file content as bytes.
+        :rtype: bytes
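+
+        Example (illustrative sketch, assuming ``downloader`` was returned by an
+        awaited ``ShareFileClient.download_file`` call)::
+
+            data = await downloader.readall()  # bytes, or str if an encoding was set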
+        """
+        stream = BytesIO()
+        await self.readinto(stream)
+        data = stream.getvalue()
+        if self._encoding:
+            return data.decode(self._encoding)  # type: ignore [return-value]
+        return data
+
+    async def content_as_bytes(self, max_concurrency=1):
+        """DEPRECATED: Download the contents of this file.
+
+        This operation is blocking until all data is downloaded.
+
+        This method is deprecated, use :func:`readall` instead.
+
+        :param int max_concurrency:
+            The number of parallel connections with which to download.
+        :return: The contents of the file as bytes.
+        :rtype: bytes
+        """
+        warnings.warn(
+            "content_as_bytes is deprecated, use readall instead",
+            DeprecationWarning
+        )
+        self._max_concurrency = max_concurrency
+        return await self.readall()
+
+    async def content_as_text(self, max_concurrency=1, encoding="UTF-8"):
+        """DEPRECATED: Download the contents of this file, and decode as text.
+
+        This operation is blocking until all data is downloaded.
+
+        This method is deprecated, use :func:`readall` instead.
+
+        :param int max_concurrency:
+            The number of parallel connections with which to download.
+        :param str encoding:
+            Text encoding to decode the downloaded bytes. Default is UTF-8.
+        :return: The contents of the file as a str.
+        :rtype: str
+        """
+        warnings.warn(
+            "content_as_text is deprecated, use readall instead",
+            DeprecationWarning
+        )
+        self._max_concurrency = max_concurrency
+        self._encoding = encoding
+        return await self.readall()
+
+    async def readinto(self, stream: IO[bytes]) -> int:
+        """Download the contents of this file to a stream.
+
+        :param IO[bytes] stream:
+            The stream to download to. This can be an open file-handle,
+            or any writable stream. The stream must be seekable if the download
+            uses more than one parallel connection.
+        :returns: The number of bytes read.
+        :rtype: int
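+
+        Example (illustrative sketch, assuming ``downloader`` was returned by an
+        awaited ``ShareFileClient.download_file`` call)::
+
+            with open("destination.bin", "wb") as stream:
+                bytes_read = await downloader.readinto(stream)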
+        """
+        # the stream must be seekable if parallel download is required
+        parallel = self._max_concurrency > 1
+        if parallel:
+            error_message = "Target stream handle must be seekable."
+            if sys.version_info >= (3,) and not stream.seekable():
+                raise ValueError(error_message)
+
+            try:
+                stream.seek(stream.tell())
+            except (NotImplementedError, AttributeError) as exc:
+                raise ValueError(error_message) from exc
+
+        # Write the content to the user stream
+        stream.write(self._current_content)
+        if self._progress_hook:
+            await self._progress_hook(len(self._current_content), self.size)
+
+        if self._download_complete:
+            return self.size
+
+        data_end = self._file_size
+        if self._end_range is not None:
+            # Use the length unless it is over the end of the file
+            data_end = min(self._file_size, self._end_range + 1)
+
+        downloader = _AsyncChunkDownloader(
+            client=self._client,
+            total_size=self.size,
+            chunk_size=self._config.max_chunk_get_size,
+            current_progress=self._first_get_size,
+            start_range=self._initial_range[1] + 1,  # start where the first download ended
+            end_range=data_end,
+            stream=stream,
+            parallel=parallel,
+            validate_content=self._validate_content,
+            use_location=self._location_mode,
+            progress_hook=self._progress_hook,
+            etag=self._etag,
+            **self._request_options)
+
+        dl_tasks = downloader.get_chunk_offsets()
+        running_futures = {
+            asyncio.ensure_future(downloader.process_chunk(d))
+            for d in islice(dl_tasks, 0, self._max_concurrency)
+        }
+        while running_futures:
+            # Wait for some download to finish before adding a new one
+            done, running_futures = await asyncio.wait(
+                running_futures, return_when=asyncio.FIRST_COMPLETED)
+            try:
+                for task in done:
+                    task.result()
+            except HttpResponseError as error:
+                process_storage_error(error)
+            try:
+                for _ in range(0, len(done)):
+                    next_chunk = next(dl_tasks)
+                    running_futures.add(asyncio.ensure_future(downloader.process_chunk(next_chunk)))
+            except StopIteration:
+                break
+
+        if running_futures:
+            # Wait for the remaining downloads to finish
+            done, _running_futures = await asyncio.wait(running_futures)
+            try:
+                for task in done:
+                    task.result()
+            except HttpResponseError as error:
+                process_storage_error(error)
+        return self.size
+
+    async def download_to_stream(self, stream, max_concurrency=1):
+        """Download the contents of this file to a stream.
+
+        This method is deprecated, use :func:`readinto` instead.
+
+        :param IO stream:
+            The stream to download to. This can be an open file-handle,
+            or any writable stream. The stream must be seekable if the download
+            uses more than one parallel connection.
+        :param int max_concurrency:
+            The number of parallel connections with which to download.
+        :returns: The properties of the downloaded file.
+        :rtype: Any
+        """
+        warnings.warn(
+            "download_to_stream is deprecated, use readinto instead",
+            DeprecationWarning
+        )
+        self._max_concurrency = max_concurrency
+        await self.readinto(stream)
+        return self.properties
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/fileshare/aio/_file_client_async.py b/.venv/lib/python3.12/site-packages/azure/storage/fileshare/aio/_file_client_async.py
new file mode 100644
index 00000000..6272949b
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/fileshare/aio/_file_client_async.py
@@ -0,0 +1,1740 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+# pylint: disable=too-many-lines, too-many-public-methods, docstring-keyword-should-match-keyword-only
+
+import functools
+import sys
+import time
+import warnings
+from datetime import datetime
+from io import BytesIO
+from typing import (
+    Any, AnyStr, AsyncGenerator, AsyncIterable, Callable, cast,
+    Dict, IO, Iterable, List, Optional, Tuple, Union,
+    TYPE_CHECKING
+)
+from typing_extensions import Self
+
+from azure.core.async_paging import AsyncItemPaged
+from azure.core.exceptions import HttpResponseError, ResourceNotFoundError
+from azure.core.tracing.decorator import distributed_trace
+from azure.core.tracing.decorator_async import distributed_trace_async
+from .._deserialize import deserialize_file_properties, deserialize_file_stream, get_file_ranges_result
+from .._file_client_helpers import (
+    _format_url,
+    _from_file_url,
+    _get_ranges_options,
+    _parse_url,
+    _upload_range_from_url_options
+)
+from .._generated.aio import AzureFileStorage
+from .._generated.models import FileHTTPHeaders
+from .._parser import _datetime_to_str, _get_file_permission, _parse_snapshot
+from .._serialize import (
+    get_access_conditions,
+    get_api_version,
+    get_dest_access_conditions,
+    get_rename_smb_properties,
+    get_smb_properties,
+    get_source_access_conditions
+)
+from .._shared.base_client import StorageAccountHostsMixin, parse_query
+from .._shared.base_client_async import AsyncStorageAccountHostsMixin, parse_connection_str
+from .._shared.policies_async import ExponentialRetry
+from .._shared.request_handlers import add_metadata_headers, get_length
+from .._shared.response_handlers import process_storage_error, return_response_headers
+from .._shared.uploads_async import AsyncIterStreamer, FileChunkUploader, IterStreamer, upload_data_chunks
+from ._download_async import StorageStreamDownloader
+from ._lease_async import ShareLeaseClient
+from ._models import FileProperties, Handle, HandlesPaged
+
+if sys.version_info >= (3, 8):
+    from typing import Literal
+else:
+    from typing_extensions import Literal
+
+if TYPE_CHECKING:
+    from azure.core.credentials import AzureNamedKeyCredential, AzureSasCredential
+    from azure.core.credentials_async import AsyncTokenCredential
+    from .._models import ContentSettings, NTFSAttributes
+    from .._shared.base_client import StorageConfiguration
+
+
+async def _upload_file_helper(
+    client: "ShareFileClient",
+    stream: Any,
+    size: Optional[int],
+    metadata: Optional[Dict[str, str]],
+    content_settings: Optional["ContentSettings"],
+    validate_content: bool,
+    timeout: Optional[int],
+    max_concurrency: int,
+    file_settings: "StorageConfiguration",
+    file_attributes: Optional[Union[str, "NTFSAttributes"]] = None,
+    file_creation_time: Optional[Union[str, datetime]] = None,
+    file_last_write_time: Optional[Union[str, datetime]] = None,
+    file_permission: Optional[str] = None,
+    file_permission_key: Optional[str] = None,
+    progress_hook: Optional[Callable[[int, Optional[int]], None]] = None,
+    **kwargs: Any
+) -> Dict[str, Any]:
+    try:
+        if size is None or size < 0:
+            raise ValueError("A content size must be specified for a File.")
+        response = await client.create_file(
+            size, content_settings=content_settings, metadata=metadata,
+            file_attributes=file_attributes,
+            file_creation_time=file_creation_time,
+            file_last_write_time=file_last_write_time,
+            file_permission=file_permission,
+            permission_key=file_permission_key,
+            timeout=timeout,
+            **kwargs
+        )
+        if size == 0:
+            return response
+
+        responses = await upload_data_chunks(
+            service=client,
+            uploader_class=FileChunkUploader,
+            total_size=size,
+            chunk_size=file_settings.max_range_size,
+            stream=stream,
+            max_concurrency=max_concurrency,
+            validate_content=validate_content,
+            progress_hook=progress_hook,
+            timeout=timeout,
+            **kwargs
+        )
+        return cast(Dict[str, Any], sorted(responses, key=lambda r: r.get('last_modified'))[-1])
+    except HttpResponseError as error:
+        process_storage_error(error)
+
+
+class ShareFileClient(AsyncStorageAccountHostsMixin, StorageAccountHostsMixin):  # type: ignore [misc]
+    """A client to interact with a specific file, although that file may not yet exist.
+
+    :param str account_url:
+        The URI to the storage account. In order to create a client given the full URI to the
+        file, use the :func:`from_file_url` classmethod.
+    :param share_name:
+        The name of the share for the file.
+    :type share_name: str
+    :param str file_path:
+        The file path to the file with which to interact. If specified, this value will override
+        a file value specified in the file URL.
+    :param str snapshot:
+        An optional file snapshot on which to operate. This can be the snapshot ID string
+        or the response returned from :func:`ShareClient.create_snapshot`.
+    :param credential:
+        The credentials with which to authenticate. This is optional if the
+        account URL already has a SAS token. The value can be a SAS token string,
+        an instance of a AzureSasCredential or AzureNamedKeyCredential from azure.core.credentials,
+        an account shared access key, or an instance of a AsyncTokenCredentials class from azure.identity.
+        If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential
+        - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError.
+        If using an instance of AzureNamedKeyCredential, "name" should be the storage account name, and "key"
+        should be the storage account key.
+    :type credential:
+        ~azure.core.credentials.AzureNamedKeyCredential or
+        ~azure.core.credentials.AzureSasCredential or
+        ~azure.core.credentials_async.AsyncTokenCredential or
+        str or dict[str, str] or None
+    :keyword token_intent:
+        Required when using `AsyncTokenCredential` for authentication and ignored for other forms of authentication.
+        Specifies the intent for all requests when using `AsyncTokenCredential` authentication. Possible values are:
+
+        backup - Specifies requests are intended for backup/admin type operations, meaning that all file/directory
+                 ACLs are bypassed and full permissions are granted. User must also have required RBAC permission.
+
+    :paramtype token_intent: Literal['backup']
+    :keyword bool allow_trailing_dot: If true, the trailing dot will not be trimmed from the target URI.
+    :keyword bool allow_source_trailing_dot: If true, the trailing dot will not be trimmed from the source URI.
+    :keyword str api_version:
+        The Storage API version to use for requests. Default value is the most recent service version that is
+        compatible with the current SDK. Setting to an older version may result in reduced feature compatibility.
+
+        .. versionadded:: 12.1.0
+
+    :keyword str secondary_hostname:
+        The hostname of the secondary endpoint.
+    :keyword int max_range_size: The maximum range size used for a file upload. Defaults to 4*1024*1024.
+    :keyword str audience: The audience to use when requesting tokens for Azure Active Directory
+        authentication. Only has an effect when credential is of type AsyncTokenCredential. The value could be
+        https://storage.azure.com/ (default) or https://<account>.file.core.windows.net.
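+
+    Example (illustrative sketch; the account URL, share name, file path, and SAS
+    token below are placeholders)::
+
+        from azure.storage.fileshare.aio import ShareFileClient
+
+        file_client = ShareFileClient(
+            account_url="https://<account>.file.core.windows.net",
+            share_name="myshare",
+            file_path="dir/myfile.txt",
+            credential="<sas_token>"
+        )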
+    """
+    def __init__(
+        self, account_url: str,
+        share_name: str,
+        file_path: str,
+        snapshot: Optional[Union[str, Dict[str, Any]]] = None,
+        credential: Optional[Union[str, Dict[str, str], "AzureNamedKeyCredential", "AzureSasCredential", "AsyncTokenCredential"]] = None,  # pylint: disable=line-too-long
+        *,
+        token_intent: Optional[Literal['backup']] = None,
+        **kwargs: Any
+    ) -> None:
+        kwargs["retry_policy"] = kwargs.get("retry_policy") or ExponentialRetry(**kwargs)
+        loop = kwargs.pop('loop', None)
+        if loop and sys.version_info >= (3, 8):
+            warnings.warn("The 'loop' parameter was deprecated from asyncio's high-level"
+                          "APIs in Python 3.8 and is no longer supported.", DeprecationWarning)
+        if hasattr(credential, 'get_token') and not token_intent:
+            raise ValueError("'token_intent' keyword is required when 'credential' is an AsyncTokenCredential.")
+        parsed_url = _parse_url(account_url, share_name, file_path)
+        path_snapshot, sas_token = parse_query(parsed_url.query)
+        if not sas_token and not credential:
+            raise ValueError(
+                'You need to provide either an account shared key or SAS token when creating a storage service.')
+        self.snapshot = _parse_snapshot(snapshot, path_snapshot)
+        self.share_name = share_name
+        self.file_path = file_path.split('/')
+        self.file_name = self.file_path[-1]
+        self.directory_path = "/".join(self.file_path[:-1])
+
+        self._query_str, credential = self._format_query_string(
+            sas_token, credential, share_snapshot=self.snapshot)
+        super(ShareFileClient, self).__init__(
+            parsed_url, service='file-share', credential=credential, **kwargs)
+        self.allow_trailing_dot = kwargs.pop('allow_trailing_dot', None)
+        self.allow_source_trailing_dot = kwargs.pop('allow_source_trailing_dot', None)
+        self.file_request_intent = token_intent
+        self._client = AzureFileStorage(url=self.url, base_url=self.url, pipeline=self._pipeline,
+                                        allow_trailing_dot=self.allow_trailing_dot,
+                                        allow_source_trailing_dot=self.allow_source_trailing_dot,
+                                        file_request_intent=self.file_request_intent)
+        self._client._config.version = get_api_version(kwargs)  # type: ignore [assignment]
+
+    @classmethod
+    def from_file_url(
+        cls, file_url: str,
+        snapshot: Optional[Union[str, Dict[str, Any]]] = None,
+        credential: Optional[Union[str, Dict[str, str], "AzureNamedKeyCredential", "AzureSasCredential", "AsyncTokenCredential"]] = None,  # pylint: disable=line-too-long
+        **kwargs: Any
+    ) -> Self:
+        """A client to interact with a specific file, although that file may not yet exist.
+
+        :param str file_url: The full URI to the file.
+        :param str snapshot:
+            An optional file snapshot on which to operate. This can be the snapshot ID string
+            or the response returned from :func:`ShareClient.create_snapshot`.
+        :param credential:
+            The credentials with which to authenticate. This is optional if the
+            account URL already has a SAS token. The value can be a SAS token string,
+            an instance of a AzureSasCredential or AzureNamedKeyCredential from azure.core.credentials,
+            an account shared access key, or an instance of a AsyncTokenCredentials class from azure.identity.
+            If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential
+            - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError.
+            If using an instance of AzureNamedKeyCredential, "name" should be the storage account name, and "key"
+            should be the storage account key.
+        :type credential:
+            ~azure.core.credentials.AzureNamedKeyCredential or
+            ~azure.core.credentials.AzureSasCredential or
+            ~azure.core.credentials_async.AsyncTokenCredential or
+            str or dict[str, str] or None
+        :keyword str audience: The audience to use when requesting tokens for Azure Active Directory
+            authentication. Only has an effect when credential is of type AsyncTokenCredential. The value could be
+            https://storage.azure.com/ (default) or https://<account>.file.core.windows.net.
+        :returns: A File client.
+        :rtype: ~azure.storage.fileshare.ShareFileClient
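+
+        Example (illustrative sketch; the URL and SAS token are placeholders)::
+
+            file_client = ShareFileClient.from_file_url(
+                "https://<account>.file.core.windows.net/myshare/dir/myfile.txt",
+                credential="<sas_token>"
+            )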
+        """
+        account_url, share_name, file_path, snapshot = _from_file_url(file_url, snapshot)
+        return cls(account_url, share_name, file_path, snapshot, credential, **kwargs)
+
+    def _format_url(self, hostname: str):
+        return _format_url(self.scheme, hostname, self.share_name, self.file_path, self._query_str)
+
+    @classmethod
+    def from_connection_string(
+        cls, conn_str: str,
+        share_name: str,
+        file_path: str,
+        snapshot: Optional[Union[str, Dict[str, Any]]] = None,
+        credential: Optional[Union[str, Dict[str, str], "AzureNamedKeyCredential", "AzureSasCredential", "AsyncTokenCredential"]] = None,  # pylint: disable=line-too-long
+        **kwargs: Any
+    ) -> Self:
+        """Create ShareFileClient from a Connection String.
+
+        :param str conn_str:
+            A connection string to an Azure Storage account.
+        :param share_name: The name of the share.
+        :type share_name: str
+        :param str file_path:
+            The file path.
+        :param str snapshot:
+            An optional file snapshot on which to operate. This can be the snapshot ID string
+            or the response returned from :func:`ShareClient.create_snapshot`.
+        :param credential:
+            The credentials with which to authenticate. This is optional if the
+            account URL already has a SAS token. The value can be a SAS token string,
+            an instance of a AzureSasCredential or AzureNamedKeyCredential from azure.core.credentials,
+            an account shared access key, or an instance of a AsyncTokenCredentials class from azure.identity.
+            If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential
+            - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError.
+            If using an instance of AzureNamedKeyCredential, "name" should be the storage account name, and "key"
+            should be the storage account key.
+        :type credential:
+            ~azure.core.credentials.AzureNamedKeyCredential or
+            ~azure.core.credentials.AzureSasCredential or
+            ~azure.core.credentials_async.AsyncTokenCredential or
+            str or dict[str, str] or None
+        :keyword str audience: The audience to use when requesting tokens for Azure Active Directory
+            authentication. Only has an effect when credential is of type AsyncTokenCredential. The value could be
+            https://storage.azure.com/ (default) or https://<account>.file.core.windows.net.
+        :returns: A File client.
+        :rtype: ~azure.storage.fileshare.ShareFileClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/file_samples_hello_world_async.py
+                :start-after: [START create_file_client]
+                :end-before: [END create_file_client]
+                :language: python
+                :dedent: 12
+                :caption: Creates the file client with connection string.
+        """
+        account_url, secondary, credential = parse_connection_str(conn_str, credential, 'file')
+        if 'secondary_hostname' not in kwargs:
+            kwargs['secondary_hostname'] = secondary
+        return cls(
+            account_url, share_name=share_name, file_path=file_path, snapshot=snapshot, credential=credential, **kwargs)
+
+    @distributed_trace_async
+    async def acquire_lease(self, lease_id: Optional[str] = None, **kwargs: Any) -> ShareLeaseClient:
+        """Requests a new lease.
+
+        If the file does not have an active lease, the File
+        Service creates a lease on the file and returns a new lease.
+
+        :param str lease_id:
+            Proposed lease ID, in a GUID string format. The File Service
+            returns 400 (Invalid request) if the proposed lease ID is not
+            in the correct format.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :returns: A ShareLeaseClient object.
+        :rtype: ~azure.storage.fileshare.aio.ShareLeaseClient
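+
+        Example (illustrative sketch; assumes ``file_client`` is an existing
+        ``ShareFileClient``)::
+
+            lease = await file_client.acquire_lease()
+            try:
+                ...  # operate on the leased file
+            finally:
+                await lease.release()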
+        """
+        kwargs['lease_duration'] = -1
+        lease = ShareLeaseClient(self, lease_id=lease_id)
+        await lease.acquire(**kwargs)
+        return lease
+
+    @distributed_trace_async
+    async def exists(self, **kwargs: Any) -> bool:
+        """
+        Returns True if the file exists and returns False otherwise.
+
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :returns: True if the file exists, False otherwise.
+        :rtype: bool
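+
+        Example (illustrative sketch; assumes ``file_client`` is an existing
+        ``ShareFileClient``)::
+
+            if await file_client.exists():
+                props = await file_client.get_properties()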
+        """
+        try:
+            await self._client.file.get_properties(**kwargs)
+            return True
+        except HttpResponseError as error:
+            try:
+                process_storage_error(error)
+            except ResourceNotFoundError:
+                return False
+
+    @distributed_trace_async
+    async def create_file(
+        self, size: int,
+        file_attributes: Optional[Union[str, "NTFSAttributes"]] = None,
+        file_creation_time: Optional[Union[str, datetime]] = None,
+        file_last_write_time: Optional[Union[str, datetime]] = None,
+        file_permission: Optional[str] = None,
+        permission_key: Optional[str] = None,
+        **kwargs: Any
+    ) -> Dict[str, Any]:
+        """Creates a new file.
+
+        Note that it only initializes the file with no content.
+
+        :param int size: Specifies the maximum size for the file,
+            up to 1 TB.
+        :param file_attributes:
+            The file system attributes for files and directories.
+            If not set, the default value would be "None" and the attributes will be set to "Archive".
+            Here is an example for when the var type is str: 'Temporary|Archive'.
+            file_attributes value is not case sensitive.
+        :type file_attributes: str or ~azure.storage.fileshare.NTFSAttributes or None
+        :param file_creation_time: Creation time for the file
+        :type file_creation_time: str or ~datetime.datetime or None
+        :param file_last_write_time: Last write time for the file
+        :type file_last_write_time: str or ~datetime.datetime or None
+        :param file_permission: If specified the permission (security
+            descriptor) shall be set for the directory/file. This header can be
+            used if Permission size is <= 8KB, else x-ms-file-permission-key
+            header shall be used. Default value: Inherit. If SDDL is specified as
+            input, it must have owner, group and dacl. Note: Only one of the
+            x-ms-file-permission or x-ms-file-permission-key should be specified.
+        :type file_permission: str
+        :param permission_key: Key of the permission to be set for the
+            directory/file. Note: Only one of the x-ms-file-permission or
+            x-ms-file-permission-key should be specified.
+        :type permission_key: str
+        :keyword file_permission_format:
+            Specifies the format in which the permission is returned. If not specified, SDDL will be the default.
+        :paramtype file_permission_format: Literal['sddl', 'binary']
+        :keyword file_change_time:
+            Change time for the file. If not specified, change time will be set to the current date/time.
+
+            .. versionadded:: 12.8.0
+
+                This parameter was introduced in API version '2021-06-08'.
+
+        :paramtype file_change_time: str or ~datetime.datetime
+        :keyword ~azure.storage.fileshare.ContentSettings content_settings:
+            ContentSettings object used to set file properties. Used to set content type, encoding,
+            language, disposition, md5, and cache control.
+        :keyword metadata:
+            Name-value pairs associated with the file as metadata.
+        :paramtype metadata: Optional[dict[str, str]]
+        :keyword lease:
+            Required if the file has an active lease. Value can be a ShareLeaseClient object
+            or the lease ID as a string.
+
+            .. versionadded:: 12.1.0
+
+        :paramtype lease: ~azure.storage.fileshare.aio.ShareLeaseClient or str
+        :keyword str owner:
+            NFS only. The owner of the file.
+        :keyword str group:
+            NFS only. The owning group of the file.
+        :keyword str file_mode:
+            NFS only. The file mode of the file.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :returns: File-updated property dict (Etag and last modified).
+        :rtype: dict[str, Any]
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/file_samples_client_async.py
+                :start-after: [START create_file]
+                :end-before: [END create_file]
+                :language: python
+                :dedent: 16
+                :caption: Create a file.
+        """
+        access_conditions = get_access_conditions(kwargs.pop('lease', None))
+        content_settings = kwargs.pop('content_settings', None)
+        metadata = kwargs.pop('metadata', None)
+        timeout = kwargs.pop('timeout', None)
+        headers = kwargs.pop("headers", {})
+        headers.update(add_metadata_headers(metadata))
+        file_http_headers = None
+        if content_settings:
+            file_http_headers = FileHTTPHeaders(
+                file_cache_control=content_settings.cache_control,
+                file_content_type=content_settings.content_type,
+                file_content_md5=bytearray(content_settings.content_md5) if content_settings.content_md5 else None,
+                file_content_encoding=content_settings.content_encoding,
+                file_content_language=content_settings.content_language,
+                file_content_disposition=content_settings.content_disposition,
+            )
+        file_permission = _get_file_permission(file_permission, permission_key, None)
+        file_change_time = kwargs.pop('file_change_time', None)
+        try:
+            return cast(Dict[str, Any], await self._client.file.create(
+                file_content_length=size,
+                metadata=metadata,
+                file_attributes=str(file_attributes) if file_attributes is not None else file_attributes,
+                file_creation_time=_datetime_to_str(file_creation_time),
+                file_last_write_time=_datetime_to_str(file_last_write_time),
+                file_change_time=_datetime_to_str(file_change_time),
+                file_permission=file_permission,
+                file_permission_key=permission_key,
+                file_http_headers=file_http_headers,
+                lease_access_conditions=access_conditions,
+                headers=headers,
+                timeout=timeout,
+                cls=return_response_headers,
+                **kwargs
+            ))
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    @distributed_trace_async
+    async def upload_file(
+        self, data: Union[bytes, str, Iterable[AnyStr], AsyncIterable[AnyStr], IO[AnyStr]],
+        length: Optional[int] = None,
+        file_attributes: Optional[Union[str, "NTFSAttributes"]] = None,
+        file_creation_time: Optional[Union[str, datetime]] = None,
+        file_last_write_time: Optional[Union[str, datetime]] = None,
+        file_permission: Optional[str] = None,
+        permission_key: Optional[str] = None,
+        **kwargs
+    ) -> Dict[str, Any]:
+        """Uploads a new file.
+
+        :param data:
+            Content of the file.
+        :type data: Union[bytes, str, Iterable[AnyStr], AsyncIterable[AnyStr], IO[AnyStr]]
+        :param int length:
+            Length of the file in bytes. Specify its maximum size, up to 1 TiB.
+        :param file_attributes:
+            The file system attributes for files and directories.
+            If not set, the default value would be "None" and the attributes will be set to "Archive".
+            Here is an example for when the var type is str: 'Temporary|Archive'.
+            file_attributes value is not case sensitive.
+        :type file_attributes: str or ~azure.storage.fileshare.NTFSAttributes or None
+        :param file_creation_time: Creation time for the file
+        :type file_creation_time: str or ~datetime.datetime or None
+        :param file_last_write_time: Last write time for the file
+        :type file_last_write_time: str or ~datetime.datetime or None
+        :param file_permission: If specified the permission (security
+            descriptor) shall be set for the directory/file. This header can be
+            used if Permission size is <= 8KB, else x-ms-file-permission-key
+            header shall be used. Default value: Inherit. If SDDL is specified as
+            input, it must have owner, group and dacl. Note: Only one of the
+            x-ms-file-permission or x-ms-file-permission-key should be specified.
+        :type file_permission: str
+        :param permission_key: Key of the permission to be set for the
+            directory/file. Note: Only one of the x-ms-file-permission or
+            x-ms-file-permission-key should be specified.
+        :type permission_key: str
+        :keyword file_change_time:
+            Change time for the file. If not specified, change time will be set to the current date/time.
+
+            .. versionadded:: 12.8.0
+
+                This parameter was introduced in API version '2021-06-08'.
+
+        :paramtype file_change_time: str or ~datetime.datetime
+        :keyword metadata:
+            Name-value pairs associated with the file as metadata.
+        :paramtype metadata: Optional[dict[str, str]]
+        :keyword ~azure.storage.fileshare.ContentSettings content_settings:
+            ContentSettings object used to set file properties. Used to set content type, encoding,
+            language, disposition, md5, and cache control.
+        :keyword bool validate_content:
+            If true, calculates an MD5 hash for each range of the file. The storage
+            service checks the hash of the content that has arrived with the hash
+            that was sent. This is primarily valuable for detecting bitflips on
+            the wire if using http instead of https, as https (the default) will
+            already validate. Note that this MD5 hash is not stored with the
+            file.
+        :keyword int max_concurrency:
+            Maximum number of parallel connections to use when transferring the file in chunks.
+            This option does not affect the underlying connection pool, and may
+            require a separate configuration of the connection pool.
+        :keyword str encoding:
+            Encoding to use if the supplied data is text. Defaults to UTF-8.
+        :keyword lease:
+            Required if the file has an active lease. Value can be a ShareLeaseClient object
+            or the lease ID as a string.
+
+            .. versionadded:: 12.1.0
+
+        :paramtype lease: ~azure.storage.fileshare.aio.ShareLeaseClient or str
+        :keyword progress_hook:
+            An async callback to track the progress of a long running upload. The signature is
+            function(current: int, total: Optional[int]) where current is the number of bytes transferred
+            so far, and total is the size of the file or None if the size is unknown.
+        :paramtype progress_hook: Callable[[int, Optional[int]], Awaitable[None]]
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :returns: File-updated property dict (Etag and last modified).
+        :rtype: dict[str, Any]
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/file_samples_client_async.py
+                :start-after: [START upload_file]
+                :end-before: [END upload_file]
+                :language: python
+                :dedent: 16
+                :caption: Upload a file.
+        """
+        metadata = kwargs.pop('metadata', None)
+        content_settings = kwargs.pop('content_settings', None)
+        max_concurrency = kwargs.pop('max_concurrency', 1)
+        validate_content = kwargs.pop('validate_content', False)
+        progress_hook = kwargs.pop('progress_hook', None)
+        timeout = kwargs.pop('timeout', None)
+        encoding = kwargs.pop('encoding', 'UTF-8')
+
+        if isinstance(data, str):
+            data = data.encode(encoding)
+        if length is None:
+            length = get_length(data)
+        if isinstance(data, bytes):
+            data = data[:length]
+
+        stream: Optional[Any] = None
+        if isinstance(data, bytes):
+            stream = BytesIO(data)
+        elif hasattr(data, "read"):
+            stream = data
+        elif hasattr(data, "__iter__"):
+            stream = IterStreamer(data, encoding=encoding)
+        elif hasattr(data, '__aiter__'):
+            stream = AsyncIterStreamer(cast(AsyncGenerator, data), encoding=encoding)
+        else:
+            raise TypeError(f"Unsupported data type: {type(data)}")
+        return await _upload_file_helper(
+            self,
+            stream,
+            length,
+            metadata,
+            content_settings,
+            validate_content,
+            timeout,
+            max_concurrency,
+            self._config,
+            file_attributes=file_attributes,
+            file_creation_time=file_creation_time,
+            file_last_write_time=file_last_write_time,
+            file_permission=file_permission,
+            file_permission_key=permission_key,
+            progress_hook=progress_hook,
+            **kwargs)
+
+    @distributed_trace_async
+    async def start_copy_from_url(self, source_url: str, **kwargs: Any) -> Dict[str, Any]:
+        """Initiates the copying of data from a source URL into the file
+        referenced by the client.
+
+        The status of this copy operation can be found using the `get_properties`
+        method.
+
+        :param str source_url:
+            Specifies the URL of the source file.
+        :keyword str file_permission:
+            If specified the permission (security descriptor) shall be set for the directory/file.
+            This value can be set to "source" to copy the security descriptor from the source file.
+            Otherwise if set, this value will be used to override the source value. If not set, permission value
+            is inherited from the parent directory of the target file. This setting can be
+            used if Permission size is <= 8KB, otherwise permission_key shall be used.
+            If SDDL is specified as input, it must have owner, group and dacl.
+            Note: Only one of the file_permission or permission_key should be specified.
+
+            .. versionadded:: 12.1.0
+
+                This parameter was introduced in API version '2019-07-07'.
+
+        :keyword str permission_key:
+            Key of the permission to be set for the directory/file.
+            This value can be set to "source" to copy the security descriptor from the source file.
+            Otherwise if set, this value will be used to override the source value. If not set, permission value
+            is inherited from the parent directory of the target file.
+            Note: Only one of the file_permission or permission_key should be specified.
+
+            .. versionadded:: 12.1.0
+
+                This parameter was introduced in API version '2019-07-07'.
+
+        :keyword file_permission_format:
+            Specifies the format in which the permission is returned. If not specified, SDDL will be the default.
+        :paramtype file_permission_format: Literal['sddl', 'binary']
+        :keyword file_attributes:
+            This value can be set to "source" to copy file attributes from the source file to the target file,
+            or to clear all attributes, it can be set to "None". Otherwise it can be set to a list of attributes
+            to set on the target file. If this is not set, the default value is "Archive".
+
+            .. versionadded:: 12.1.0
+
+                This parameter was introduced in API version '2019-07-07'.
+
+        :paramtype file_attributes: str or ~azure.storage.fileshare.NTFSAttributes
+        :keyword file_creation_time:
+            This value can be set to "source" to copy the creation time from the source file to the target file,
+            or a datetime to set as creation time on the target file. This could also be a string in ISO 8601 format.
+            If this is not set, creation time will be set to the date time value of the creation
+            (or when it was overwritten) of the target file by copy engine.
+
+            .. versionadded:: 12.1.0
+
+                This parameter was introduced in API version '2019-07-07'.
+
+        :paramtype file_creation_time: str or ~datetime.datetime
+        :keyword file_last_write_time:
+            This value can be set to "source" to copy the last write time from the source file to the target file, or
+            a datetime to set as the last write time on the target file. This could also be a string in ISO 8601 format.
+            If this is not set, value will be the last write time to the file by the copy engine.
+
+            .. versionadded:: 12.1.0
+
+                This parameter was introduced in API version '2019-07-07'.
+
+        :paramtype file_last_write_time: str or ~datetime.datetime
+        :keyword file_change_time:
+            Change time for the file. If not specified, change time will be set to the current date/time.
+
+            .. versionadded:: 12.9.0
+
+                This parameter was introduced in API version '2021-06-08'.
+
+        :paramtype file_change_time: str or ~datetime.datetime
+        :keyword bool ignore_read_only:
+            Specifies the option to overwrite the target file if it already exists and has read-only attribute set.
+
+            .. versionadded:: 12.1.0
+
+                This parameter was introduced in API version '2019-07-07'.
+
+        :keyword bool set_archive_attribute:
+            Specifies the option to set the archive attribute on the target file.
+            True means the archive attribute will be set on the target file despite attribute
+            overrides or the source file state.
+
+            .. versionadded:: 12.1.0
+
+                This parameter was introduced in API version '2019-07-07'.
+
+        :keyword metadata:
+            Name-value pairs associated with the file as metadata.
+        :paramtype metadata: Optional[dict[str, str]]
+        :keyword lease:
+            Required if the file has an active lease. Value can be a ShareLeaseClient object
+            or the lease ID as a string.
+
+            .. versionadded:: 12.1.0
+
+        :paramtype lease: ~azure.storage.fileshare.aio.ShareLeaseClient or str
+        :keyword str owner:
+            NFS only. The owner of the file.
+        :keyword str group:
+            NFS only. The owning group of the file.
+        :keyword str file_mode:
+            NFS only. The file mode of the file.
+        :keyword file_mode_copy_mode:
+            NFS only. Applicable only when the copy source is a File. Determines the copy behavior
+            of the mode bits of the file. Possible values are:
+
+            source - The mode on the destination file is copied from the source file.
+            override - The mode on the destination file is determined via the file_mode keyword.
+        :paramtype file_mode_copy_mode: Literal['source', 'override']
+        :keyword owner_copy_mode:
+            NFS only. Applicable only when the copy source is a File. Determines the copy behavior
+            of the owner and group of the file. Possible values are:
+
+            source - The owner and group on the destination file is copied from the source file.
+            override - The owner and group on the destination file is determined via the owner and group keywords.
+        :paramtype owner_copy_mode: Literal['source', 'override']
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :returns: Response headers after the copy operation has been initiated.
+        :rtype: dict[str, Any]
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/file_samples_client_async.py
+                :start-after: [START copy_file_from_url]
+                :end-before: [END copy_file_from_url]
+                :language: python
+                :dedent: 16
+                :caption: Copy a file from a URL
+        """
+        metadata = kwargs.pop('metadata', None)
+        access_conditions = get_access_conditions(kwargs.pop('lease', None))
+        timeout = kwargs.pop('timeout', None)
+        owner = kwargs.pop('owner', None)
+        group = kwargs.pop('group', None)
+        file_mode = kwargs.pop('file_mode', None)
+        file_mode_copy_mode = kwargs.pop('file_mode_copy_mode', None)
+        file_owner_copy_mode = kwargs.pop('owner_copy_mode', None)
+        headers = kwargs.pop("headers", {})
+        headers.update(add_metadata_headers(metadata))
+        kwargs.update(get_smb_properties(kwargs))
+        try:
+            return cast(Dict[str, Any], await self._client.file.start_copy(
+                source_url,
+                metadata=metadata,
+                lease_access_conditions=access_conditions,
+                owner=owner,
+                group=group,
+                file_mode=file_mode,
+                file_mode_copy_mode=file_mode_copy_mode,
+                file_owner_copy_mode=file_owner_copy_mode,
+                headers=headers,
+                cls=return_response_headers,
+                timeout=timeout,
+                **kwargs
+            ))
+        except HttpResponseError as error:
+            process_storage_error(error)
+
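As a usage sketch (all account names, paths, and SAS tokens below are placeholders), the copy initiated above
is exposed through the client's public start_copy_from_url method, and the returned headers carry the copy ID:

    import asyncio
    from azure.storage.fileshare.aio import ShareFileClient

    async def copy_example():
        # Hypothetical destination file client.
        dest = ShareFileClient(
            account_url="https://myaccount.file.core.windows.net",
            share_name="myshare",
            file_path="dir/copied.txt",
            credential="<sas-token>")
        async with dest:
            # Starts a server-side copy; the response headers include
            # 'copy_id' and 'copy_status'.
            result = await dest.start_copy_from_url(
                "https://myaccount.file.core.windows.net/myshare/dir/source.txt?<sas-token>")
            print(result.get("copy_id"), result.get("copy_status"))

    asyncio.run(copy_example())

The 'copy_id' can later be passed to abort_copy below if the pending copy needs to be cancelled.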
+    @distributed_trace_async
+    async def abort_copy(self, copy_id: Union[str, FileProperties], **kwargs: Any) -> None:
+        """Abort an ongoing copy operation.
+
+        This will leave a destination file with zero length and full metadata.
+        This will raise an error if the copy operation has already ended.
+
+        :param copy_id:
+            The copy operation to abort. This can be either an ID, or an
+            instance of FileProperties.
+        :type copy_id: str or ~azure.storage.fileshare.FileProperties
+        :keyword lease:
+            Required if the file has an active lease. Value can be a ShareLeaseClient object
+            or the lease ID as a string.
+
+            .. versionadded:: 12.1.0
+
+        :paramtype lease: ~azure.storage.fileshare.aio.ShareLeaseClient or str
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :rtype: None
+        """
+        access_conditions = get_access_conditions(kwargs.pop('lease', None))
+        timeout = kwargs.pop('timeout', None)
+        if isinstance(copy_id, FileProperties):
+            copy_id = copy_id.copy.id
+        elif isinstance(copy_id, Dict):
+            copy_id = copy_id['copy_id']
+        try:
+            await self._client.file.abort_copy(copy_id=copy_id,
+                                               lease_access_conditions=access_conditions,
+                                               timeout=timeout, **kwargs)
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    @distributed_trace_async
+    async def download_file(
+        self, offset: Optional[int] = None,
+        length: Optional[int] = None,
+        **kwargs: Any
+    ) -> StorageStreamDownloader:
+        """Downloads a file to the StorageStreamDownloader. The readall() method must
+        be used to read all the content or readinto() must be used to download the file into
+        a stream. Using chunks() returns an async iterator which allows the user to iterate over the content in chunks.
+
+        :param int offset:
+            Start of byte range to use for downloading a section of the file.
+            Must be set if length is provided.
+        :param int length:
+            Number of bytes to read from the stream. This is optional, but
+            should be supplied for optimal performance.
+        :keyword int max_concurrency:
+            Maximum number of parallel connections to use when transferring the file in chunks.
+            This option does not affect the underlying connection pool, and may
+            require a separate configuration of the connection pool.
+        :keyword bool validate_content:
+            If true, calculates an MD5 hash for each chunk of the file. The storage
+            service checks the hash of the content that has arrived with the hash
+            that was sent. This is primarily valuable for detecting bitflips on
+            the wire if using http instead of https, as https (the default) will
+            already validate. Note that this MD5 hash is not stored with the
+            file. Also note that if enabled, the memory-efficient download algorithm
+            will not be used, because computing the MD5 hash requires buffering
+            entire blocks, and doing so defeats the purpose of the memory-efficient algorithm.
+        :keyword lease:
+            Required if the file has an active lease. Value can be a ShareLeaseClient object
+            or the lease ID as a string.
+
+            .. versionadded:: 12.1.0
+
+        :paramtype lease: ~azure.storage.fileshare.aio.ShareLeaseClient or str
+        :keyword progress_hook:
+            An async callback to track the progress of a long running download. The signature is
+            function(current: int, total: int) where current is the number of bytes transferred
+            so far, and total is the size of the file or None if the size is unknown.
+        :paramtype progress_hook: Callable[[int, int], Awaitable[None]]
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :returns: A streaming object (StorageStreamDownloader)
+        :rtype: ~azure.storage.fileshare.aio.StorageStreamDownloader
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/file_samples_client_async.py
+                :start-after: [START download_file]
+                :end-before: [END download_file]
+                :language: python
+                :dedent: 16
+                :caption: Download a file.
+        """
+        range_end = None
+        if length is not None:
+            if offset is None:
+                raise ValueError("Offset value must not be None if length is set.")
+            range_end = offset + length - 1  # Service uses an inclusive end-range index
+
+        access_conditions = get_access_conditions(kwargs.pop('lease', None))
+
+        downloader = StorageStreamDownloader(
+            client=self._client.file,
+            config=self._config,
+            start_range=offset,
+            end_range=range_end,
+            name=self.file_name,
+            path='/'.join(self.file_path),
+            share=self.share_name,
+            lease_access_conditions=access_conditions,
+            cls=deserialize_file_stream,
+            **kwargs
+        )
+        await downloader._setup()  # pylint: disable=protected-access
+        return downloader
+
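A minimal sketch of the download flow described above, with placeholder account details:

    import asyncio
    from azure.storage.fileshare.aio import ShareFileClient

    async def download_example():
        file = ShareFileClient(
            account_url="https://myaccount.file.core.windows.net",
            share_name="myshare",
            file_path="dir/data.bin",
            credential="<sas-token>")
        async with file:
            # download_file returns a StorageStreamDownloader; readall()
            # buffers the entire file into memory.
            stream = await file.download_file()
            data = await stream.readall()
            print(len(data))

    asyncio.run(download_example())

For large files, iterating with `async for chunk in stream.chunks():` processes the content incrementally
instead of buffering it all at once.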
+    @distributed_trace_async
+    async def delete_file(self, **kwargs: Any) -> None:
+        """Marks the specified file for deletion. The file is
+        later deleted during garbage collection.
+
+        :keyword lease:
+            Required if the file has an active lease. Value can be a ShareLeaseClient object
+            or the lease ID as a string.
+
+            .. versionadded:: 12.1.0
+
+        :paramtype lease: ~azure.storage.fileshare.aio.ShareLeaseClient or str
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :rtype: None
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/file_samples_client_async.py
+                :start-after: [START delete_file]
+                :end-before: [END delete_file]
+                :language: python
+                :dedent: 16
+                :caption: Delete a file.
+        """
+        access_conditions = get_access_conditions(kwargs.pop('lease', None))
+        timeout = kwargs.pop('timeout', None)
+        try:
+            await self._client.file.delete(lease_access_conditions=access_conditions, timeout=timeout, **kwargs)
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    @distributed_trace_async
+    async def rename_file(self, new_name: str, **kwargs: Any) -> "ShareFileClient":
+        """
+        Rename the source file.
+
+        :param str new_name:
+            The new file name.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :keyword bool overwrite:
+            A boolean value specifying whether the rename should overwrite the destination file if it
+            already exists. If true, the rename will succeed and will overwrite the destination file.
+            If not provided, or if false and the destination file does exist, the request will not
+            overwrite the destination file. If provided and the destination file doesn't exist, the
+            rename will succeed.
+        :keyword bool ignore_read_only:
+            A boolean value that specifies whether the ReadOnly attribute on a preexisting destination
+            file should be respected. If true, the rename will succeed, otherwise, a previous file at the
+            destination with the ReadOnly attribute set will cause the rename to fail.
+        :keyword str file_permission:
+            If specified the permission (security descriptor) shall be set for the file. This header
+            can be used if Permission size is <= 8KB, else file_permission_key shall be used.
+            If SDDL is specified as input, it must have owner, group and dacl.
+            A value of 'preserve' can be passed to preserve source permissions.
+            Note: Only one of the file_permission or file_permission_key should be specified.
+        :keyword str file_permission_key:
+            Key of the permission to be set for the file.
+            Note: Only one of the file_permission or file_permission_key should be specified.
+        :keyword file_permission_format:
+            Specifies the format in which the permission is returned. If not specified, SDDL will be the default.
+        :paramtype file_permission_format: Literal['sddl', 'binary']
+        :keyword file_attributes:
+            The file system attributes for the file.
+        :paramtype file_attributes: ~azure.storage.fileshare.NTFSAttributes or str
+        :keyword file_creation_time:
+            Creation time for the file.
+        :paramtype file_creation_time: ~datetime.datetime or str
+        :keyword file_last_write_time:
+            Last write time for the file.
+        :paramtype file_last_write_time: ~datetime.datetime or str
+        :keyword file_change_time:
+            Change time for the file. If not specified, change time will be set to the current date/time.
+
+            .. versionadded:: 12.8.0
+
+                This parameter was introduced in API version '2021-06-08'.
+
+        :paramtype file_change_time: str or ~datetime.datetime
+        :keyword str content_type:
+            The Content Type of the new file.
+
+            .. versionadded:: 12.8.0
+
+                This parameter was introduced in API version '2021-06-08'.
+
+        :keyword Dict[str,str] metadata:
+            A name-value pair to associate with a file storage object.
+        :keyword source_lease:
+            Required if the source file has an active lease. Value can be a ShareLeaseClient object
+            or the lease ID as a string.
+        :paramtype source_lease: ~azure.storage.fileshare.ShareLeaseClient or str
+        :keyword destination_lease:
+            Required if the destination file has an active lease. Value can be a ShareLeaseClient object
+            or the lease ID as a string.
+        :paramtype destination_lease: ~azure.storage.fileshare.ShareLeaseClient or str
+        :returns: The new File Client.
+        :rtype: ~azure.storage.fileshare.ShareFileClient
+        """
+        if not new_name:
+            raise ValueError("Please specify a new file name.")
+
+        new_name = new_name.strip('/')
+        new_path_and_query = new_name.split('?')
+        new_file_path = new_path_and_query[0]
+        if len(new_path_and_query) == 2:
+            new_file_sas = new_path_and_query[1] or self._query_str.strip('?')
+        else:
+            new_file_sas = self._query_str.strip('?')
+
+        new_file_client = ShareFileClient(
+            f'{self.scheme}://{self.primary_hostname}', self.share_name, new_file_path,
+            credential=new_file_sas or self.credential, api_version=self.api_version,
+            _hosts=self._hosts, _configuration=self._config, _pipeline=self._pipeline,
+            _location_mode=self._location_mode, allow_trailing_dot=self.allow_trailing_dot,
+            allow_source_trailing_dot=self.allow_source_trailing_dot, token_intent=self.file_request_intent
+        )
+
+        kwargs.update(get_rename_smb_properties(kwargs))
+
+        file_http_headers = None
+        content_type = kwargs.pop('content_type', None)
+        if content_type:
+            file_http_headers = FileHTTPHeaders(
+                file_content_type=content_type
+            )
+
+        timeout = kwargs.pop('timeout', None)
+        overwrite = kwargs.pop('overwrite', None)
+        metadata = kwargs.pop('metadata', None)
+        headers = kwargs.pop('headers', {})
+        headers.update(add_metadata_headers(metadata))
+
+        source_access_conditions = get_source_access_conditions(kwargs.pop('source_lease', None))
+        dest_access_conditions = get_dest_access_conditions(kwargs.pop('destination_lease', None))
+
+        try:
+            await new_file_client._client.file.rename(  # pylint: disable=protected-access
+                self.url,
+                timeout=timeout,
+                replace_if_exists=overwrite,
+                file_http_headers=file_http_headers,
+                source_lease_access_conditions=source_access_conditions,
+                destination_lease_access_conditions=dest_access_conditions,
+                headers=headers,
+                **kwargs)
+
+            return new_file_client
+        except HttpResponseError as error:
+            process_storage_error(error)
+
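A short sketch of a rename under the same placeholder account; note that the call returns a new client
pointed at the renamed file:

    import asyncio
    from azure.storage.fileshare.aio import ShareFileClient

    async def rename_example():
        file = ShareFileClient(
            account_url="https://myaccount.file.core.windows.net",
            share_name="myshare",
            file_path="dir/old-name.txt",
            credential="<sas-token>")
        async with file:
            # overwrite=True replaces any existing file at the destination.
            renamed = await file.rename_file("dir/new-name.txt", overwrite=True)
            print("/".join(renamed.file_path))

    asyncio.run(rename_example())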
+    @distributed_trace_async
+    async def get_file_properties(self, **kwargs: Any) -> FileProperties:
+        """Returns all user-defined metadata, standard HTTP properties, and
+        system properties for the file.
+
+        :keyword lease:
+            Required if the file has an active lease. Value can be a ShareLeaseClient object
+            or the lease ID as a string.
+
+            .. versionadded:: 12.1.0
+
+        :paramtype lease: ~azure.storage.fileshare.aio.ShareLeaseClient or str
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :returns: FileProperties
+        :rtype: ~azure.storage.fileshare.FileProperties
+        """
+        access_conditions = get_access_conditions(kwargs.pop('lease', None))
+        timeout = kwargs.pop('timeout', None)
+        try:
+            file_props = cast(FileProperties, await self._client.file.get_properties(
+                sharesnapshot=self.snapshot,
+                lease_access_conditions=access_conditions,
+                timeout=timeout,
+                cls=deserialize_file_properties,
+                **kwargs
+            ))
+        except HttpResponseError as error:
+            process_storage_error(error)
+        file_props.name = self.file_name
+        file_props.share = self.share_name
+        file_props.snapshot = self.snapshot
+        file_props.path = "/".join(self.file_path)
+        return file_props
+
+    @distributed_trace_async
+    async def set_http_headers(
+        self, content_settings: "ContentSettings",
+        file_attributes: Optional[Union[str, "NTFSAttributes"]] = None,
+        file_creation_time: Optional[Union[str, datetime]] = None,
+        file_last_write_time: Optional[Union[str, datetime]] = None,
+        file_permission: Optional[str] = None,
+        permission_key: Optional[str] = None,
+        **kwargs: Any
+    ) -> Dict[str, Any]:
+        """Sets HTTP headers on the file.
+
+        :param ~azure.storage.fileshare.ContentSettings content_settings:
+            ContentSettings object used to set file properties. Used to set content type, encoding,
+            language, disposition, md5, and cache control.
+        :param file_attributes:
+            The file system attributes for files and directories.
+            If not set, indicates preservation of existing values.
+            For example, when passed as a str: 'Temporary|Archive'.
+        :type file_attributes: str or ~azure.storage.fileshare.NTFSAttributes or None
+        :param file_creation_time: Creation time for the file
+        :type file_creation_time: str or ~datetime.datetime or None
+        :param file_last_write_time: Last write time for the file
+        :type file_last_write_time: str or ~datetime.datetime or None
+        :param file_permission: If specified the permission (security
+            descriptor) shall be set for the directory/file. This header can be
+            used if Permission size is <= 8KB, else x-ms-file-permission-key
+            header shall be used. Default value: Inherit. If SDDL is specified as
+            input, it must have owner, group and dacl. Note: Only one of the
+            x-ms-file-permission or x-ms-file-permission-key should be specified.
+        :type file_permission: str
+        :param permission_key: Key of the permission to be set for the
+            directory/file. Note: Only one of the x-ms-file-permission or
+            x-ms-file-permission-key should be specified.
+        :type permission_key: str
+        :keyword file_permission_format:
+            Specifies the format in which the permission is returned. If not specified, SDDL will be the default.
+        :paramtype file_permission_format: Literal['sddl', 'binary']
+        :keyword file_change_time:
+            Change time for the file. If not specified, change time will be set to the current date/time.
+
+            .. versionadded:: 12.8.0
+
+                This parameter was introduced in API version '2021-06-08'.
+
+        :paramtype file_change_time: str or ~datetime.datetime
+        :keyword lease:
+            Required if the file has an active lease. Value can be a ShareLeaseClient object
+            or the lease ID as a string.
+
+            .. versionadded:: 12.1.0
+
+        :paramtype lease: ~azure.storage.fileshare.aio.ShareLeaseClient or str
+        :keyword str owner:
+            NFS only. The owner of the file.
+        :keyword str group:
+            NFS only. The owning group of the file.
+        :keyword str file_mode:
+            NFS only. The file mode of the file.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :returns: File-updated property dict (Etag and last modified).
+        :rtype: dict[str, Any]
+        """
+        access_conditions = get_access_conditions(kwargs.pop('lease', None))
+        timeout = kwargs.pop('timeout', None)
+        file_content_length = kwargs.pop("size", None)
+        file_http_headers = FileHTTPHeaders(
+            file_cache_control=content_settings.cache_control,
+            file_content_type=content_settings.content_type,
+            file_content_md5=bytearray(content_settings.content_md5) if content_settings.content_md5 else None,
+            file_content_encoding=content_settings.content_encoding,
+            file_content_language=content_settings.content_language,
+            file_content_disposition=content_settings.content_disposition,
+        )
+        file_permission = _get_file_permission(file_permission, permission_key, None)
+        file_change_time = kwargs.pop('file_change_time', None)
+        try:
+            return cast(Dict[str, Any], await self._client.file.set_http_headers(
+                file_content_length=file_content_length,
+                file_http_headers=file_http_headers,
+                file_attributes=str(file_attributes) if file_attributes is not None else file_attributes,
+                file_creation_time=_datetime_to_str(file_creation_time),
+                file_last_write_time=_datetime_to_str(file_last_write_time),
+                file_change_time=_datetime_to_str(file_change_time),
+                file_permission=file_permission,
+                file_permission_key=permission_key,
+                lease_access_conditions=access_conditions,
+                timeout=timeout,
+                cls=return_response_headers,
+                **kwargs
+            ))
+        except HttpResponseError as error:
+            process_storage_error(error)
+
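A minimal sketch of setting headers through ContentSettings, reusing a placeholder client like the ones above:

    import asyncio
    from azure.storage.fileshare import ContentSettings
    from azure.storage.fileshare.aio import ShareFileClient

    async def headers_example():
        file = ShareFileClient(
            account_url="https://myaccount.file.core.windows.net",
            share_name="myshare",
            file_path="dir/data.txt",
            credential="<sas-token>")
        async with file:
            # Sets the content type and cache control on the file.
            await file.set_http_headers(
                content_settings=ContentSettings(
                    content_type="text/plain",
                    cache_control="no-cache"))

    asyncio.run(headers_example())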
+    @distributed_trace_async
+    async def set_file_metadata(self, metadata: Optional[Dict[str, Any]] = None, **kwargs: Any) -> Dict[str, Any]:
+        """Sets user-defined metadata for the specified file as one or more
+        name-value pairs.
+
+        Each call to this operation replaces all existing metadata
+        attached to the file. To remove all metadata from the file,
+        call this operation with no metadata dict.
+
+        :param metadata:
+            Name-value pairs associated with the file as metadata.
+        :type metadata: dict[str, str]
+        :keyword lease:
+            Required if the file has an active lease. Value can be a ShareLeaseClient object
+            or the lease ID as a string.
+
+            .. versionadded:: 12.1.0
+
+        :paramtype lease: ~azure.storage.fileshare.aio.ShareLeaseClient or str
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :returns: File-updated property dict (Etag and last modified).
+        :rtype: dict[str, Any]
+        """
+        access_conditions = get_access_conditions(kwargs.pop('lease', None))
+        timeout = kwargs.pop('timeout', None)
+        headers = kwargs.pop("headers", {})
+        headers.update(add_metadata_headers(metadata))
+        try:
+            return cast(Dict[str, Any], await self._client.file.set_metadata(
+                metadata=metadata, lease_access_conditions=access_conditions,
+                timeout=timeout, cls=return_response_headers, headers=headers, **kwargs
+            ))
+        except HttpResponseError as error:
+            process_storage_error(error)
+
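A two-line sketch (inside a coroutine, with `file` an open aio ShareFileClient as in the earlier examples);
remember that every call replaces the file's whole metadata set:

    # Replaces all existing metadata with these two pairs.
    await file.set_file_metadata({"category": "reports", "owner": "data-team"})
    # Calling with no metadata removes everything previously set.
    await file.set_file_metadata()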
+    @distributed_trace_async
+    async def upload_range(
+        self, data: bytes,
+        offset: int,
+        length: int,
+        **kwargs: Any
+    ) -> Dict[str, Any]:
+        """Upload a range of bytes to a file.
+
+        :param bytes data:
+            The data to upload.
+        :param int offset:
+            Start of byte range to use for uploading a section of the file.
+            The range can be up to 4 MB in size.
+        :param int length:
+            Number of bytes to use for uploading a section of the file.
+            The range can be up to 4 MB in size.
+        :keyword bool validate_content:
+            If true, calculates an MD5 hash of the range content. The storage
+            service checks the hash of the content that has arrived
+            with the hash that was sent. This is primarily valuable for detecting
+            bitflips on the wire if using http instead of https, as https (the default)
+            will already validate. Note that this MD5 hash is not stored with the
+            file.
+        :keyword file_last_write_mode:
+            If the file last write time should be preserved or overwritten. Possible values
+            are "preserve" or "now". If not specified, file last write time will be changed to
+            the current date/time.
+
+            .. versionadded:: 12.8.0
+
+                This parameter was introduced in API version '2021-06-08'.
+
+        :paramtype file_last_write_mode: Literal["preserve", "now"]
+        :keyword lease:
+            Required if the file has an active lease. Value can be a ShareLeaseClient object
+            or the lease ID as a string.
+
+            .. versionadded:: 12.1.0
+
+        :paramtype lease: ~azure.storage.fileshare.aio.ShareLeaseClient or str
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :keyword str encoding:
+            Defaults to UTF-8.
+        :returns: File-updated property dict (Etag and last modified).
+        :rtype: Dict[str, Any]
+        """
+        validate_content = kwargs.pop('validate_content', False)
+        timeout = kwargs.pop('timeout', None)
+        encoding = kwargs.pop('encoding', 'UTF-8')
+        file_last_write_mode = kwargs.pop('file_last_write_mode', None)
+        if isinstance(data, str):
+            data = data.encode(encoding)
+        end_range = offset + length - 1  # Reformat to an inclusive range index
+        content_range = f'bytes={offset}-{end_range}'
+        access_conditions = get_access_conditions(kwargs.pop('lease', None))
+        try:
+            return cast(Dict[str, Any], await self._client.file.upload_range(
+                range=content_range,
+                content_length=length,
+                optionalbody=data,
+                timeout=timeout,
+                validate_content=validate_content,
+                file_last_written_mode=file_last_write_mode,
+                lease_access_conditions=access_conditions,
+                cls=return_response_headers,
+                **kwargs
+            ))
+        except HttpResponseError as error:
+            process_storage_error(error)
+
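A minimal sketch (inside a coroutine, same placeholder `file` client); the file must already exist and be at
least offset + length bytes long, since upload_range writes into an existing file rather than resizing it:

    payload = b"hello world"
    # Writes bytes [0, len(payload) - 1] of the file.
    await file.upload_range(payload, offset=0, length=len(payload))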
+    @distributed_trace_async
+    async def upload_range_from_url(
+        self, source_url: str,
+        offset: int,
+        length: int,
+        source_offset: int,
+        **kwargs: Any
+    ) -> Dict[str, Any]:
+        """
+        Writes the bytes from one Azure File endpoint into the specified range of another Azure File endpoint.
+
+        :param str source_url:
+            A URL of up to 2 KB in length that specifies an Azure file or blob.
+            The value should be URL-encoded as it would appear in a request URI.
+            If the source is in another account, the source must either be public
+            or must be authenticated via a shared access signature. If the source
+            is public, no authentication is required.
+            Examples:
+            https://myaccount.file.core.windows.net/myshare/mydir/myfile
+            https://otheraccount.file.core.windows.net/myshare/mydir/myfile?sastoken
+        :param int offset:
+            Start of byte range to use for updating a section of the file.
+            The range can be up to 4 MB in size.
+        :param int length:
+            Number of bytes to use for updating a section of the file.
+            The range can be up to 4 MB in size.
+        :param int source_offset:
+            This indicates the start of the range of bytes (inclusive) that has to be taken from the copy source.
+            The service will read the same number of bytes as the destination range (`length` bytes).
+        :keyword ~datetime.datetime source_if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this conditional header to perform the operation only if the source
+            resource has been modified since the specified date/time.
+        :keyword ~datetime.datetime source_if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this conditional header to perform the operation only if the source
+            resource has not been modified since the specified date/time.
+        :keyword str source_etag:
+            The source ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions source_match_condition:
+            The source match condition to use upon the etag.
+        :keyword file_last_write_mode:
+            If the file last write time should be preserved or overwritten. Possible values
+            are "preserve" or "now". If not specified, file last write time will be changed to
+            the current date/time.
+
+            .. versionadded:: 12.8.0
+
+                This parameter was introduced in API version '2021-06-08'.
+
+        :paramtype file_last_write_mode: Literal["preserve", "now"]
+        :keyword lease:
+            Required if the file has an active lease. Value can be a ShareLeaseClient object
+            or the lease ID as a string.
+
+            .. versionadded:: 12.1.0
+
+        :paramtype lease: ~azure.storage.fileshare.aio.ShareLeaseClient or str
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :keyword str source_authorization:
+            Authenticate as a service principal using a client secret to access a source blob. Ensure "bearer " is
+            the prefix of the source_authorization string.
+        :returns: Result after writing to the specified range of the destination Azure File endpoint.
+        :rtype: dict[str, Any]
+        """
+        options = _upload_range_from_url_options(
+            source_url=source_url,
+            offset=offset,
+            length=length,
+            source_offset=source_offset,
+            **kwargs
+        )
+        try:
+            return cast(Dict[str, Any], await self._client.file.upload_range_from_url(**options))
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    @distributed_trace_async
+    async def get_ranges(
+        self, offset: Optional[int] = None,
+        length: Optional[int] = None,
+        **kwargs: Any
+    ) -> List[Dict[str, int]]:
+        """Returns the list of valid page ranges for a file or snapshot
+        of a file.
+
+        :param int offset:
+            Specifies the start offset of bytes over which to get ranges.
+        :param int length:
+            Number of bytes over which to get ranges.
+        :keyword lease:
+            Required if the file has an active lease. Value can be a ShareLeaseClient object
+            or the lease ID as a string.
+
+            .. versionadded:: 12.1.0
+
+        :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :returns:
+            A list of valid ranges.
+        :rtype: List[dict[str, int]]
+        """
+        options = _get_ranges_options(
+            snapshot=self.snapshot,
+            offset=offset,
+            length=length,
+            **kwargs)
+        try:
+            ranges = await self._client.file.get_range_list(**options)
+        except HttpResponseError as error:
+            process_storage_error(error)
+        return [{'start': file_range.start, 'end': file_range.end} for file_range in ranges.ranges]
+
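A short sketch (inside a coroutine, same placeholder client); each returned dict carries inclusive 'start'
and 'end' offsets:

    ranges = await file.get_ranges()
    for r in ranges:
        print(f"valid bytes {r['start']}-{r['end']}")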
+    @distributed_trace_async
+    async def get_ranges_diff(
+        self, previous_sharesnapshot: Union[str, Dict[str, Any]],
+        offset: Optional[int] = None,
+        length: Optional[int] = None,
+        *,
+        include_renames: Optional[bool] = None,
+        **kwargs: Any
+    ) -> Tuple[List[Dict[str, int]], List[Dict[str, int]]]:
+        """Returns the list of valid page ranges for a file or snapshot
+        of a file.
+
+        .. versionadded:: 12.6.0
+
+        :param str previous_sharesnapshot:
+            The snapshot diff parameter that contains an opaque DateTime value that
+            specifies a previous file snapshot to be compared
+            against a more recent snapshot or the current file.
+        :param int offset:
+            Specifies the start offset of bytes over which to get ranges.
+        :param int length:
+            Number of bytes over which to get ranges.
+        :keyword Optional[bool] include_renames:
+            Only valid if previous_sharesnapshot parameter is provided. Specifies whether the changed ranges for
+            a file that has been renamed or moved between the target snapshot (or live file) and the previous
+            snapshot should be listed. If set to True, the valid changed ranges for the file will be returned.
+            If set to False, the operation will result in a 409 (Conflict) response.
+        :keyword lease:
+            Required if the file has an active lease. Value can be a ShareLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :returns:
+            A tuple of two lists of file ranges as dictionaries with 'start' and 'end' keys.
+            The first element contains the filled file ranges, the second the cleared file ranges.
+        :rtype: tuple[list[dict[str, int]], list[dict[str, int]]]
+        """
+        options = _get_ranges_options(
+            snapshot=self.snapshot,
+            offset=offset,
+            length=length,
+            previous_sharesnapshot=previous_sharesnapshot,
+            support_rename=include_renames,
+            **kwargs)
+        try:
+            ranges = await self._client.file.get_range_list(**options)
+        except HttpResponseError as error:
+            process_storage_error(error)
+        return get_file_ranges_result(ranges)
+
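A sketch of diffing against a share snapshot (inside a coroutine); the snapshot token is a placeholder and
would normally come from a ShareClient snapshot operation:

    filled, cleared = await file.get_ranges_diff(
        previous_sharesnapshot="2025-01-01T00:00:00.0000000Z")
    print(len(filled), "filled ranges,", len(cleared), "cleared ranges")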
+    @distributed_trace_async
+    async def clear_range(self, offset: int, length: int, **kwargs: Any) -> Dict[str, Any]:
+        """Clears the specified range and releases the space used in storage for
+        that range.
+
+        :param int offset:
+            Start of byte range to use for clearing a section of the file.
+            The range can be up to 4 MB in size.
+        :param int length:
+            Number of bytes to use for clearing a section of the file.
+            The range can be up to 4 MB in size.
+        :keyword lease:
+            Required if the file has an active lease. Value can be a ShareLeaseClient object
+            or the lease ID as a string.
+
+            .. versionadded:: 12.1.0
+
+        :paramtype lease: ~azure.storage.fileshare.aio.ShareLeaseClient or str
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :returns: File-updated property dict (Etag and last modified).
+        :rtype: Dict[str, Any]
+        """
+        access_conditions = get_access_conditions(kwargs.pop('lease', None))
+        timeout = kwargs.pop('timeout', None)
+
+        if offset is None or offset % 512 != 0:
+            raise ValueError("offset must be an integer aligned to a 512-byte boundary")
+        if length is None or length % 512 != 0:
+            raise ValueError("length must be an integer aligned to a 512-byte boundary")
+        end_range = length + offset - 1  # Reformat to an inclusive range index
+        content_range = f"bytes={offset}-{end_range}"
+        try:
+            return cast(Dict[str, Any], await self._client.file.upload_range(
+                timeout=timeout,
+                cls=return_response_headers,
+                content_length=0,
+                optionalbody=None,
+                file_range_write="clear",
+                range=content_range,
+                lease_access_conditions=access_conditions,
+                **kwargs
+            ))
+        except HttpResponseError as error:
+            process_storage_error(error)
+
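A one-line sketch (inside a coroutine); both arguments must be multiples of 512, per the validation above:

    # Releases the storage used by the first 1024 bytes of the file.
    await file.clear_range(offset=0, length=1024)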
+    @distributed_trace_async
+    async def resize_file(self, size: int, **kwargs: Any) -> Dict[str, Any]:
+        """Resizes a file to the specified size.
+
+        :param int size:
+            Size to resize file to (in bytes)
+        :keyword lease:
+            Required if the file has an active lease. Value can be a ShareLeaseClient object
+            or the lease ID as a string.
+
+            .. versionadded:: 12.1.0
+
+        :paramtype lease: ~azure.storage.fileshare.aio.ShareLeaseClient or str
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :returns: File-updated property dict (Etag and last modified).
+        :rtype: Dict[str, Any]
+        """
+        access_conditions = get_access_conditions(kwargs.pop('lease', None))
+        timeout = kwargs.pop('timeout', None)
+        try:
+            return cast(Dict[str, Any], await self._client.file.set_http_headers(
+                file_content_length=size,
+                file_attributes=None,
+                file_creation_time=None,
+                file_last_write_time=None,
+                file_permission="preserve",
+                lease_access_conditions=access_conditions,
+                cls=return_response_headers,
+                timeout=timeout,
+                **kwargs
+            ))
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    @distributed_trace
+    def list_handles(self, **kwargs: Any) -> AsyncItemPaged[Handle]:
+        """Lists handles for file.
+
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :returns: An auto-paging iterable of Handle
+        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.storage.fileshare.Handle]
+        """
+        timeout = kwargs.pop('timeout', None)
+        results_per_page = kwargs.pop("results_per_page", None)
+        command = functools.partial(
+            self._client.file.list_handles,
+            sharesnapshot=self.snapshot,
+            timeout=timeout,
+            **kwargs)
+        return AsyncItemPaged(
+            command, results_per_page=results_per_page,
+            page_iterator_class=HandlesPaged)
+
+    @distributed_trace_async
+    async def close_handle(self, handle: Union[str, Handle], **kwargs: Any) -> Dict[str, int]:
+        """Close an open file handle.
+
+        :param handle:
+            A specific handle to close.
+        :type handle: str or ~azure.storage.fileshare.Handle
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :returns:
+            The number of handles closed (this may be 0 if the specified handle was not found)
+            and the number of handles that failed to close, in a dict.
+        :rtype: dict[str, int]
+        """
+        if isinstance(handle, Handle):
+            handle_id = handle.id
+        else:
+            handle_id = handle
+        if handle_id == '*':
+            raise ValueError("Handle ID '*' is not supported. Use 'close_all_handles' instead.")
+        try:
+            response = await self._client.file.force_close_handles(
+                handle_id,
+                marker=None,
+                sharesnapshot=self.snapshot,
+                cls=return_response_headers,
+                **kwargs
+            )
+            return {
+                'closed_handles_count': response.get('number_of_handles_closed', 0),
+                'failed_handles_count': response.get('number_of_handles_failed', 0)
+            }
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    @distributed_trace_async
+    async def close_all_handles(self, **kwargs: Any) -> Dict[str, int]:
+        """Close any open file handles.
+
+        This operation will block until the service has closed all open handles.
+
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :returns:
+            The number of handles closed and the number of handles
+            that failed to close, in a dict.
+        :rtype: dict[str, int]
+        """
+        timeout = kwargs.pop('timeout', None)
+        start_time = time.time()
+
+        try_close = True
+        continuation_token = None
+        total_closed = 0
+        total_failed = 0
+        while try_close:
+            try:
+                response = await self._client.file.force_close_handles(
+                    handle_id='*',
+                    timeout=timeout,
+                    marker=continuation_token,
+                    sharesnapshot=self.snapshot,
+                    cls=return_response_headers,
+                    **kwargs
+                )
+            except HttpResponseError as error:
+                process_storage_error(error)
+            continuation_token = response.get('marker')
+            try_close = bool(continuation_token)
+            total_closed += response.get('number_of_handles_closed', 0)
+            total_failed += response.get('number_of_handles_failed', 0)
+            if timeout:
+                timeout = max(0, timeout - (time.time() - start_time))
+        return {
+            'closed_handles_count': total_closed,
+            'failed_handles_count': total_failed
+        }
+
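A short sketch combining the handle APIs (inside a coroutine, same placeholder client); list_handles itself
is synchronous but returns an async pager:

    async for handle in file.list_handles():
        print(handle.id)
    counts = await file.close_all_handles()
    print(counts["closed_handles_count"], counts["failed_handles_count"])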
+    @distributed_trace_async
+    async def create_hardlink(
+        self, target: str,
+        *,
+        lease: Optional[Union[ShareLeaseClient, str]] = None,
+        timeout: Optional[int] = None,
+        **kwargs: Any
+    ) -> Dict[str, Any]:
+        """NFS only. Creates a hard link to the file specified by path.
+
+        :param str target:
+            Specifies the path of the target file to which the link will be created, up to 2 KiB in length.
+            It should be the full path of the target starting from the root. The target file must be in the
+            same share and the same storage account.
+        :keyword lease:
+            Required if the file has an active lease. Value can be a ShareLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str or None
+        :keyword Optional[int] timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :returns: File-updated property dict (ETag and last modified).
+        :rtype: dict[str, Any]
+        """
+        access_conditions = get_access_conditions(lease)
+        try:
+            return cast(Dict[str, Any], await self._client.file.create_hard_link(
+                target_file=target,
+                lease_access_conditions=access_conditions,
+                timeout=timeout,
+                cls=return_response_headers,
+                **kwargs
+            ))
+        except HttpResponseError as error:
+            process_storage_error(error)
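A minimal sketch (inside a coroutine); this succeeds only on NFS-enabled shares, and both paths are
placeholders. The link is created at this client's own file path and points at the existing file given by
target:

    headers = await file.create_hardlink(target="dir/original.txt")
    print(headers.get("etag"))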
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/fileshare/aio/_lease_async.py b/.venv/lib/python3.12/site-packages/azure/storage/fileshare/aio/_lease_async.py
new file mode 100644
index 00000000..70d6a392
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/fileshare/aio/_lease_async.py
@@ -0,0 +1,249 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+# pylint: disable=docstring-keyword-should-match-keyword-only
+
+import uuid
+
+from typing import Union, Optional, Any, TYPE_CHECKING
+
+from azure.core.exceptions import HttpResponseError
+from azure.core.tracing.decorator_async import distributed_trace_async
+
+from .._shared.response_handlers import return_response_headers, process_storage_error
+from .._generated.aio.operations import FileOperations, ShareOperations
+
+if TYPE_CHECKING:
+    from azure.storage.fileshare.aio import ShareClient, ShareFileClient
+
+
+class ShareLeaseClient:  # pylint: disable=client-accepts-api-version-keyword
+    """Creates a new ShareLeaseClient.
+
+    This client provides lease operations on a ShareClient or ShareFileClient.
+
+    :ivar str id:
+        The ID of the lease currently being maintained. This will be `None` if no
+        lease has yet been acquired.
+    :ivar str etag:
+        The ETag of the lease currently being maintained. This will be `None` if no
+        lease has yet been acquired or modified.
+    :ivar ~datetime.datetime last_modified:
+        The last modified timestamp of the lease currently being maintained.
+        This will be `None` if no lease has yet been acquired or modified.
+
+    :param client:
+        The client of the file or share to lease.
+    :type client: ~azure.storage.fileshare.ShareFileClient or
+        ~azure.storage.fileshare.ShareClient
+    :param str lease_id:
+        A string representing the lease ID of an existing lease. This value does not
+        need to be specified in order to acquire a new lease, or break one.
+    """
+    def __init__(  # pylint: disable=missing-client-constructor-parameter-credential, missing-client-constructor-parameter-kwargs
+        self, client: Union["ShareFileClient", "ShareClient"],
+        lease_id: Optional[str] = None
+    ) -> None:
+        self.id = lease_id or str(uuid.uuid4())
+        self.last_modified = None
+        self.etag = None
+        if hasattr(client, 'file_name'):
+            self._client = client._client.file  # type: ignore
+            self._snapshot = None
+        elif hasattr(client, 'share_name'):
+            self._client = client._client.share
+            self._snapshot = client.snapshot
+        else:
+            raise TypeError("Lease must use ShareFileClient or ShareClient.")
+
+    async def __aenter__(self):
+        return self
+
+    async def __aexit__(self, *args: Any):
+        await self.release()
+
+    @distributed_trace_async
+    async def acquire(self, **kwargs: Any) -> None:
+        """Requests a new lease. This operation establishes and manages a lock on a
+        file or share for write and delete operations. If the file or share does not have an active lease,
+        the File or Share service creates a lease on the file or share. If the file has an active lease,
+        you can only request a new lease using the active lease ID.
+
+
+        If the file or share does not have an active lease, the File or Share service creates a
+        lease on the file and returns a new lease ID.
+
+        :keyword int lease_duration:
+            Specifies the duration of the lease, in seconds, or negative one
+            (-1) for a lease that never expires. File leases are always infinite. A non-infinite
+            share lease can be between 15 and 60 seconds, and a share lease duration cannot be
+            changed using renew or change. Default is -1 (an infinite share lease).
+
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :rtype: None
+        """
+        try:
+            lease_duration = kwargs.pop('lease_duration', -1)
+            if self._snapshot:
+                kwargs['sharesnapshot'] = self._snapshot
+            response = await self._client.acquire_lease(
+                timeout=kwargs.pop('timeout', None),
+                duration=lease_duration,
+                proposed_lease_id=self.id,
+                cls=return_response_headers,
+                **kwargs)
+        except HttpResponseError as error:
+            process_storage_error(error)
+        self.id = response.get('lease_id')
+        self.last_modified = response.get('last_modified')
+        self.etag = response.get('etag')
+
+    @distributed_trace_async
+    async def renew(self, **kwargs: Any) -> None:
+        """Renews the share lease.
+
+        The share lease can be renewed if the lease ID specified in the
+        lease client matches that associated with the share. Note that
+        the lease may be renewed even if it has expired as long as the share
+        has not been leased again since the expiration of that lease. When you
+        renew a lease, the lease duration clock resets.
+
+        .. versionadded:: 12.6.0
+
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :return: None
+        """
+        if isinstance(self._client, FileOperations):
+            raise TypeError("Lease renewal operations are only valid for ShareClient.")
+        try:
+            response = await self._client.renew_lease(
+                lease_id=self.id,
+                timeout=kwargs.pop('timeout', None),
+                sharesnapshot=self._snapshot,
+                cls=return_response_headers,
+                **kwargs)
+        except HttpResponseError as error:
+            process_storage_error(error)
+        self.etag = response.get('etag')
+        self.id = response.get('lease_id')
+        self.last_modified = response.get('last_modified')
+
+    @distributed_trace_async
+    async def release(self, **kwargs: Any) -> None:
+        """Releases the lease. The lease may be released if the lease ID specified on the request matches
+        that associated with the share or file. Releasing the lease allows another client to immediately acquire
+        the lease for the share or file as soon as the release is complete.
+
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :return: None
+        """
+        try:
+            if self._snapshot:
+                kwargs['sharesnapshot'] = self._snapshot
+            response = await self._client.release_lease(
+                lease_id=self.id,
+                timeout=kwargs.pop('timeout', None),
+                cls=return_response_headers,
+                **kwargs)
+        except HttpResponseError as error:
+            process_storage_error(error)
+        self.etag = response.get('etag')
+        self.id = response.get('lease_id')
+        self.last_modified = response.get('last_modified')
+
+    @distributed_trace_async
+    async def change(self, proposed_lease_id: str, **kwargs: Any) -> None:
+        """ Changes the lease ID of an active lease. A change must include the current lease ID in x-ms-lease-id and
+        a new lease ID in x-ms-proposed-lease-id.
+
+        :param str proposed_lease_id:
+            Proposed lease ID, in a GUID string format. The File or Share service raises an error
+            (Invalid request) if the proposed lease ID is not in the correct format.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :return: None
+        """
+        try:
+            if self._snapshot:
+                kwargs['sharesnapshot'] = self._snapshot
+            response = await self._client.change_lease(
+                lease_id=self.id,
+                proposed_lease_id=proposed_lease_id,
+                timeout=kwargs.pop('timeout', None),
+                cls=return_response_headers,
+                **kwargs)
+        except HttpResponseError as error:
+            process_storage_error(error)
+        self.etag = response.get('etag')
+        self.id = response.get('lease_id')
+        self.last_modified = response.get('last_modified')
+
+    @distributed_trace_async
+    async def break_lease(self, **kwargs: Any) -> int:
+        """Force breaks the lease if the file or share has an active lease. Any authorized request can break the lease;
+        the request is not required to specify a matching lease ID. An infinite lease breaks immediately.
+
+        Once a lease is broken, it cannot be changed. Any authorized request can break the lease;
+        the request is not required to specify a matching lease ID.
+        When a lease is successfully broken, the response indicates the interval
+        in seconds until a new lease can be acquired.
+
+        :keyword int lease_break_period:
+            This is the proposed duration of seconds that the share lease
+            should continue before it is broken, between 0 and 60 seconds. This
+            break period is only used if it is shorter than the time remaining
+            on the share lease. If longer, the time remaining on the share lease is used.
+            A new share lease will not be available before the break period has
+            expired, but the share lease may be held for longer than the break
+            period. If this header does not appear with a break
+            operation, a fixed-duration share lease breaks after the remaining share lease
+            period elapses, and an infinite share lease breaks immediately.
+
+            .. versionadded:: 12.5.0
+
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :return: Approximate time remaining in the lease period, in seconds.
+        :rtype: int
+        """
+        try:
+            lease_break_period = kwargs.pop('lease_break_period', None)
+            if self._snapshot:
+                kwargs['sharesnapshot'] = self._snapshot
+            if isinstance(self._client, ShareOperations):
+                kwargs['break_period'] = lease_break_period
+            if isinstance(self._client, FileOperations) and lease_break_period:
+                raise TypeError("Setting a lease break period is only applicable to Share leases.")
+
+            response = await self._client.break_lease(
+                timeout=kwargs.pop('timeout', None),
+                cls=return_response_headers,
+                **kwargs)
+        except HttpResponseError as error:
+            process_storage_error(error)
+        return response.get('lease_time')  # type: ignore
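Taken together, the lease workflow above can be exercised end to end. A minimal usage sketch, assuming a valid connection string; the share name, lease duration, and connection string placeholder are all illustrative:

import asyncio
from azure.storage.fileshare.aio import ShareClient

async def lease_example(conn_str: str) -> None:
    # "myshare" and conn_str are placeholders for this sketch.
    async with ShareClient.from_connection_string(conn_str, share_name="myshare") as share:
        # acquire() proposes this client's generated GUID and records the returned id/etag.
        lease = await share.acquire_lease(lease_duration=30)
        await lease.renew()  # valid for share leases only; file leases raise TypeError
        # break_lease() returns the approximate seconds until a new lease can be acquired.
        remaining = await lease.break_lease()
        print(f"lease broken; a new lease can be acquired in ~{remaining}s")

asyncio.run(lease_example("<connection-string>"))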
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/fileshare/aio/_models.py b/.venv/lib/python3.12/site-packages/azure/storage/fileshare/aio/_models.py
new file mode 100644
index 00000000..dd7335bb
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/fileshare/aio/_models.py
@@ -0,0 +1,208 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+# pylint: disable=too-few-public-methods
+
+from typing import (
+    Any, Callable, Dict, List, Optional
+)
+
+from azure.core.async_paging import AsyncPageIterator
+from azure.core.exceptions import HttpResponseError
+
+from .._shared.response_handlers import return_context_and_deserialized, process_storage_error
+from .._generated.models import DirectoryItem
+from .._models import Handle, ShareProperties, DirectoryProperties, FileProperties
+
+
+def _wrap_item(item):
+    if isinstance(item, DirectoryItem):
+        return {'name': item.name, 'is_directory': True}
+    return {'name': item.name, 'size': item.properties.content_length, 'is_directory': False}
+
+
+class SharePropertiesPaged(AsyncPageIterator):
+    """An iterable of Share properties.
+
+    :param Callable command: Function to retrieve the next page of items.
+    :param Optional[str] prefix: Filters the results to return only shares whose names
+        begin with the specified prefix.
+    :param Optional[int] results_per_page: The maximum number of share names to retrieve per
+        call.
+    :param Optional[str] continuation_token: An opaque continuation token.
+    """
+
+    service_endpoint: Optional[str] = None
+    """The service URL."""
+    prefix: Optional[str] = None
+    """A filename prefix being used to filter the list."""
+    marker: Optional[str] = None
+    """The continuation token of the current page of results."""
+    results_per_page: Optional[int] = None
+    """The maximum number of results to retrieve per API call."""
+    location_mode: Optional[str] = None
+    """The location mode being used to list results. The available
+        options include "primary" and "secondary"."""
+    current_page: List[ShareProperties]
+    """The current page of listed results."""
+
+    def __init__(
+        self, command: Callable,
+        prefix: Optional[str] = None,
+        results_per_page: Optional[int] = None,
+        continuation_token: Optional[str] = None
+    ) -> None:
+        super(SharePropertiesPaged, self).__init__(
+            get_next=self._get_next_cb,
+            extract_data=self._extract_data_cb,
+            continuation_token=continuation_token or ""
+        )
+        self._command = command
+        self.service_endpoint = None
+        self.prefix = prefix
+        self.marker = None
+        self.results_per_page = results_per_page
+        self.location_mode = None
+        self.current_page = []
+
+    async def _get_next_cb(self, continuation_token):
+        try:
+            return await self._command(
+                marker=continuation_token or None,
+                maxresults=self.results_per_page,
+                prefix=self.prefix,
+                cls=return_context_and_deserialized,
+                use_location=self.location_mode)
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    async def _extract_data_cb(self, get_next_return):
+        self.location_mode, self._response = get_next_return
+        self.service_endpoint = self._response.service_endpoint
+        self.prefix = self._response.prefix
+        self.marker = self._response.marker
+        self.results_per_page = self._response.max_results
+        self.current_page = [ShareProperties._from_generated(i) for i in self._response.share_items]  # pylint: disable=protected-access
+        return self._response.next_marker or None, self.current_page
+
+
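SharePropertiesPaged is not usually constructed directly; it backs the pager returned by ShareServiceClient.list_shares. A minimal consumption sketch (the name prefix is illustrative):

from azure.storage.fileshare.aio import ShareServiceClient

async def print_shares(conn_str: str) -> None:
    # Iterating the pager drives the _get_next_cb/_extract_data_cb callbacks shown above.
    async with ShareServiceClient.from_connection_string(conn_str) as service:
        async for share in service.list_shares(name_starts_with="logs-"):
            print(share.name, share.quota)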
+class HandlesPaged(AsyncPageIterator):
+    """An iterable of Handles.
+
+    :param Callable command: Function to retrieve the next page of items.
+    :param Optional[int] results_per_page: The maximum number of handles to retrieve per call.
+    :param Optional[str] continuation_token: An opaque continuation token to retrieve the next page of results.
+    """
+
+    marker: Optional[str] = None
+    """The continuation token of the current page of results."""
+    results_per_page: Optional[int] = None
+    """The maximum number of results retrieved per API call."""
+    location_mode: Optional[str] = None
+    """The location mode being used to list results.
+        The available options include "primary" and "secondary"."""
+    current_page: List[Handle]
+    """The current page of listed results."""
+
+    def __init__(
+        self, command: Callable,
+        results_per_page: Optional[int] = None,
+        continuation_token: Optional[str] = None
+    ) -> None:
+        super(HandlesPaged, self).__init__(
+            get_next=self._get_next_cb,
+            extract_data=self._extract_data_cb,
+            continuation_token=continuation_token or ""
+        )
+        self._command = command
+        self.marker = None
+        self.results_per_page = results_per_page
+        self.location_mode = None
+        self.current_page = []
+
+    async def _get_next_cb(self, continuation_token):
+        try:
+            return await self._command(
+                marker=continuation_token or None,
+                maxresults=self.results_per_page,
+                cls=return_context_and_deserialized,
+                use_location=self.location_mode)
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    async def _extract_data_cb(self, get_next_return):
+        self.location_mode, self._response = get_next_return
+        self.current_page = [Handle._from_generated(h) for h in self._response.handle_list]  # pylint: disable=protected-access
+        return self._response.next_marker or None, self.current_page
+
+
+class DirectoryPropertiesPaged(AsyncPageIterator):
+    """An iterable for the contents of a directory.
+
+    This iterable will yield DirectoryProperties and FileProperties instances for the
+    contents of the directory, as produced by the listing operation.
+
+    :param Callable command: Function to retrieve the next page of items.
+    :param Optional[str] prefix: Filters the results to return only entries whose names
+        begin with the specified prefix.
+    :param Optional[int] results_per_page: The maximum number of entries to retrieve per call.
+    :param Optional[str] continuation_token: An opaque continuation token.
+    """
+
+    service_endpoint: Optional[str] = None
+    """The service URL."""
+    prefix: Optional[str] = None
+    """A file name prefix being used to filter the list."""
+    marker: Optional[str] = None
+    """The continuation token of the current page of results."""
+    results_per_page: Optional[int] = None
+    """The maximum number of results retrieved per API call."""
+    continuation_token: Optional[str] = None
+    """The continuation token to retrieve the next page of results."""
+    location_mode: Optional[str] = None
+    """The location mode being used to list results. The available options include "primary" and "secondary"."""
+    current_page: List[Dict[str, Any]]
+    """The current page of listed results."""
+
+    def __init__(
+        self, command: Callable,
+        prefix: Optional[str] = None,
+        results_per_page: Optional[int] = None,
+        continuation_token: Optional[str] = None
+    ) -> None:
+        super(DirectoryPropertiesPaged, self).__init__(
+            get_next=self._get_next_cb,
+            extract_data=self._extract_data_cb,
+            continuation_token=continuation_token or ""
+        )
+        self._command = command
+        self.service_endpoint = None
+        self.prefix = prefix
+        self.marker = None
+        self.results_per_page = results_per_page
+        self.location_mode = None
+        self.current_page = []
+
+    async def _get_next_cb(self, continuation_token):
+        try:
+            return await self._command(
+                marker=continuation_token or None,
+                prefix=self.prefix,
+                maxresults=self.results_per_page,
+                cls=return_context_and_deserialized,
+                use_location=self.location_mode)
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    async def _extract_data_cb(self, get_next_return):
+        self.location_mode, self._response = get_next_return
+        self.service_endpoint = self._response.service_endpoint
+        self.prefix = self._response.prefix
+        self.marker = self._response.marker
+        self.results_per_page = self._response.max_results
+        self.current_page = [DirectoryProperties._from_generated(i) for i in self._response.segment.directory_items] # pylint: disable = protected-access
+        self.current_page.extend([FileProperties._from_generated(i) for i in self._response.segment.file_items]) # pylint: disable = protected-access
+        return self._response.next_marker or None, self.current_page
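DirectoryPropertiesPaged likewise backs ShareDirectoryClient.list_directories_and_files. A short sketch, with illustrative share and directory names:

from azure.storage.fileshare.aio import ShareDirectoryClient

async def print_listing(conn_str: str) -> None:
    directory = ShareDirectoryClient.from_connection_string(
        conn_str, share_name="myshare", directory_path="logs")
    async with directory:
        # Items are the DirectoryProperties/FileProperties built in _extract_data_cb.
        async for item in directory.list_directories_and_files():
            print(item.name)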
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/fileshare/aio/_share_client_async.py b/.venv/lib/python3.12/site-packages/azure/storage/fileshare/aio/_share_client_async.py
new file mode 100644
index 00000000..7f66feb9
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/fileshare/aio/_share_client_async.py
@@ -0,0 +1,991 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+# pylint: disable=docstring-keyword-should-match-keyword-only
+
+import sys
+import warnings
+from typing import (
+    Any, cast, Dict, Literal, Optional, Union,
+    TYPE_CHECKING
+)
+from typing_extensions import Self
+
+from azure.core.async_paging import AsyncItemPaged
+from azure.core.exceptions import HttpResponseError
+from azure.core.pipeline import AsyncPipeline
+from azure.core.tracing.decorator import distributed_trace
+from azure.core.tracing.decorator_async import distributed_trace_async
+from .._deserialize import deserialize_permission, deserialize_share_properties
+from .._generated.aio import AzureFileStorage
+from .._generated.models import (
+    DeleteSnapshotsOptionType,
+    ShareStats,
+    SignedIdentifier
+)
+from .._models import ShareProtocols
+from .._parser import _parse_snapshot
+from .._share_client_helpers import (
+    _create_permission_for_share_options,
+    _format_url,
+    _from_share_url,
+    _parse_url
+)
+from .._shared.policies_async import ExponentialRetry
+from .._shared.base_client import parse_query, StorageAccountHostsMixin
+from .._shared.base_client_async import AsyncStorageAccountHostsMixin, AsyncTransportWrapper, parse_connection_str
+from .._shared.request_handlers import add_metadata_headers, serialize_iso
+from .._shared.response_handlers import (
+    process_storage_error,
+    return_headers_and_deserialized,
+    return_response_headers
+)
+from .._serialize import get_access_conditions, get_api_version
+from ..aio._lease_async import ShareLeaseClient
+from ._directory_client_async import ShareDirectoryClient
+from ._file_client_async import ShareFileClient
+
+if TYPE_CHECKING:
+    from azure.core.credentials import AzureNamedKeyCredential, AzureSasCredential
+    from azure.core.credentials_async import AsyncTokenCredential
+    from .._models import AccessPolicy, DirectoryProperties, FileProperties, ShareProperties
+
+
+class ShareClient(AsyncStorageAccountHostsMixin, StorageAccountHostsMixin):  # type: ignore [misc]
+    """A client to interact with a specific share, although that share may not yet exist.
+
+    For operations relating to a specific directory or file in this share, the clients for
+    those entities can also be retrieved using the :func:`get_directory_client` and :func:`get_file_client` functions.
+
+    :param str account_url:
+        The URI to the storage account. In order to create a client given the full URI to the share,
+        use the :func:`from_share_url` classmethod.
+    :param share_name:
+        The name of the share with which to interact.
+    :type share_name: str
+    :param str snapshot:
+        An optional share snapshot on which to operate. This can be the snapshot ID string
+        or the response returned from :func:`create_snapshot`.
+    :param credential:
+        The credentials with which to authenticate. This is optional if the
+        account URL already has a SAS token. The value can be a SAS token string,
+        an instance of an AzureSasCredential or AzureNamedKeyCredential from azure.core.credentials,
+        an account shared access key, or an instance of an AsyncTokenCredential class from azure.identity.
+        If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential
+        - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError.
+        If using an instance of AzureNamedKeyCredential, "name" should be the storage account name, and "key"
+        should be the storage account key.
+    :type credential:
+        ~azure.core.credentials.AzureNamedKeyCredential or
+        ~azure.core.credentials.AzureSasCredential or
+        ~azure.core.credentials_async.AsyncTokenCredential or
+        str or dict[str, str] or None
+    :keyword token_intent:
+        Required when using `AsyncTokenCredential` for authentication and ignored for other forms of authentication.
+        Specifies the intent for all requests when using `AsyncTokenCredential` authentication. Possible values are:
+
+        backup - Specifies requests are intended for backup/admin type operations, meaning that all file/directory
+                 ACLs are bypassed and full permissions are granted. User must also have required RBAC permission.
+
+    :paramtype token_intent: Literal['backup']
+    :keyword bool allow_trailing_dot: If true, the trailing dot will not be trimmed from the target URI.
+    :keyword bool allow_source_trailing_dot: If true, the trailing dot will not be trimmed from the source URI.
+    :keyword str api_version:
+        The Storage API version to use for requests. Default value is the most recent service version that is
+        compatible with the current SDK. Setting to an older version may result in reduced feature compatibility.
+
+        .. versionadded:: 12.1.0
+
+    :keyword str secondary_hostname:
+        The hostname of the secondary endpoint.
+    :keyword int max_range_size: The maximum range size used for a file upload. Defaults to 4*1024*1024.
+    """
+    def __init__(
+        self, account_url: str,
+        share_name: str,
+        snapshot: Optional[Union[str, Dict[str, Any]]] = None,
+        credential: Optional[Union[str, Dict[str, str], "AzureNamedKeyCredential", "AzureSasCredential", "AsyncTokenCredential"]] = None,  # pylint: disable=line-too-long
+        *,
+        token_intent: Optional[Literal['backup']] = None,
+        **kwargs: Any
+    ) -> None:
+        kwargs['retry_policy'] = kwargs.get('retry_policy') or ExponentialRetry(**kwargs)
+        loop = kwargs.pop('loop', None)
+        if loop and sys.version_info >= (3, 8):
+            warnings.warn("The 'loop' parameter was deprecated from asyncio's high-level"
+            "APIs in Python 3.8 and is no longer supported.", DeprecationWarning)
+        if hasattr(credential, 'get_token') and not token_intent:
+            raise ValueError("'token_intent' keyword is required when 'credential' is an AsyncTokenCredential.")
+        parsed_url = _parse_url(account_url, share_name)
+        path_snapshot, sas_token = parse_query(parsed_url.query)
+        if not sas_token and not credential:
+            raise ValueError(
+                'You need to provide either an account shared key or SAS token when creating a storage service.')
+        self.snapshot = _parse_snapshot(snapshot, path_snapshot)
+        self.share_name = share_name
+        self._query_str, credential = self._format_query_string(
+            sas_token=sas_token, credential=credential, share_snapshot=self.snapshot)
+        super(ShareClient, self).__init__(
+            parsed_url=parsed_url, service='file-share', credential=credential, **kwargs)
+        self.allow_trailing_dot = kwargs.pop('allow_trailing_dot', None)
+        self.allow_source_trailing_dot = kwargs.pop('allow_source_trailing_dot', None)
+        self.file_request_intent = token_intent
+        self._client = AzureFileStorage(url=self.url, base_url=self.url, pipeline=self._pipeline,
+                                        allow_trailing_dot=self.allow_trailing_dot,
+                                        allow_source_trailing_dot=self.allow_source_trailing_dot,
+                                        file_request_intent=self.file_request_intent)
+        self._client._config.version = get_api_version(kwargs)  # type: ignore [assignment]
+
+    @classmethod
+    def from_share_url(
+        cls, share_url: str,
+        snapshot: Optional[Union[str, Dict[str, Any]]] = None,
+        credential: Optional[Union[str, Dict[str, str], "AzureNamedKeyCredential", "AzureSasCredential", "AsyncTokenCredential"]] = None,  # pylint: disable=line-too-long
+        **kwargs: Any
+    ) -> Self:
+        """
+        :param str share_url: The full URI to the share.
+        :param snapshot:
+            An optional share snapshot on which to operate. This can be the snapshot ID string
+            or the response returned from :func:`create_snapshot`.
+        :type snapshot: Optional[Union[str, dict[str, Any]]]
+        :param credential:
+            The credentials with which to authenticate. This is optional if the
+            account URL already has a SAS token. The value can be a SAS token string,
+            an instance of an AzureSasCredential or AzureNamedKeyCredential from azure.core.credentials,
+            an account shared access key, or an instance of an AsyncTokenCredential class from azure.identity.
+            If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential
+            - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError.
+            If using an instance of AzureNamedKeyCredential, "name" should be the storage account name, and "key"
+            should be the storage account key.
+        :type credential:
+            ~azure.core.credentials.AzureNamedKeyCredential or
+            ~azure.core.credentials.AzureSasCredential or
+            ~azure.core.credentials_async.AsyncTokenCredential or
+            str or dict[str, str] or None
+        :returns: A share client.
+        :rtype: ~azure.storage.fileshare.aio.ShareClient
+        """
+        account_url, share_name, path_snapshot = _from_share_url(share_url, snapshot)
+        return cls(account_url, share_name, path_snapshot, credential, **kwargs)
+
+    def _format_url(self, hostname: str) -> str:
+        """Format the endpoint URL according to the current location mode hostname.
+
+        :param str hostname:
+            The hostname of the current location mode.
+        :returns: A formatted endpoint URL including current location mode hostname.
+        :rtype: str
+        """
+        return _format_url(self.scheme, hostname, self.share_name, self._query_str)
+
+    @classmethod
+    def from_connection_string(
+        cls, conn_str: str,
+        share_name: str,
+        snapshot: Optional[Union[str, Dict[str, Any]]] = None,
+        credential: Optional[Union[str, Dict[str, str], "AzureNamedKeyCredential", "AzureSasCredential", "AsyncTokenCredential"]] = None,  # pylint: disable=line-too-long
+        **kwargs: Any
+    ) -> Self:
+        """Create ShareClient from a Connection String.
+
+        :param str conn_str:
+            A connection string to an Azure Storage account.
+        :param str share_name: The name of the share.
+        :param snapshot:
+            The optional share snapshot on which to operate. This can be the snapshot ID string
+            or the response returned from :func:`create_snapshot`.
+        :type snapshot: Optional[Union[str, dict[str, Any]]]
+        :param credential:
+            The credentials with which to authenticate. This is optional if the
+            account URL already has a SAS token. The value can be a SAS token string,
+            an instance of an AzureSasCredential or AzureNamedKeyCredential from azure.core.credentials,
+            an account shared access key, or an instance of an AsyncTokenCredential class from azure.identity.
+            If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential
+            - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError.
+            If using an instance of AzureNamedKeyCredential, "name" should be the storage account name, and "key"
+            should be the storage account key.
+        :type credential:
+            ~azure.core.credentials.AzureNamedKeyCredential or
+            ~azure.core.credentials.AzureSasCredential or
+            ~azure.core.credentials_async.AsyncTokenCredential or
+            str or dict[str, str] or None
+        :returns: A share client.
+        :rtype: ~azure.storage.fileshare.aio.ShareClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/file_samples_share_async.py
+                :start-after: [START create_share_client_from_conn_string]
+                :end-before: [END create_share_client_from_conn_string]
+                :language: python
+                :dedent: 8
+                :caption: Gets the share client from connection string.
+        """
+        account_url, secondary, credential = parse_connection_str(conn_str, credential, 'file')
+        if 'secondary_hostname' not in kwargs:
+            kwargs['secondary_hostname'] = secondary
+        return cls(
+            account_url, share_name=share_name, snapshot=snapshot, credential=credential, **kwargs)
+
+    def get_directory_client(self, directory_path: Optional[str] = None) -> ShareDirectoryClient:
+        """Get a client to interact with the specified directory.
+        The directory need not already exist.
+
+        :param str directory_path:
+            Path to the specified directory.
+        :returns: A Directory Client.
+        :rtype: ~azure.storage.fileshare.aio.ShareDirectoryClient
+        """
+        _pipeline = AsyncPipeline(
+            transport=AsyncTransportWrapper(self._pipeline._transport),  # pylint: disable=protected-access
+            policies=self._pipeline._impl_policies  # type: ignore [arg-type] # pylint: disable=protected-access
+        )
+
+        return ShareDirectoryClient(
+            self.url, share_name=self.share_name, directory_path=directory_path or "", snapshot=self.snapshot,
+            credential=self.credential, api_version=self.api_version, _hosts=self._hosts, _configuration=self._config,
+            _pipeline=_pipeline, _location_mode=self._location_mode, allow_trailing_dot=self.allow_trailing_dot,
+            allow_source_trailing_dot=self.allow_source_trailing_dot, token_intent=self.file_request_intent)
+
+    def get_file_client(self, file_path: str) -> ShareFileClient:
+        """Get a client to interact with the specified file.
+        The file need not already exist.
+
+        :param str file_path:
+            Path to the specified file.
+        :returns: A File Client.
+        :rtype: ~azure.storage.fileshare.aio.ShareFileClient
+        """
+        _pipeline = AsyncPipeline(
+            transport=AsyncTransportWrapper(self._pipeline._transport),  # pylint: disable=protected-access
+            policies=self._pipeline._impl_policies  # type: ignore [arg-type] # pylint: disable=protected-access
+        )
+
+        return ShareFileClient(
+            self.url, share_name=self.share_name, file_path=file_path, snapshot=self.snapshot,
+            credential=self.credential, api_version=self.api_version, _hosts=self._hosts, _configuration=self._config,
+            _pipeline=_pipeline, _location_mode=self._location_mode, allow_trailing_dot=self.allow_trailing_dot,
+            allow_source_trailing_dot=self.allow_source_trailing_dot, token_intent=self.file_request_intent)
+
+    @distributed_trace_async
+    async def acquire_lease(self, **kwargs: Any) -> ShareLeaseClient:
+        """Requests a new lease.
+
+        If the share does not have an active lease, the Share
+        Service creates a lease on the share and returns a new lease ID.
+
+        .. versionadded:: 12.5.0
+
+        :keyword int lease_duration:
+            Specifies the duration of the lease, in seconds, or negative one
+            (-1) for a lease that never expires. A non-infinite lease can be
+            between 15 and 60 seconds. A lease duration cannot be changed
+            using renew or change. Default is -1 (infinite lease).
+        :keyword str lease_id:
+            Proposed lease ID, in a GUID string format. The Share Service
+            returns 400 (Invalid request) if the proposed lease ID is not
+            in the correct format.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :returns: A ShareLeaseClient object.
+        :rtype: ~azure.storage.fileshare.ShareLeaseClient
+        """
+        kwargs['lease_duration'] = kwargs.pop('lease_duration', -1)
+        lease_id = kwargs.pop('lease_id', None)
+        lease = ShareLeaseClient(self, lease_id=lease_id)
+        await lease.acquire(**kwargs)
+        return lease
+
+    @distributed_trace_async
+    async def create_share(self, **kwargs: Any) -> Dict[str, Any]:
+        """Creates a new Share under the account. If a share with the
+        same name already exists, the operation fails.
+
+        :keyword metadata:
+            Name-value pairs associated with the share as metadata.
+        :paramtype metadata: Optional[dict[str, str]]
+        :keyword int quota:
+            The quota to be allotted.
+        :keyword access_tier:
+            Specifies the access tier of the share.
+            Possible values: 'TransactionOptimized', 'Hot', 'Cool', 'Premium'
+        :paramtype access_tier: str or ~azure.storage.fileshare.models.ShareAccessTier
+
+            .. versionadded:: 12.4.0
+
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :keyword protocols:
+            Protocols to enable on the share. Only one protocol can be enabled on the share.
+        :paramtype protocols: str or ~azure.storage.fileshare.ShareProtocols
+        :keyword root_squash:
+            Root squash to set on the share.
+            Only valid for NFS shares. Possible values include: 'NoRootSquash', 'RootSquash', 'AllSquash'.
+        :paramtype root_squash: str or ~azure.storage.fileshare.ShareRootSquash
+        :keyword bool paid_bursting_enabled: This property enables paid bursting.
+        :keyword int paid_bursting_bandwidth_mibps: The maximum throughput the file share can support in MiB/s.
+        :keyword int paid_bursting_iops: The maximum IOPS the file share can support.
+        :keyword int provisioned_iops: The provisioned IOPS of the share, stored on the share object.
+        :keyword int provisioned_bandwidth_mibps: The provisioned throughput of the share, stored on the share object.
+        :returns: Share-updated property dict (Etag and last modified).
+        :rtype: dict[str, Any]
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/file_samples_share_async.py
+                :start-after: [START create_share]
+                :end-before: [END create_share]
+                :language: python
+                :dedent: 12
+                :caption: Creates a file share.
+        """
+        metadata = kwargs.pop('metadata', None)
+        quota = kwargs.pop('quota', None)
+        access_tier = kwargs.pop('access_tier', None)
+        timeout = kwargs.pop('timeout', None)
+        root_squash = kwargs.pop('root_squash', None)
+        protocols = kwargs.pop('protocols', None)
+        paid_bursting_bandwidth_mibps = kwargs.pop('paid_bursting_bandwidth_mibps', None)
+        paid_bursting_iops = kwargs.pop('paid_bursting_iops', None)
+        share_provisioned_iops = kwargs.pop('provisioned_iops', None)
+        share_provisioned_bandwidth_mibps = kwargs.pop('provisioned_bandwidth_mibps', None)
+        if protocols and protocols not in ['NFS', 'SMB', ShareProtocols.SMB, ShareProtocols.NFS]:
+            raise ValueError("The enabled protocol must be set to either SMB or NFS.")
+        if root_squash and protocols not in ['NFS', ShareProtocols.NFS]:
+            raise ValueError("The 'root_squash' keyword can only be used on NFS enabled shares.")
+        headers = kwargs.pop('headers', {})
+        headers.update(add_metadata_headers(metadata))
+
+        try:
+            return cast(Dict[str, Any], await self._client.share.create(
+                timeout=timeout,
+                metadata=metadata,
+                quota=quota,
+                access_tier=access_tier,
+                root_squash=root_squash,
+                enabled_protocols=protocols,
+                paid_bursting_max_bandwidth_mibps=paid_bursting_bandwidth_mibps,
+                paid_bursting_max_iops=paid_bursting_iops,
+                share_provisioned_iops=share_provisioned_iops,
+                share_provisioned_bandwidth_mibps=share_provisioned_bandwidth_mibps,
+                cls=return_response_headers,
+                headers=headers,
+                **kwargs))
+        except HttpResponseError as error:
+            process_storage_error(error)
+
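A minimal creation sketch exercising the protocol validation above; the share name is illustrative, and NFS shares require a premium file storage account:

from azure.storage.fileshare.aio import ShareClient

async def create_nfs_share(conn_str: str) -> None:
    share = ShareClient.from_connection_string(conn_str, share_name="nfs-data")
    async with share:
        # root_squash is accepted only because protocols is NFS; with SMB it raises ValueError.
        result = await share.create_share(protocols="NFS", root_squash="RootSquash", quota=100)
        print(result["etag"], result["last_modified"])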
+    @distributed_trace_async
+    async def create_snapshot(self, **kwargs: Any) -> Dict[str, Any]:
+        """Creates a snapshot of the share.
+
+        A snapshot is a read-only version of a share that's taken at a point in time.
+        It can be read, copied, or deleted, but not modified. Snapshots provide a way
+        to back up a share as it appears at a moment in time.
+
+        A snapshot of a share has the same name as the base share from which the snapshot
+        is taken, with a DateTime value appended to indicate the time at which the
+        snapshot was taken.
+
+        :keyword metadata:
+            Name-value pairs associated with the share as metadata.
+        :paramtype metadata: Optional[dict[str, str]]
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :returns: Share-updated property dict (Snapshot ID, Etag, and last modified).
+        :rtype: dict[str, Any]
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/file_samples_share_async.py
+                :start-after: [START create_share_snapshot]
+                :end-before: [END create_share_snapshot]
+                :language: python
+                :dedent: 16
+                :caption: Creates a snapshot of the file share.
+        """
+        metadata = kwargs.pop('metadata', None)
+        timeout = kwargs.pop('timeout', None)
+        headers = kwargs.pop('headers', {})
+        headers.update(add_metadata_headers(metadata))
+        try:
+            return cast(Dict[str, Any], await self._client.share.create_snapshot(
+                timeout=timeout,
+                cls=return_response_headers,
+                headers=headers,
+                **kwargs))
+        except HttpResponseError as error:
+            process_storage_error(error)
+
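The returned dict pairs with the snapshot constructor argument: it (or its 'snapshot' ID string) can be passed back to address the snapshot. A sketch, assuming an existing share client whose URL carries usable credentials:

from azure.storage.fileshare.aio import ShareClient

async def snapshot_roundtrip(share: ShareClient) -> None:
    snap = await share.create_snapshot(metadata={"reason": "nightly"})
    # _parse_snapshot accepts the whole response dict or just snap["snapshot"].
    snap_client = ShareClient.from_share_url(share.url, snapshot=snap, credential=share.credential)
    props = await snap_client.get_share_properties()
    print(props.snapshot)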
+    @distributed_trace_async
+    async def delete_share(
+        self, delete_snapshots: Optional[Union[bool, Literal['include', 'include-leased']]] = False,
+        **kwargs: Any
+    ) -> None:
+        """Marks the specified share for deletion. The share is
+        later deleted during garbage collection.
+
+        :param delete_snapshots:
+            Indicates whether snapshots are to be deleted. If True or "include", snapshots
+            are deleted, but leased snapshots are skipped. To also delete leased snapshots,
+            specify "include-leased".
+        :type delete_snapshots:
+            Optional[Union[bool, Literal['include', 'include-leased']]]
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :keyword lease:
+            Required if the share has an active lease. Value can be a ShareLeaseClient object
+            or the lease ID as a string.
+
+            .. versionadded:: 12.5.0
+
+            This keyword argument was introduced in API version '2020-08-04'.
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/file_samples_share_async.py
+                :start-after: [START delete_share]
+                :end-before: [END delete_share]
+                :language: python
+                :dedent: 16
+                :caption: Deletes the share and any snapshots.
+        """
+        access_conditions = get_access_conditions(kwargs.pop('lease', None))
+        timeout = kwargs.pop('timeout', None)
+        delete_include = None
+        if isinstance(delete_snapshots, bool) and delete_snapshots:
+            delete_include = DeleteSnapshotsOptionType.INCLUDE
+        elif delete_snapshots == 'include':
+            delete_include = DeleteSnapshotsOptionType.INCLUDE
+        elif delete_snapshots == 'include-leased':
+            delete_include = DeleteSnapshotsOptionType.INCLUDE_LEASED
+        try:
+            await self._client.share.delete(
+                timeout=timeout,
+                sharesnapshot=self.snapshot,
+                delete_snapshots=delete_include,
+                lease_access_conditions=access_conditions,
+                **kwargs)
+        except HttpResponseError as error:
+            process_storage_error(error)
+
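For example, deleting a share along with every snapshot, leased ones included (assuming an existing share client):

from azure.storage.fileshare.aio import ShareClient

async def remove_share(share: ShareClient) -> None:
    # "include-leased" maps to DeleteSnapshotsOptionType.INCLUDE_LEASED above.
    await share.delete_share(delete_snapshots="include-leased")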
+    @distributed_trace_async
+    async def get_share_properties(self, **kwargs: Any) -> "ShareProperties":
+        """Returns all user-defined metadata and system properties for the
+        specified share. The data returned does not include the share's
+        list of files or directories.
+
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :keyword lease:
+            Required if the share has an active lease. Value can be a ShareLeaseClient object
+            or the lease ID as a string.
+
+            .. versionadded:: 12.5.0
+
+            This keyword argument was introduced in API version '2020-08-04'.
+
+        :returns: The share properties.
+        :rtype: ~azure.storage.fileshare.ShareProperties
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/file_samples_hello_world_async.py
+                :start-after: [START get_share_properties]
+                :end-before: [END get_share_properties]
+                :language: python
+                :dedent: 16
+                :caption: Gets the share properties.
+        """
+        access_conditions = get_access_conditions(kwargs.pop('lease', None))
+        timeout = kwargs.pop('timeout', None)
+        try:
+            props = cast("ShareProperties", await self._client.share.get_properties(
+                timeout=timeout,
+                sharesnapshot=self.snapshot,
+                cls=deserialize_share_properties,
+                lease_access_conditions=access_conditions,
+                **kwargs))
+        except HttpResponseError as error:
+            process_storage_error(error)
+        props.name = self.share_name
+        props.snapshot = self.snapshot
+        return props
+
+    @distributed_trace_async
+    async def set_share_quota(self, quota: int, **kwargs: Any) -> Dict[str, Any]:
+        """Sets the quota for the share.
+
+        :param int quota:
+            Specifies the maximum size of the share, in gigabytes.
+            Must be greater than 0, and less than or equal to 5TB.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :keyword lease:
+            Required if the share has an active lease. Value can be a ShareLeaseClient object
+            or the lease ID as a string.
+
+            .. versionadded:: 12.5.0
+
+            This keyword argument was introduced in API version '2020-08-04'.
+
+        :returns: Share-updated property dict (Etag and last modified).
+        :rtype: dict[str, Any]
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/file_samples_share_async.py
+                :start-after: [START set_share_quota]
+                :end-before: [END set_share_quota]
+                :language: python
+                :dedent: 16
+                :caption: Sets the share quota.
+        """
+        access_conditions = get_access_conditions(kwargs.pop('lease', None))
+        timeout = kwargs.pop('timeout', None)
+        try:
+            return cast(Dict[str, Any], await self._client.share.set_properties(
+                timeout=timeout,
+                quota=quota,
+                access_tier=None,
+                cls=return_response_headers,
+                lease_access_conditions=access_conditions,
+                **kwargs))
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    @distributed_trace_async
+    async def set_share_properties(self, **kwargs: Any) -> Dict[str, Any]:
+        """Sets the share properties.
+
+        .. versionadded:: 12.3.0
+
+        :keyword access_tier:
+            Specifies the access tier of the share.
+            Possible values: 'TransactionOptimized', 'Hot', 'Cool', 'Premium'
+        :paramtype access_tier: str or ~azure.storage.fileshare.models.ShareAccessTier
+        :keyword int quota:
+            Specifies the maximum size of the share, in gigabytes.
+            Must be greater than 0, and less than or equal to 5TB.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :keyword root_squash:
+            Root squash to set on the share.
+            Only valid for NFS shares. Possible values include: 'NoRootSquash', 'RootSquash', 'AllSquash'
+        :paramtype root_squash: str or ~azure.storage.fileshare.ShareRootSquash
+        :keyword lease:
+            Required if the share has an active lease. Value can be a ShareLeaseClient object
+            or the lease ID as a string.
+        :keyword bool paid_bursting_enabled: This property enables paid bursting.
+        :keyword int paid_bursting_bandwidth_mibps: The maximum throughput the file share can support in MiB/s.
+        :keyword int paid_bursting_iops: The maximum IOPS the file share can support.
+        :keyword int provisioned_iops: The provisioned IOPS of the share, stored on the share object.
+        :keyword int provisioned_bandwidth_mibps: The provisioned throughput of the share, stored on the share object.
+        :returns: Share-updated property dict (Etag and last modified).
+        :rtype: dict[str, Any]
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/file_samples_share_async.py
+                :start-after: [START set_share_properties]
+                :end-before: [END set_share_properties]
+                :language: python
+                :dedent: 16
+                :caption: Sets the share properties.
+        """
+        access_conditions = get_access_conditions(kwargs.pop('lease', None))
+        timeout = kwargs.pop('timeout', None)
+        access_tier = kwargs.pop('access_tier', None)
+        quota = kwargs.pop('quota', None)
+        root_squash = kwargs.pop('root_squash', None)
+        paid_bursting_bandwidth_mibps = kwargs.pop('paid_bursting_bandwidth_mibps', None)
+        paid_bursting_iops = kwargs.pop('paid_bursting_iops', None)
+        share_provisioned_iops = kwargs.pop('provisioned_iops', None)
+        share_provisioned_bandwidth_mibps = kwargs.pop('provisioned_bandwidth_mibps', None)
+        if all(parameter is None for parameter in [access_tier, quota, root_squash]):
+            raise ValueError("set_share_properties should be called with at least one parameter.")
+        try:
+            return cast(Dict[str, Any], await self._client.share.set_properties(
+                timeout=timeout,
+                quota=quota,
+                access_tier=access_tier,
+                root_squash=root_squash,
+                lease_access_conditions=access_conditions,
+                paid_bursting_max_bandwidth_mibps=paid_bursting_bandwidth_mibps,
+                paid_bursting_max_iops=paid_bursting_iops,
+                share_provisioned_iops=share_provisioned_iops,
+                share_provisioned_bandwidth_mibps=share_provisioned_bandwidth_mibps,
+                cls=return_response_headers,
+                **kwargs))
+        except HttpResponseError as error:
+            process_storage_error(error)
+
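A sketch of the at-least-one-parameter contract (the tier and quota values are illustrative):

from azure.storage.fileshare.aio import ShareClient

async def retier(share: ShareClient) -> None:
    # At least one of access_tier, quota, or root_squash must be given,
    # otherwise set_share_properties raises ValueError before any request is sent.
    updated = await share.set_share_properties(access_tier="Cool", quota=200)
    print(updated["last_modified"])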
+    @distributed_trace_async
+    async def set_share_metadata(self, metadata: Dict[str, str], **kwargs: Any) -> Dict[str, Any]:
+        """Sets the metadata for the share.
+
+        Each call to this operation replaces all existing metadata
+        attached to the share. To remove all metadata from the share,
+        call this operation with an empty metadata dict.
+
+        :param metadata:
+            Name-value pairs associated with the share as metadata.
+        :type metadata: dict[str, str]
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :keyword lease:
+            Required if the share has an active lease. Value can be a ShareLeaseClient object
+            or the lease ID as a string.
+
+            .. versionadded:: 12.5.0
+
+            This keyword argument was introduced in API version '2020-08-04'.
+
+        :returns: Share-updated property dict (Etag and last modified).
+        :rtype: dict[str, Any]
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/file_samples_share_async.py
+                :start-after: [START set_share_metadata]
+                :end-before: [END set_share_metadata]
+                :language: python
+                :dedent: 16
+                :caption: Sets the share metadata.
+        """
+        access_conditions = get_access_conditions(kwargs.pop('lease', None))
+        timeout = kwargs.pop('timeout', None)
+        headers = kwargs.pop('headers', {})
+        headers.update(add_metadata_headers(metadata))
+        try:
+            return cast(Dict[str, Any], await self._client.share.set_metadata(
+                timeout=timeout,
+                cls=return_response_headers,
+                headers=headers,
+                lease_access_conditions=access_conditions,
+                **kwargs))
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    @distributed_trace_async
+    async def get_share_access_policy(self, **kwargs: Any) -> Dict[str, Any]:
+        """Gets the permissions for the share. The permissions
+        indicate whether files in a share may be accessed publicly.
+
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :keyword lease:
+            Required if the share has an active lease. Value can be a ShareLeaseClient object
+            or the lease ID as a string.
+
+            .. versionadded:: 12.5.0
+
+            This keyword argument was introduced in API version '2020-08-04'.
+
+        :returns: Access policy information in a dict.
+        :rtype: dict[str, Any]
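+
+        A minimal sketch of reading the policy (illustrative only; the
+        connection string and share name are hypothetical placeholders):
+
+        .. code-block:: python
+
+            import asyncio
+            from azure.storage.fileshare.aio import ShareClient
+
+            async def main():
+                async with ShareClient.from_connection_string(
+                        "<connection-string>", share_name="myshare") as share:
+                    access = await share.get_share_access_policy()
+                    print(access["public_access"], access["signed_identifiers"])
+
+            asyncio.run(main())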
+        """
+        access_conditions = get_access_conditions(kwargs.pop('lease', None))
+        timeout = kwargs.pop('timeout', None)
+        try:
+            response, identifiers = await self._client.share.get_access_policy(
+                timeout=timeout,
+                cls=return_headers_and_deserialized,
+                lease_access_conditions=access_conditions,
+                **kwargs)
+        except HttpResponseError as error:
+            process_storage_error(error)
+        return {
+            'public_access': response.get('share_public_access'),
+            'signed_identifiers': identifiers or []
+        }
+
+    @distributed_trace_async
+    async def set_share_access_policy(
+        self, signed_identifiers: Dict[str, "AccessPolicy"],
+        **kwargs: Any
+    ) -> Dict[str, Any]:
+        """Sets the permissions for the share, or stored access
+        policies that may be used with Shared Access Signatures. The permissions
+        indicate whether files in a share may be accessed publicly.
+
+        :param signed_identifiers:
+            A dictionary of access policies to associate with the share. The
+            dictionary may contain up to 5 elements. An empty dictionary
+            will clear the access policies set on the service.
+        :type signed_identifiers: dict[str, ~azure.storage.fileshare.AccessPolicy]
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :keyword lease:
+            Required if the share has an active lease. Value can be a ShareLeaseClient object
+            or the lease ID as a string.
+
+            .. versionadded:: 12.5.0
+
+            This keyword argument was introduced in API version '2020-08-04'.
+
+        :returns: Share-updated property dict (Etag and last modified).
+        :rtype: dict[str, Any]
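+
+        A minimal sketch that sets a single stored access policy (illustrative
+        only; the connection string, share name, and policy id are hypothetical):
+
+        .. code-block:: python
+
+            import asyncio
+            from datetime import datetime, timedelta, timezone
+            from azure.storage.fileshare import AccessPolicy, ShareSasPermissions
+            from azure.storage.fileshare.aio import ShareClient
+
+            async def main():
+                # A read-only policy that expires in one hour.
+                policy = AccessPolicy(
+                    permission=ShareSasPermissions(read=True),
+                    expiry=datetime.now(timezone.utc) + timedelta(hours=1))
+                async with ShareClient.from_connection_string(
+                        "<connection-string>", share_name="myshare") as share:
+                    await share.set_share_access_policy({"read-only": policy})
+
+            asyncio.run(main())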
+        """
+        access_conditions = get_access_conditions(kwargs.pop('lease', None))
+        timeout = kwargs.pop('timeout', None)
+        if len(signed_identifiers) > 5:
+            raise ValueError(
+                'Too many access policies provided. The server does not support setting '
+                'more than 5 access policies on a single resource.')
+        identifiers = []
+        for key, value in signed_identifiers.items():
+            if value:
+                value.start = serialize_iso(value.start)
+                value.expiry = serialize_iso(value.expiry)
+            identifiers.append(SignedIdentifier(id=key, access_policy=value))
+        try:
+            return cast(Dict[str, Any], await self._client.share.set_access_policy(
+                share_acl=identifiers or None,
+                timeout=timeout,
+                cls=return_response_headers,
+                lease_access_conditions=access_conditions,
+                **kwargs))
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    @distributed_trace_async
+    async def get_share_stats(self, **kwargs: Any) -> int:
+        """Gets the approximate size of the data stored on the share in bytes.
+
+        Note that this value may not include all recently created
+        or recently re-sized files.
+
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :keyword lease:
+            Required if the share has an active lease. Value can be a ShareLeaseClient object
+            or the lease ID as a string.
+
+            .. versionadded:: 12.5.0
+
+            This keyword argument was introduced in API version '2020-08-04'.
+
+        :return: The approximate size of the data (in bytes) stored on the share.
+        :rtype: int
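+
+        A minimal sketch (illustrative only; the connection string and share
+        name are hypothetical placeholders):
+
+        .. code-block:: python
+
+            import asyncio
+            from azure.storage.fileshare.aio import ShareClient
+
+            async def main():
+                async with ShareClient.from_connection_string(
+                        "<connection-string>", share_name="myshare") as share:
+                    used = await share.get_share_stats()
+                    print(f"approximately {used} bytes in use")
+
+            asyncio.run(main())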
+        """
+        access_conditions = get_access_conditions(kwargs.pop('lease', None))
+        timeout = kwargs.pop('timeout', None)
+        try:
+            stats = cast(ShareStats, await self._client.share.get_statistics(
+                timeout=timeout,
+                lease_access_conditions=access_conditions,
+                **kwargs))
+            return stats.share_usage_bytes
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    @distributed_trace
+    def list_directories_and_files(
+        self, directory_name: Optional[str] = None,
+        name_starts_with: Optional[str] = None,
+        marker: Optional[str] = None,
+        **kwargs: Any
+    ) -> AsyncItemPaged[Union["DirectoryProperties", "FileProperties"]]:
+        """Lists the directories and files under the share.
+
+        :param str directory_name:
+            Name of a directory.
+        :param str name_starts_with:
+            Filters the results to return only directories whose names
+            begin with the specified prefix.
+        :param str marker:
+            An opaque continuation token. This value can be retrieved from the
+            next_marker field of a previous generator object. If specified,
+            this generator will begin returning results from this point.
+        :keyword List[str] include:
+            Include this parameter to specify one or more datasets to include in the response.
+            Possible str values are "timestamps", "Etag", "Attributes", "PermissionKey".
+
+            .. versionadded:: 12.6.0
+
+            This keyword argument was introduced in API version '2020-10-02'.
+
+        :keyword bool include_extended_info:
+            If set to True, the file ID will be included in the listed results.
+
+            .. versionadded:: 12.6.0
+
+            This keyword argument was introduced in API version '2020-10-02'.
+
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :returns: An auto-paging iterable of dict-like DirectoryProperties and FileProperties.
+        :rtype: ~azure.core.async_paging.AsyncItemPaged[Union[DirectoryProperties, FileProperties]]
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/file_samples_share_async.py
+                :start-after: [START share_list_files_in_dir]
+                :end-before: [END share_list_files_in_dir]
+                :language: python
+                :dedent: 16
+                :caption: List directories and files in the share.
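+
+        A minimal sketch of iterating the listing (illustrative only; the
+        connection string and share name are hypothetical placeholders):
+
+        .. code-block:: python
+
+            import asyncio
+            from azure.storage.fileshare.aio import ShareClient
+
+            async def main():
+                async with ShareClient.from_connection_string(
+                        "<connection-string>", share_name="myshare") as share:
+                    # Items are dict-like DirectoryProperties/FileProperties.
+                    async for item in share.list_directories_and_files():
+                        print(item["name"])
+
+            asyncio.run(main())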
+        """
+        timeout = kwargs.pop('timeout', None)
+        directory = self.get_directory_client(directory_name)
+        return directory.list_directories_and_files(
+            name_starts_with=name_starts_with, marker=marker, timeout=timeout, **kwargs)
+
+    @distributed_trace_async
+    async def create_permission_for_share(self, file_permission: str, **kwargs: Any) -> Optional[str]:
+        """Create a permission (a security descriptor) at the share level.
+
+        This 'permission' can be used for the files/directories in the share.
+        If a matching 'permission' already exists, its key is returned; otherwise,
+        a new permission is created at the share level and its key is returned.
+
+        :param str file_permission:
+            File permission, as a portable SDDL string.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :keyword file_permission_format:
+            Specifies the format in which the permission is returned. If not specified, SDDL will be the default.
+        :paramtype file_permission_format: Literal['sddl', 'binary']
+        :returns: A file permission key
+        :rtype: str or None
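+
+        A minimal sketch (illustrative only; the connection string, share name,
+        and SDDL descriptor below are hypothetical placeholders):
+
+        .. code-block:: python
+
+            import asyncio
+            from azure.storage.fileshare.aio import ShareClient
+
+            async def main():
+                sddl = "O:SYG:SYD:(A;;FA;;;WD)"  # hypothetical descriptor
+                async with ShareClient.from_connection_string(
+                        "<connection-string>", share_name="myshare") as share:
+                    key = await share.create_permission_for_share(sddl)
+                    print(key)
+
+            asyncio.run(main())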
+        """
+        timeout = kwargs.pop('timeout', None)
+        options = _create_permission_for_share_options(file_permission, timeout=timeout, **kwargs)
+        try:
+            return cast(Optional[str], await self._client.share.create_permission(**options))
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    @distributed_trace_async
+    async def get_permission_for_share(self, permission_key: str, **kwargs: Any) -> str:
+        """Get a permission (a security descriptor) for a given key.
+
+        This 'permission' can be used for the files/directories in the share.
+
+        :param str permission_key:
+            Key of the file permission to retrieve
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :keyword file_permission_format:
+            Specifies the format in which the permission is returned. If not specified, SDDL will be the default.
+        :paramtype file_permission_format: Literal['sddl', 'binary']
+        :returns: A file permission (a portable SDDL)
+        :rtype: str
+        """
+        timeout = kwargs.pop('timeout', None)
+        try:
+            return cast(str, await self._client.share.get_permission(
+                file_permission_key=permission_key,
+                cls=deserialize_permission,
+                timeout=timeout,
+                **kwargs))
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    @distributed_trace_async
+    async def create_directory(self, directory_name: str, **kwargs: Any) -> ShareDirectoryClient:
+        """Creates a directory in the share and returns a client to interact
+        with the directory.
+
+        :param str directory_name:
+            The name of the directory.
+        :keyword metadata:
+            Name-value pairs associated with the directory as metadata.
+        :paramtype metadata: Optional[dict[str, str]]
+        :keyword str owner:
+            NFS only. The owner of the directory.
+        :keyword str group:
+            NFS only. The owning group of the directory.
+        :keyword str file_mode:
+            NFS only. The file mode of the directory.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :returns: ShareDirectoryClient
+        :rtype: ~azure.storage.fileshare.aio.ShareDirectoryClient
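+
+        A minimal sketch (illustrative only; the connection string, share name,
+        and directory name are hypothetical placeholders):
+
+        .. code-block:: python
+
+            import asyncio
+            from azure.storage.fileshare.aio import ShareClient
+
+            async def main():
+                async with ShareClient.from_connection_string(
+                        "<connection-string>", share_name="myshare") as share:
+                    directory = await share.create_directory("reports")
+                    print(directory.directory_path)
+
+            asyncio.run(main())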
+        """
+        directory = self.get_directory_client(directory_name)
+        kwargs.setdefault('merge_span', True)
+        await directory.create_directory(**kwargs)
+        return directory
+
+    @distributed_trace_async
+    async def delete_directory(self, directory_name: str, **kwargs: Any) -> None:
+        """Marks the directory for deletion. The directory is
+        later deleted during garbage collection.
+
+        :param str directory_name:
+            The name of the directory.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :rtype: None
+        """
+        directory = self.get_directory_client(directory_name)
+        await directory.delete_directory(**kwargs)
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/fileshare/aio/_share_service_client_async.py b/.venv/lib/python3.12/site-packages/azure/storage/fileshare/aio/_share_service_client_async.py
new file mode 100644
index 00000000..bf33ac78
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/fileshare/aio/_share_service_client_async.py
@@ -0,0 +1,490 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+# pylint: disable=docstring-keyword-should-match-keyword-only
+
+import functools
+import sys
+import warnings
+from typing import (
+    Union, Optional, Any, Dict, List,
+    TYPE_CHECKING
+)
+from typing_extensions import Self
+
+from azure.core.async_paging import AsyncItemPaged
+from azure.core.exceptions import HttpResponseError
+from azure.core.tracing.decorator import distributed_trace
+from azure.core.tracing.decorator_async import distributed_trace_async
+from azure.core.pipeline import AsyncPipeline
+from .._generated.aio import AzureFileStorage
+from .._generated.models import StorageServiceProperties
+from .._models import CorsRule, service_properties_deserialize, ShareProperties
+from .._serialize import get_api_version
+from .._share_service_client_helpers import _parse_url
+from .._shared.base_client import StorageAccountHostsMixin, parse_query
+from .._shared.base_client_async import AsyncStorageAccountHostsMixin, AsyncTransportWrapper, parse_connection_str
+from .._shared.policies_async import ExponentialRetry
+from .._shared.response_handlers import process_storage_error
+from ._models import SharePropertiesPaged
+from ._share_client_async import ShareClient
+
+if sys.version_info >= (3, 8):
+    from typing import Literal
+else:
+    from typing_extensions import Literal
+
+if TYPE_CHECKING:
+    from azure.core.credentials import AzureNamedKeyCredential, AzureSasCredential
+    from azure.core.credentials_async import AsyncTokenCredential
+    from .._models import Metrics, ShareProtocolSettings
+
+
+class ShareServiceClient(AsyncStorageAccountHostsMixin, StorageAccountHostsMixin):  # type: ignore [misc]
+    """A client to interact with the File Share Service at the account level.
+
+    This client provides operations to retrieve and configure the account properties
+    as well as list, create and delete shares within the account.
+    For operations relating to a specific share, a client for that entity
+    can also be retrieved using the :func:`get_share_client` function.
+
+    :param str account_url:
+        The URL to the file share storage account. Any other entities included
+        in the URL path (e.g. share or file) will be discarded. This URL can be optionally
+        authenticated with a SAS token.
+    :param credential:
+        The credentials with which to authenticate. This is optional if the
+        account URL already has a SAS token. The value can be a SAS token string,
+        an instance of an AzureSasCredential or AzureNamedKeyCredential from azure.core.credentials,
+        an account shared access key, or an instance of an AsyncTokenCredential class from azure.identity.
+        If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential
+        - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError.
+        If using an instance of AzureNamedKeyCredential, "name" should be the storage account name, and "key"
+        should be the storage account key.
+    :type credential:
+        ~azure.core.credentials.AzureNamedKeyCredential or
+        ~azure.core.credentials.AzureSasCredential or
+        ~azure.core.credentials_async.AsyncTokenCredential or
+        str or dict[str, str] or None
+    :keyword token_intent:
+        Required when using `AsyncTokenCredential` for authentication and ignored for other forms of authentication.
+        Specifies the intent for all requests when using `AsyncTokenCredential` authentication. Possible values are:
+
+        backup - Specifies requests are intended for backup/admin type operations, meaning that all file/directory
+                 ACLs are bypassed and full permissions are granted. User must also have required RBAC permission.
+
+    :paramtype token_intent: Literal['backup']
+    :keyword bool allow_trailing_dot: If true, the trailing dot will not be trimmed from the target URI.
+    :keyword bool allow_source_trailing_dot: If true, the trailing dot will not be trimmed from the source URI.
+    :keyword str api_version:
+        The Storage API version to use for requests. Default value is the most recent service version that is
+        compatible with the current SDK. Setting to an older version may result in reduced feature compatibility.
+
+        .. versionadded:: 12.1.0
+
+    :keyword str secondary_hostname:
+        The hostname of the secondary endpoint.
+    :keyword int max_range_size: The maximum range size used for a file upload. Defaults to 4*1024*1024.
+
+    .. admonition:: Example:
+
+        .. literalinclude:: ../samples/file_samples_authentication_async.py
+            :start-after: [START create_share_service_client]
+            :end-before: [END create_share_service_client]
+            :language: python
+            :dedent: 8
+            :caption: Create the share service client with url and credential.
+    """
+    def __init__(
+        self, account_url: str,
+        credential: Optional[Union[str, Dict[str, str], "AzureNamedKeyCredential", "AzureSasCredential", "AsyncTokenCredential"]] = None,  # pylint: disable=line-too-long
+        *,
+        token_intent: Optional[Literal['backup']] = None,
+        **kwargs: Any
+    ) -> None:
+        kwargs['retry_policy'] = kwargs.get('retry_policy') or ExponentialRetry(**kwargs)
+        loop = kwargs.pop('loop', None)
+        if loop and sys.version_info >= (3, 8):
+            warnings.warn("The 'loop' parameter was deprecated from asyncio's high-level"
+            "APIs in Python 3.8 and is no longer supported.", DeprecationWarning)
+
+        if hasattr(credential, 'get_token') and not token_intent:
+            raise ValueError("'token_intent' keyword is required when 'credential' is an AsyncTokenCredential.")
+        parsed_url = _parse_url(account_url=account_url)
+        _, sas_token = parse_query(parsed_url.query)
+        if not sas_token and not credential:
+            raise ValueError(
+                'You need to provide either an account shared key or SAS token when creating a storage service.')
+
+        self._query_str, credential = self._format_query_string(sas_token, credential)
+        super(ShareServiceClient, self).__init__(
+            parsed_url, service='file-share', credential=credential, **kwargs)
+        self.allow_trailing_dot = kwargs.pop('allow_trailing_dot', None)
+        self.allow_source_trailing_dot = kwargs.pop('allow_source_trailing_dot', None)
+        self.file_request_intent = token_intent
+        self._client = AzureFileStorage(url=self.url, base_url=self.url, pipeline=self._pipeline,
+                                        allow_trailing_dot=self.allow_trailing_dot,
+                                        allow_source_trailing_dot=self.allow_source_trailing_dot,
+                                        file_request_intent=self.file_request_intent)
+        self._client._config.version = get_api_version(kwargs)  # type: ignore [assignment]
+
+    def _format_url(self, hostname: str) -> str:
+        """Format the endpoint URL according to the current location mode hostname.
+
+        :param str hostname:
+            The hostname of the current location mode.
+        :returns: A formatted endpoint URL including current location mode hostname.
+        :rtype: str
+        """
+        return f"{self.scheme}://{hostname}/{self._query_str}"
+
+    @classmethod
+    def from_connection_string(
+        cls, conn_str: str,
+        credential: Optional[Union[str, Dict[str, str], "AzureNamedKeyCredential", "AzureSasCredential", "AsyncTokenCredential"]] = None,  # pylint: disable=line-too-long
+        **kwargs: Any
+    ) -> Self:
+        """Create ShareServiceClient from a Connection String.
+
+        :param str conn_str:
+            A connection string to an Azure Storage account.
+        :param credential:
+            The credentials with which to authenticate. This is optional if the
+            account URL already has a SAS token. The value can be a SAS token string,
+            an instance of an AzureSasCredential or AzureNamedKeyCredential from azure.core.credentials,
+            an account shared access key, or an instance of an AsyncTokenCredential class from azure.identity.
+            If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential
+            - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError.
+            If using an instance of AzureNamedKeyCredential, "name" should be the storage account name, and "key"
+            should be the storage account key.
+        :type credential:
+            ~azure.core.credentials.AzureNamedKeyCredential or
+            ~azure.core.credentials.AzureSasCredential or
+            ~azure.core.credentials_async.AsyncTokenCredential or
+            str or dict[str, str] or None
+        :returns: A File Share service client.
+        :rtype: ~azure.storage.fileshare.aio.ShareServiceClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/file_samples_authentication_async.py
+                :start-after: [START create_share_service_client_from_conn_string]
+                :end-before: [END create_share_service_client_from_conn_string]
+                :language: python
+                :dedent: 8
+                :caption: Create the share service client with connection string.
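+
+        A minimal sketch (illustrative only; the connection string is a
+        hypothetical placeholder):
+
+        .. code-block:: python
+
+            import asyncio
+            from azure.storage.fileshare.aio import ShareServiceClient
+
+            async def main():
+                async with ShareServiceClient.from_connection_string(
+                        "<connection-string>") as service:
+                    props = await service.get_service_properties()
+                    print(sorted(props.keys()))
+
+            asyncio.run(main())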
+        """
+        account_url, secondary, credential = parse_connection_str(conn_str, credential, 'file')
+        if 'secondary_hostname' not in kwargs:
+            kwargs['secondary_hostname'] = secondary
+        return cls(account_url, credential=credential, **kwargs)
+
+    @distributed_trace_async
+    async def get_service_properties(self, **kwargs: Any) -> Dict[str, Any]:
+        """Gets the properties of a storage account's File Share service, including
+        Azure Storage Analytics.
+
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :returns: A dictionary containing file service properties such as
+            analytics logging, hour/minute metrics, cors rules, etc.
+        :rtype: Dict[str, Any]
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/file_samples_service_async.py
+                :start-after: [START get_service_properties]
+                :end-before: [END get_service_properties]
+                :language: python
+                :dedent: 12
+                :caption: Get file share service properties.
+        """
+        timeout = kwargs.pop('timeout', None)
+        try:
+            service_props = await self._client.service.get_properties(timeout=timeout, **kwargs)
+            return service_properties_deserialize(service_props)
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    @distributed_trace_async
+    async def set_service_properties(
+        self, hour_metrics: Optional["Metrics"] = None,
+        minute_metrics: Optional["Metrics"] = None,
+        cors: Optional[List[CorsRule]] = None,
+        protocol: Optional["ShareProtocolSettings"] = None,
+        **kwargs: Any
+    ) -> None:
+        """Sets the properties of a storage account's File Share service, including
+        Azure Storage Analytics. If an element (e.g. hour_metrics) is left as None, the
+        existing settings on the service for that functionality are preserved.
+
+        :param hour_metrics:
+            The hour metrics settings provide a summary of request
+            statistics grouped by API in hourly aggregates for files.
+        :type hour_metrics: ~azure.storage.fileshare.Metrics
+        :param minute_metrics:
+            The minute metrics settings provide request statistics
+            for each minute for files.
+        :type minute_metrics: ~azure.storage.fileshare.Metrics
+        :param cors:
+            You can include up to five CorsRule elements in the
+            list. If an empty list is specified, all CORS rules will be deleted,
+            and CORS will be disabled for the service.
+        :type cors: list[~azure.storage.fileshare.CorsRule]
+        :param protocol:
+            The protocol settings to set on the service.
+        :type protocol: ~azure.storage.fileshare.ShareProtocolSettings
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :rtype: None
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/file_samples_service_async.py
+                :start-after: [START set_service_properties]
+                :end-before: [END set_service_properties]
+                :language: python
+                :dedent: 8
+                :caption: Sets file share service properties.
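+
+        A minimal sketch that enables hourly metrics with a short retention
+        window (illustrative only; the connection string is a hypothetical
+        placeholder):
+
+        .. code-block:: python
+
+            import asyncio
+            from azure.storage.fileshare import Metrics, RetentionPolicy
+            from azure.storage.fileshare.aio import ShareServiceClient
+
+            async def main():
+                hourly = Metrics(
+                    enabled=True, include_apis=True,
+                    retention_policy=RetentionPolicy(enabled=True, days=7))
+                async with ShareServiceClient.from_connection_string(
+                        "<connection-string>") as service:
+                    # Elements left as None (minute metrics, cors, protocol)
+                    # keep their existing settings on the service.
+                    await service.set_service_properties(hour_metrics=hourly)
+
+            asyncio.run(main())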
+        """
+        timeout = kwargs.pop('timeout', None)
+        props = StorageServiceProperties(
+            hour_metrics=hour_metrics,
+            minute_metrics=minute_metrics,
+            cors=CorsRule._to_generated(cors),  # pylint: disable=protected-access
+            protocol=protocol
+        )
+        try:
+            await self._client.service.set_properties(props, timeout=timeout, **kwargs)
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    @distributed_trace
+    def list_shares(
+        self, name_starts_with: Optional[str] = None,
+        include_metadata: Optional[bool] = False,
+        include_snapshots: Optional[bool] = False,
+        **kwargs: Any
+    ) -> AsyncItemPaged[ShareProperties]:
+        """Returns auto-paging iterable of dict-like ShareProperties under the specified account.
+        The generator will lazily follow the continuation tokens returned by
+        the service and stop when all shares have been returned.
+
+        :param str name_starts_with:
+            Filters the results to return only shares whose names
+            begin with the specified name_starts_with.
+        :param bool include_metadata:
+            Specifies that share metadata be returned in the response.
+        :param bool include_snapshots:
+            Specifies that share snapshots be returned in the response.
+        :keyword bool include_deleted:
+            Specifies that deleted shares be returned in the response.
+            This applies only to accounts with share soft delete enabled.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :returns: An iterable (auto-paging) of ShareProperties.
+        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.storage.fileshare.ShareProperties]
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/file_samples_service_async.py
+                :start-after: [START fsc_list_shares]
+                :end-before: [END fsc_list_shares]
+                :language: python
+                :dedent: 16
+                :caption: List shares in the file share service.
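+
+        A minimal sketch of iterating the shares (illustrative only; the
+        connection string is a hypothetical placeholder):
+
+        .. code-block:: python
+
+            import asyncio
+            from azure.storage.fileshare.aio import ShareServiceClient
+
+            async def main():
+                async with ShareServiceClient.from_connection_string(
+                        "<connection-string>") as service:
+                    async for share in service.list_shares(include_metadata=True):
+                        print(share.name, share.metadata)
+
+            asyncio.run(main())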
+        """
+        timeout = kwargs.pop('timeout', None)
+        include = []
+        include_deleted = kwargs.pop('include_deleted', None)
+        if include_deleted:
+            include.append("deleted")
+        if include_metadata:
+            include.append('metadata')
+        if include_snapshots:
+            include.append('snapshots')
+
+        results_per_page = kwargs.pop('results_per_page', None)
+        command = functools.partial(
+            self._client.service.list_shares_segment,
+            include=include,
+            timeout=timeout,
+            **kwargs)
+        return AsyncItemPaged(
+            command, prefix=name_starts_with, results_per_page=results_per_page,
+            page_iterator_class=SharePropertiesPaged)
+
+    @distributed_trace_async
+    async def create_share(self, share_name: str, **kwargs: Any) -> ShareClient:
+        """Creates a new share under the specified account. If the share
+        with the same name already exists, the operation fails. Returns a client with
+        which to interact with the newly created share.
+
+        :param str share_name: The name of the share to create.
+        :keyword dict[str, str] metadata:
+            A dict with name-value pairs to associate with the
+            share as metadata. Example: {'Category': 'test'}
+        :keyword int quota:
+            Quota in gigabytes (GiB).
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :keyword int provisioned_iops: The provisioned IOPS of the share, stored on the share object.
+        :keyword int provisioned_bandwidth_mibps: The provisioned throughput of the share, stored on the share object.
+        :return: A ShareClient for the newly created Share.
+        :rtype: ~azure.storage.fileshare.aio.ShareClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/file_samples_service_async.py
+                :start-after: [START fsc_create_shares]
+                :end-before: [END fsc_create_shares]
+                :language: python
+                :dedent: 12
+                :caption: Create a share in the file share service.
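+
+        A minimal sketch (illustrative only; the connection string and share
+        name are hypothetical placeholders):
+
+        .. code-block:: python
+
+            import asyncio
+            from azure.storage.fileshare.aio import ShareServiceClient
+
+            async def main():
+                async with ShareServiceClient.from_connection_string(
+                        "<connection-string>") as service:
+                    share = await service.create_share(
+                        "myshare", metadata={"Category": "test"})
+                    print(share.share_name)
+
+            asyncio.run(main())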
+        """
+        metadata = kwargs.pop('metadata', None)
+        quota = kwargs.pop('quota', None)
+        timeout = kwargs.pop('timeout', None)
+        provisioned_iops = kwargs.pop('provisioned_iops', None)
+        provisioned_bandwidth_mibps = kwargs.pop('provisioned_bandwidth_mibps', None)
+        share = self.get_share_client(share_name)
+        kwargs.setdefault('merge_span', True)
+        await share.create_share(
+            metadata=metadata,
+            quota=quota,
+            timeout=timeout,
+            provisioned_iops=provisioned_iops,
+            provisioned_bandwidth_mibps=provisioned_bandwidth_mibps,
+            **kwargs
+        )
+        return share
+
+    @distributed_trace_async
+    async def delete_share(
+        self, share_name: Union[ShareProperties, str],
+        delete_snapshots: Optional[bool] = False,
+        **kwargs: Any
+    ) -> None:
+        """Marks the specified share for deletion. The share is
+        later deleted during garbage collection.
+
+        :param share_name:
+            The share to delete. This can either be the name of the share,
+            or an instance of ShareProperties.
+        :type share_name: str or ~azure.storage.fileshare.ShareProperties
+        :param bool delete_snapshots:
+            Indicates if snapshots are to be deleted.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :rtype: None
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/file_samples_service_async.py
+                :start-after: [START fsc_delete_shares]
+                :end-before: [END fsc_delete_shares]
+                :language: python
+                :dedent: 16
+                :caption: Delete a share in the file share service.
+        """
+        timeout = kwargs.pop('timeout', None)
+        share = self.get_share_client(share_name)
+        kwargs.setdefault('merge_span', True)
+        await share.delete_share(
+            delete_snapshots=delete_snapshots, timeout=timeout, **kwargs)
+
+    @distributed_trace_async
+    async def undelete_share(self, deleted_share_name: str, deleted_share_version: str, **kwargs: Any) -> ShareClient:
+        """Restores soft-deleted share.
+
+        Operation will only be successful if used within the specified number of days
+        set in the delete retention policy.
+
+        .. versionadded:: 12.2.0
+
+            This operation was introduced in API version '2019-12-12'.
+
+        :param str deleted_share_name:
+            Specifies the name of the deleted share to restore.
+        :param str deleted_share_version:
+            Specifies the version of the deleted share to restore.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :return: A ShareClient for the undeleted Share.
+        :rtype: ~azure.storage.fileshare.aio.ShareClient
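+
+        A minimal sketch that restores every soft-deleted share found in a
+        listing (illustrative only; the connection string is a hypothetical
+        placeholder, and the ``deleted``/``version`` properties are assumed to
+        be populated for soft-deleted shares):
+
+        .. code-block:: python
+
+            import asyncio
+            from azure.storage.fileshare.aio import ShareServiceClient
+
+            async def main():
+                async with ShareServiceClient.from_connection_string(
+                        "<connection-string>") as service:
+                    async for share in service.list_shares(include_deleted=True):
+                        if share.deleted:
+                            await service.undelete_share(share.name, share.version)
+
+            asyncio.run(main())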
+        """
+        share = self.get_share_client(deleted_share_name)
+        try:
+            await share._client.share.restore(deleted_share_name=deleted_share_name,  # pylint: disable=protected-access
+                                              deleted_share_version=deleted_share_version,
+                                              timeout=kwargs.pop('timeout', None), **kwargs)
+            return share
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    def get_share_client(
+        self, share: Union[ShareProperties, str],
+        snapshot: Optional[Union[Dict[str, Any], str]] = None
+    ) -> ShareClient:
+        """Get a client to interact with the specified share.
+        The share need not already exist.
+
+        :param share:
+            The share. This can either be the name of the share,
+            or an instance of ShareProperties.
+        :type share: str or ~azure.storage.fileshare.ShareProperties
+        :param str snapshot:
+            An optional share snapshot on which to operate. This can be the snapshot ID string
+            or the response returned from :func:`create_snapshot`.
+        :returns: A ShareClient.
+        :rtype: ~azure.storage.fileshare.aio.ShareClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/file_samples_service_async.py
+                :start-after: [START get_share_client]
+                :end-before: [END get_share_client]
+                :language: python
+                :dedent: 8
+                :caption: Gets the share client.
+        """
+        if isinstance(share, ShareProperties):
+            share_name = share.name
+        else:
+            share_name = share
+
+        _pipeline = AsyncPipeline(
+            transport=AsyncTransportWrapper(self._pipeline._transport),  # pylint: disable = protected-access
+            policies=self._pipeline._impl_policies  # type: ignore [arg-type] # pylint: disable = protected-access
+        )
+        return ShareClient(
+            self.url, share_name=share_name, snapshot=snapshot, credential=self.credential,
+            api_version=self.api_version, _hosts=self._hosts, _configuration=self._config,
+            _pipeline=_pipeline, _location_mode=self._location_mode, allow_trailing_dot=self.allow_trailing_dot,
+            allow_source_trailing_dot=self.allow_source_trailing_dot, token_intent=self.file_request_intent)