Diffstat (limited to '.venv/lib/python3.12/site-packages/azure/storage/filedatalake')
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/filedatalake/__init__.py | 110
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/filedatalake/_data_lake_directory_client.py | 759
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/filedatalake/_data_lake_file_client.py | 983
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/filedatalake/_data_lake_lease.py | 273
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/filedatalake/_data_lake_service_client.py | 633
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/filedatalake/_deserialize.py | 241
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/filedatalake/_download.py | 82
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/filedatalake/_file_system_client.py | 1074
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/__init__.py | 29
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/_azure_data_lake_storage_restapi.py | 112
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/_configuration.py | 61
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/_patch.py | 20
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/_serialization.py | 2050
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/aio/__init__.py | 29
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/aio/_azure_data_lake_storage_restapi.py | 114
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/aio/_configuration.py | 61
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/aio/_patch.py | 20
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/aio/operations/__init__.py | 29
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/aio/operations/_file_system_operations.py | 628
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/aio/operations/_patch.py | 20
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/aio/operations/_path_operations.py | 1968
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/aio/operations/_service_operations.py | 161
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/models/__init__.py | 82
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/models/_azure_data_lake_storage_restapi_enums.py | 90
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/models/_models_py3.py | 1041
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/models/_patch.py | 20
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/operations/__init__.py | 29
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/operations/_file_system_operations.py | 888
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/operations/_patch.py | 20
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/operations/_path_operations.py | 2845
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/operations/_service_operations.py | 208
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/py.typed | 1
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/filedatalake/_list_paths_helper.py | 173
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/filedatalake/_models.py | 1158
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/filedatalake/_path_client.py | 1118
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/filedatalake/_quick_query_helper.py | 73
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/filedatalake/_serialize.py | 185
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/filedatalake/_shared/__init__.py | 54
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/filedatalake/_shared/authentication.py | 245
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/filedatalake/_shared/base_client.py | 458
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/filedatalake/_shared/base_client_async.py | 280
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/filedatalake/_shared/constants.py | 19
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/filedatalake/_shared/models.py | 585
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/filedatalake/_shared/parser.py | 53
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/filedatalake/_shared/policies.py | 694
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/filedatalake/_shared/policies_async.py | 296
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/filedatalake/_shared/request_handlers.py | 270
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/filedatalake/_shared/response_handlers.py | 200
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/filedatalake/_shared/shared_access_signature.py | 252
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/filedatalake/_shared/uploads.py | 604
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/filedatalake/_shared/uploads_async.py | 460
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/filedatalake/_shared_access_signature.py | 462
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/filedatalake/_upload_helper.py | 105
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/filedatalake/_version.py | 7
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/filedatalake/aio/__init__.py | 24
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/filedatalake/aio/_data_lake_directory_client_async.py | 721
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/filedatalake/aio/_data_lake_file_client_async.py | 735
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/filedatalake/aio/_data_lake_lease_async.py | 269
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/filedatalake/aio/_data_lake_service_client_async.py | 570
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/filedatalake/aio/_download_async.py | 82
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/filedatalake/aio/_file_system_client_async.py | 1004
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/filedatalake/aio/_list_paths_helper.py | 176
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/filedatalake/aio/_models.py | 40
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/filedatalake/aio/_path_client_async.py | 901
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/filedatalake/aio/_upload_helper.py | 104
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/filedatalake/py.typed | 0
66 files changed, 27058 insertions, 0 deletions
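
The diffstat above covers a vendored copy of the azure-storage-file-datalake client library. For orientation, here is a minimal sketch of the package's typical entry point; the account URL, credential, and file system name are placeholders, not values taken from this diff:

    from azure.storage.filedatalake import DataLakeServiceClient

    # Placeholder endpoint and credential; substitute real values.
    service = DataLakeServiceClient(
        account_url="https://<account>.dfs.core.windows.net",
        credential="<account-key-or-sas-token>",
    )
    file_system = service.get_file_system_client("my-filesystem")  # hypothetical name
    for path in file_system.get_paths(recursive=True):
        print(path.name)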
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/__init__.py b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/__init__.py
new file mode 100644
index 00000000..1dbc5064
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/__init__.py
@@ -0,0 +1,110 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+
+from ._download import StorageStreamDownloader
+from ._data_lake_file_client import DataLakeFileClient
+from ._data_lake_directory_client import DataLakeDirectoryClient
+from ._file_system_client import FileSystemClient
+from ._data_lake_service_client import DataLakeServiceClient
+from ._data_lake_lease import DataLakeLeaseClient
+from ._models import (
+    AccessControlChangeCounters,
+    AccessControlChangeFailure,
+    AccessControlChangeResult,
+    AccessControlChanges,
+    AccessPolicy,
+    AccountSasPermissions,
+    AnalyticsLogging,
+    ArrowDialect,
+    ArrowType,
+    ContentSettings,
+    CorsRule,
+    CustomerProvidedEncryptionKey,
+    DataLakeFileQueryError,
+    DeletedPathProperties,
+    DelimitedJsonDialect,
+    DelimitedTextDialect,
+    DirectoryProperties,
+    DirectorySasPermissions,
+    EncryptionScopeOptions,
+    FileProperties,
+    FileSasPermissions,
+    FileSystemProperties,
+    FileSystemPropertiesPaged,
+    FileSystemSasPermissions,
+    LeaseProperties,
+    LocationMode,
+    Metrics,
+    PathProperties,
+    PublicAccess,
+    QuickQueryDialect,
+    ResourceTypes,
+    RetentionPolicy,
+    StaticWebsite,
+    UserDelegationKey,
+)
+
+from ._shared_access_signature import generate_account_sas, generate_file_system_sas, generate_directory_sas, \
+    generate_file_sas
+
+from ._shared.policies import ExponentialRetry, LinearRetry
+from ._shared.models import StorageErrorCode, Services
+from ._version import VERSION
+
+__version__ = VERSION
+
+__all__ = [
+    'AccessControlChangeCounters',
+    'AccessControlChangeFailure',
+    'AccessControlChangeResult',
+    'AccessControlChanges',
+    'AccessPolicy',
+    'AccountSasPermissions',
+    'AnalyticsLogging',
+    'ArrowDialect',
+    'ArrowType',
+    'ContentSettings',
+    'CorsRule',
+    'CustomerProvidedEncryptionKey',
+    'DataLakeDirectoryClient',
+    'DataLakeFileClient',
+    'DataLakeFileQueryError',
+    'DataLakeLeaseClient',
+    'DataLakeServiceClient',
+    'DeletedPathProperties',
+    'DelimitedJsonDialect',
+    'DelimitedTextDialect',
+    'DirectoryProperties',
+    'DirectorySasPermissions',
+    'EncryptionScopeOptions',
+    'ExponentialRetry',
+    'FileProperties',
+    'FileSasPermissions',
+    'FileSystemClient',
+    'FileSystemProperties',
+    'FileSystemPropertiesPaged',
+    'FileSystemSasPermissions',
+    'generate_account_sas',
+    'generate_directory_sas',
+    'generate_file_sas',
+    'generate_file_system_sas',
+    'LeaseProperties',
+    'LinearRetry',
+    'LocationMode',
+    'Metrics',
+    'PathProperties',
+    'PublicAccess',
+    'QuickQueryDialect',
+    'ResourceTypes',
+    'RetentionPolicy',
+    'StaticWebsite',
+    'StorageErrorCode',
+    'StorageStreamDownloader',
+    'UserDelegationKey',
+    'VERSION',
+    'Services'
+]
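
The __init__.py above defines the package's public surface, including the SAS generation helpers. A hedged sketch of generating a file-system SAS with those exports (the account name, key, and file system name below are placeholders):

    from datetime import datetime, timedelta, timezone
    from azure.storage.filedatalake import (
        FileSystemSasPermissions,
        generate_file_system_sas,
    )

    # All identifying values are placeholders.
    sas_token = generate_file_system_sas(
        account_name="myaccount",
        file_system_name="my-filesystem",
        credential="<account-key>",
        permission=FileSystemSasPermissions(read=True, list=True),
        expiry=datetime.now(timezone.utc) + timedelta(hours=1),
    )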
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_data_lake_directory_client.py b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_data_lake_directory_client.py
new file mode 100644
index 00000000..0aa26b8a
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_data_lake_directory_client.py
@@ -0,0 +1,759 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+# pylint: disable=docstring-keyword-should-match-keyword-only
+
+import functools
+from typing import (
+    Any, Dict, Optional, Union,
+    TYPE_CHECKING
+)
+from urllib.parse import quote, unquote
+
+from typing_extensions import Self
+
+from azure.core.paging import ItemPaged
+from azure.core.pipeline import Pipeline
+from azure.core.tracing.decorator import distributed_trace
+
+from ._data_lake_file_client import DataLakeFileClient
+from ._deserialize import deserialize_dir_properties
+from ._list_paths_helper import PathPropertiesPaged
+from ._models import DirectoryProperties, FileProperties
+from ._path_client import PathClient
+from ._shared.base_client import TransportWrapper, parse_connection_str
+
+if TYPE_CHECKING:
+    from azure.core.credentials import AzureNamedKeyCredential, AzureSasCredential, TokenCredential
+    from datetime import datetime
+    from ._models import PathProperties
+
+
+class DataLakeDirectoryClient(PathClient):
+    """A client to interact with the DataLake directory, even if the directory may not yet exist.
+
+    For operations relating to a specific subdirectory or file under the directory, a directory client or file client
+    can be retrieved using the :func:`~get_sub_directory_client` or :func:`~get_file_client` functions.
+
+    :ivar str url:
+        The full endpoint URL to the file system, including SAS token if used.
+    :ivar str primary_endpoint:
+        The full primary endpoint URL.
+    :ivar str primary_hostname:
+        The hostname of the primary endpoint.
+    :param str account_url:
+        The URI to the storage account.
+    :param file_system_name:
+        The file system for the directory or files.
+    :type file_system_name: str
+    :param directory_name:
+        The whole path of the directory. e.g. {directory under file system}/{directory to interact with}
+    :type directory_name: str
+    :param credential:
+        The credentials with which to authenticate. This is optional if the
+        account URL already has a SAS token. The value can be a SAS token string,
+        an instance of an AzureSasCredential or AzureNamedKeyCredential from azure.core.credentials,
+        an account shared access key, or an instance of a TokenCredentials class from azure.identity.
+        If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential
+        - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError.
+        If using an instance of AzureNamedKeyCredential, "name" should be the storage account name, and "key"
+        should be the storage account key.
+    :type credential:
+        ~azure.core.credentials.AzureNamedKeyCredential or
+        ~azure.core.credentials.AzureSasCredential or
+        ~azure.core.credentials.TokenCredential or
+        str or dict[str, str] or None
+    :keyword str api_version:
+        The Storage API version to use for requests. Default value is the most recent service version that is
+        compatible with the current SDK. Setting to an older version may result in reduced feature compatibility.
+    :keyword str audience: The audience to use when requesting tokens for Azure Active Directory
+        authentication. Only has an effect when credential is of type TokenCredential. The value could be
+        https://storage.azure.com/ (default) or https://<account>.blob.core.windows.net.
+
+    .. admonition:: Example:
+
+        .. literalinclude:: ../samples/datalake_samples_instantiate_client.py
+            :start-after: [START instantiate_directory_client_from_conn_str]
+            :end-before: [END instantiate_directory_client_from_conn_str]
+            :language: python
+            :dedent: 4
+            :caption: Creating the DataLakeServiceClient from connection string.
+    """
+    def __init__(
+        self, account_url: str,
+        file_system_name: str,
+        directory_name: str,
+        credential: Optional[Union[str, Dict[str, str], "AzureNamedKeyCredential", "AzureSasCredential", "TokenCredential"]] = None,  # pylint: disable=line-too-long
+        **kwargs: Any
+    ) -> None:
+        super(DataLakeDirectoryClient, self).__init__(account_url, file_system_name, path_name=directory_name,
+                                                      credential=credential, **kwargs)
+
+    @classmethod
+    def from_connection_string(
+            cls, conn_str: str,
+            file_system_name: str,
+            directory_name: str,
+            credential: Optional[Union[str, Dict[str, str], "AzureNamedKeyCredential", "AzureSasCredential", "TokenCredential"]] = None,  # pylint: disable=line-too-long
+            **kwargs: Any
+        ) -> Self:
+        """
+        Create DataLakeDirectoryClient from a Connection String.
+
+        :param str conn_str:
+            A connection string to an Azure Storage account.
+        :param file_system_name:
+            The name of the file system to interact with.
+        :type file_system_name: str
+        :param credential:
+            The credentials with which to authenticate. This is optional if the
+            account URL already has a SAS token. The value can be a SAS token string,
+            an instance of an AzureSasCredential or AzureNamedKeyCredential from azure.core.credentials,
+            an account shared access key, or an instance of a TokenCredentials class from azure.identity.
+            If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential
+            - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError.
+            If using an instance of AzureNamedKeyCredential, "name" should be the storage account name, and "key"
+            should be the storage account key.
+        :type credential:
+            ~azure.core.credentials.AzureNamedKeyCredential or
+            ~azure.core.credentials.AzureSasCredential or
+            ~azure.core.credentials.TokenCredential or
+            str or dict[str, str] or None
+        :param directory_name:
+            The name of the directory to interact with. The directory is under the specified file system.
+        :type directory_name: str
+        :keyword str audience: The audience to use when requesting tokens for Azure Active Directory
+            authentication. Only has an effect when credential is of type TokenCredential. The value could be
+            https://storage.azure.com/ (default) or https://<account>.blob.core.windows.net.
+        :return: A DataLakeDirectoryClient.
+        :rtype: ~azure.storage.filedatalake.DataLakeDirectoryClient
+        """
+        account_url, _, credential = parse_connection_str(conn_str, credential, 'dfs')
+        return cls(
+            account_url, file_system_name=file_system_name, directory_name=directory_name,
+            credential=credential, **kwargs)
+
+    @distributed_trace
+    def create_directory(self, metadata=None,  # type: Optional[Dict[str, str]]
+                         **kwargs):
+        # type: (...) -> Dict[str, Union[str, datetime]]
+        """
+        Create a new directory.
+
+        :param metadata:
+            Name-value pairs associated with the file as metadata.
+        :type metadata: dict(str, str)
+        :keyword ~azure.storage.filedatalake.ContentSettings content_settings:
+            ContentSettings object used to set path properties.
+        :keyword lease:
+            Required if the file has an active lease. Value can be a DataLakeLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
+        :keyword str umask:
+            Optional and only valid if Hierarchical Namespace is enabled for the account.
+            When creating a file or directory and the parent folder does not have a default ACL,
+            the umask restricts the permissions of the file or directory to be created.
+            The resulting permission is given by p & ^u, where p is the permission and u is the umask.
+            For example, if p is 0777 and u is 0057, then the resulting permission is 0720.
+            The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027.
+            The umask must be specified in 4-digit octal notation (e.g. 0766).
+        :keyword str owner:
+            The owner of the file or directory.
+        :keyword str group:
+            The owning group of the file or directory.
+        :keyword str acl:
+            Sets POSIX access control rights on files and directories. The value is a
+            comma-separated list of access control entries. Each access control entry (ACE) consists of a
+            scope, a type, a user or group identifier, and permissions in the format
+            "[scope:][type]:[id]:[permissions]".
+        :keyword str lease_id:
+            Proposed lease ID, in a GUID string format. The DataLake service returns
+            400 (Invalid request) if the proposed lease ID is not in the correct format.
+        :keyword int lease_duration:
+            Specifies the duration of the lease, in seconds, or negative one
+            (-1) for a lease that never expires. A non-infinite lease can be
+            between 15 and 60 seconds. A lease duration cannot be changed
+            using renew or change.
+        :keyword str permissions:
+            Optional and only valid if Hierarchical Namespace
+            is enabled for the account. Sets POSIX access permissions for the file
+            owner, the file owning group, and others. Each class may be granted
+            read, write, or execute permission.  The sticky bit is also supported.
+            Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are
+            supported.
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword ~azure.storage.filedatalake.CustomerProvidedEncryptionKey cpk:
+            Encrypts the data on the service-side with the given key.
+            Use of customer-provided keys must be done over HTTPS.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :return: A dictionary of response headers.
+        :rtype: dict[str, Union[str, datetime]]
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_directory.py
+                :start-after: [START create_directory]
+                :end-before: [END create_directory]
+                :language: python
+                :dedent: 8
+                :caption: Create directory.
+        """
+        return self._create('directory', metadata=metadata, **kwargs)
+
+    @distributed_trace
+    def delete_directory(self, **kwargs):
+        # type: (...) -> None
+        """
+        Marks the specified directory for deletion.
+
+        :keyword lease:
+            Required if the file has an active lease. Value can be a LeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :returns: None.
+        :rtype: None
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_directory.py
+                :start-after: [START delete_directory]
+                :end-before: [END delete_directory]
+                :language: python
+                :dedent: 4
+                :caption: Delete directory.
+        """
+        return self._delete(recursive=True, **kwargs)
+
+    @distributed_trace
+    def get_directory_properties(self, **kwargs):
+        # type: (**Any) -> DirectoryProperties
+        """Returns all user-defined metadata, standard HTTP properties, and
+        system properties for the directory. It does not return the content of the directory.
+
+        :keyword lease:
+            Required if the directory or file has an active lease. Value can be a DataLakeLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword ~azure.storage.filedatalake.CustomerProvidedEncryptionKey cpk:
+            Decrypts the data on the service-side with the given key.
+            Use of customer-provided keys must be done over HTTPS.
+            Required if the directory was created with a customer-provided key.
+        :keyword bool upn:
+            If True, the user identity values returned in the x-ms-owner, x-ms-group,
+            and x-ms-acl response headers will be transformed from Azure Active Directory Object IDs to User
+            Principal Names in the owner, group, and acl fields of
+            :class:`~azure.storage.filedatalake.DirectoryProperties`. If False, the values will be returned
+            as Azure Active Directory Object IDs. The default value is False. Note that group and application
+            Object IDs are not translated because they do not have unique friendly names.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :returns:
+            DirectoryProperties with all user-defined metadata, standard HTTP properties,
+            and system properties for the directory. It does not return the content of the directory.
+        :rtype: ~azure.storage.filedatalake.DirectoryProperties
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_directory.py
+                :start-after: [START get_directory_properties]
+                :end-before: [END get_directory_properties]
+                :language: python
+                :dedent: 4
+                :caption: Getting the properties for a file/directory.
+        """
+        upn = kwargs.pop('upn', None)
+        if upn:
+            headers = kwargs.pop('headers', {})
+            headers['x-ms-upn'] = str(upn)
+            kwargs['headers'] = headers
+        return self._get_path_properties(cls=deserialize_dir_properties, **kwargs)
+
+    @distributed_trace
+    def exists(self, **kwargs):
+        # type: (**Any) -> bool
+        """
+        Returns True if a directory exists and returns False otherwise.
+
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :returns: True if a directory exists, False otherwise.
+        :rtype: bool
+        """
+        return self._exists(**kwargs)
+
+    @distributed_trace
+    def rename_directory(self, new_name, **kwargs):
+        # type: (str, **Any) -> DataLakeDirectoryClient
+        """
+        Rename the source directory.
+
+        :param str new_name:
+            The new directory name the user wants to rename to.
+            The value must have the following format: "{filesystem}/{directory}/{subdirectory}".
+        :keyword source_lease:
+            A lease ID for the source path. If specified,
+            the source path must have an active lease and the lease ID must
+            match.
+        :paramtype source_lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
+        :keyword lease:
+            Required if the file/directory has an active lease. Value can be a LeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword ~datetime.datetime source_if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime source_if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str source_etag:
+            The source ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions source_match_condition:
+            The source match condition to use upon the etag.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :returns: A DataLakeDirectoryClient with the renamed directory.
+        :rtype: ~azure.storage.filedatalake.DataLakeDirectoryClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_directory.py
+                :start-after: [START rename_directory]
+                :end-before: [END rename_directory]
+                :language: python
+                :dedent: 4
+                :caption: Rename the source directory.
+        """
+        new_file_system, new_path, new_dir_sas = self._parse_rename_path(new_name)
+
+        new_directory_client = DataLakeDirectoryClient(
+            f"{self.scheme}://{self.primary_hostname}", new_file_system, directory_name=new_path,
+            credential=self._raw_credential or new_dir_sas, _hosts=self._hosts, _configuration=self._config,
+            _pipeline=self._pipeline)
+        new_directory_client._rename_path(  # pylint: disable=protected-access
+            f'/{quote(unquote(self.file_system_name))}/{quote(unquote(self.path_name))}{self._query_str}', **kwargs)
+        return new_directory_client
+
+    @distributed_trace
+    def create_sub_directory(self, sub_directory,  # type: Union[DirectoryProperties, str]
+                             metadata=None,  # type: Optional[Dict[str, str]]
+                             **kwargs):
+        # type: (...) -> DataLakeDirectoryClient
+        """
+        Create a subdirectory and return the subdirectory client to be interacted with.
+
+        :param sub_directory:
+            The directory with which to interact. This can either be the name of the directory,
+            or an instance of DirectoryProperties.
+        :type sub_directory: str or ~azure.storage.filedatalake.DirectoryProperties
+        :param metadata:
+            Name-value pairs associated with the file as metadata.
+        :type metadata: dict(str, str)
+        :keyword ~azure.storage.filedatalake.ContentSettings content_settings:
+            ContentSettings object used to set path properties.
+        :keyword lease:
+            Required if the file has an active lease. Value can be a DataLakeLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
+        :keyword str umask:
+            Optional and only valid if Hierarchical Namespace is enabled for the account.
+            When creating a file or directory and the parent folder does not have a default ACL,
+            the umask restricts the permissions of the file or directory to be created.
+            The resulting permission is given by p & ^u, where p is the permission and u is the umask.
+            For example, if p is 0777 and u is 0057, then the resulting permission is 0720.
+            The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027.
+            The umask must be specified in 4-digit octal notation (e.g. 0766).
+        :keyword str owner:
+            The owner of the file or directory.
+        :keyword str group:
+            The owning group of the file or directory.
+        :keyword str acl:
+            Sets POSIX access control rights on files and directories. The value is a
+            comma-separated list of access control entries. Each access control entry (ACE) consists of a
+            scope, a type, a user or group identifier, and permissions in the format
+            "[scope:][type]:[id]:[permissions]".
+        :keyword str lease_id:
+            Proposed lease ID, in a GUID string format. The DataLake service returns
+            400 (Invalid request) if the proposed lease ID is not in the correct format.
+        :keyword int lease_duration:
+            Specifies the duration of the lease, in seconds, or negative one
+            (-1) for a lease that never expires. A non-infinite lease can be
+            between 15 and 60 seconds. A lease duration cannot be changed
+            using renew or change.
+        :keyword str permissions:
+            Optional and only valid if Hierarchical Namespace
+            is enabled for the account. Sets POSIX access permissions for the file
+            owner, the file owning group, and others. Each class may be granted
+            read, write, or execute permission.  The sticky bit is also supported.
+            Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are
+            supported.
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword ~azure.storage.filedatalake.CustomerProvidedEncryptionKey cpk:
+            Encrypts the data on the service-side with the given key.
+            Use of customer-provided keys must be done over HTTPS.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :returns: DataLakeDirectoryClient for the subdirectory.
+        :rtype: ~azure.storage.filedatalake.DataLakeDirectoryClient
+        """
+        subdir = self.get_sub_directory_client(sub_directory)
+        subdir.create_directory(metadata=metadata, **kwargs)
+        return subdir
+
+    @distributed_trace
+    def delete_sub_directory(self, sub_directory,  # type: Union[DirectoryProperties, str]
+                             **kwargs):
+        # type: (...) -> DataLakeDirectoryClient
+        """
+        Marks the specified subdirectory for deletion.
+
+        :param sub_directory:
+            The directory with which to interact. This can either be the name of the directory,
+            or an instance of DirectoryProperties.
+        :type sub_directory: str or ~azure.storage.filedatalake.DirectoryProperties
+        :keyword lease:
+            Required if the file has an active lease. Value can be a LeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :returns: DataLakeDirectoryClient for the subdirectory.
+        :rtype: ~azure.storage.filedatalake.DataLakeDirectoryClient
+        """
+        subdir = self.get_sub_directory_client(sub_directory)
+        subdir.delete_directory(**kwargs)
+        return subdir
+
+    @distributed_trace
+    def create_file(self, file,  # type: Union[FileProperties, str]
+                    **kwargs):
+        # type: (...) -> DataLakeFileClient
+        """
+        Create a new file and return the file client to be interacted with.
+
+        :param file:
+            The file with which to interact. This can either be the name of the file,
+            or an instance of FileProperties.
+        :type file: str or ~azure.storage.filedatalake.FileProperties
+        :keyword ~azure.storage.filedatalake.ContentSettings content_settings:
+            ContentSettings object used to set path properties.
+        :keyword metadata:
+            Name-value pairs associated with the file as metadata.
+        :type metadata: dict(str, str)
+        :keyword lease:
+            Required if the file has an active lease. Value can be a DataLakeLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
+        :keyword str umask:
+            Optional and only valid if Hierarchical Namespace is enabled for the account.
+            When creating a file or directory and the parent folder does not have a default ACL,
+            the umask restricts the permissions of the file or directory to be created.
+            The resulting permission is given by p & ^u, where p is the permission and u is the umask.
+            For example, if p is 0777 and u is 0057, then the resulting permission is 0720.
+            The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027.
+            The umask must be specified in 4-digit octal notation (e.g. 0766).
+        :keyword str owner:
+            The owner of the file or directory.
+        :keyword str group:
+            The owning group of the file or directory.
+        :keyword str acl:
+            Sets POSIX access control rights on files and directories. The value is a
+            comma-separated list of access control entries. Each access control entry (ACE) consists of a
+            scope, a type, a user or group identifier, and permissions in the format
+            "[scope:][type]:[id]:[permissions]".
+        :keyword str lease_id:
+            Proposed lease ID, in a GUID string format. The DataLake service returns
+            400 (Invalid request) if the proposed lease ID is not in the correct format.
+        :keyword int lease_duration:
+            Specifies the duration of the lease, in seconds, or negative one
+            (-1) for a lease that never expires. A non-infinite lease can be
+            between 15 and 60 seconds. A lease duration cannot be changed
+            using renew or change.
+        :keyword expires_on:
+            The time to set the file to expiry.
+            If the type of expires_on is an int, expiration time will be set
+            as the number of milliseconds elapsed from creation time.
+            If the type of expires_on is datetime, expiration time will be set
+            absolute to the time provided. If no time zone info is provided, this
+            will be interpreted as UTC.
+        :paramtype expires_on: datetime or int
+        :keyword str permissions:
+            Optional and only valid if Hierarchical Namespace
+            is enabled for the account. Sets POSIX access permissions for the file
+            owner, the file owning group, and others. Each class may be granted
+            read, write, or execute permission.  The sticky bit is also supported.
+            Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are
+            supported.
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword ~azure.storage.filedatalake.CustomerProvidedEncryptionKey cpk:
+            Encrypts the data on the service-side with the given key.
+            Use of customer-provided keys must be done over HTTPS.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :returns: A DataLakeFileClient with newly created file.
+        :rtype: ~azure.storage.filedatalake.DataLakeFileClient
+        """
+        file_client = self.get_file_client(file)
+        file_client.create_file(**kwargs)
+        return file_client
+
+    @distributed_trace
+    def get_paths(
+        self, *,
+        recursive: bool = True,
+        max_results: Optional[int] = None,
+        upn: Optional[bool] = None,
+        timeout: Optional[int] = None,
+        **kwargs: Any
+    ) -> ItemPaged["PathProperties"]:
+        """Returns a generator to list the paths under specified file system and directory.
+        The generator will lazily follow the continuation tokens returned by the service.
+
+        :keyword bool recursive: Set True for recursive, False for iterative. The default value is True.
+        :keyword Optional[int] max_results: An optional value that specifies the maximum
+            number of items to return per page. If omitted or greater than 5,000, the
+            response will include up to 5,000 items per page.
+        :keyword Optional[bool] upn:
+            If True, the user identity values returned in the x-ms-owner, x-ms-group,
+            and x-ms-acl response headers will be transformed from Azure Active Directory Object IDs to User
+            Principal Names in the owner, group, and acl fields of
+            :class:`~azure.storage.filedatalake.PathProperties`. If False, the values will be returned
+            as Azure Active Directory Object IDs. The default value is None. Note that group and application
+            Object IDs are not translated because they do not have unique friendly names.
+        :keyword Optional[int] timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_. The default value is None.
+        :returns: An iterable (auto-paging) response of PathProperties.
+        :rtype: ~azure.core.paging.ItemPaged[~azure.storage.filedatalake.PathProperties]
+        """
+        hostname = self._hosts[self._location_mode]
+        url = f"{self.scheme}://{hostname}/{quote(self.file_system_name)}"
+        client = self._build_generated_client(url)
+        command = functools.partial(
+            client.file_system.list_paths,
+            path=self.path_name,
+            timeout=timeout,
+            **kwargs
+        )
+        return ItemPaged(
+            command, recursive, path=self.path_name, max_results=max_results,
+            upn=upn, page_iterator_class=PathPropertiesPaged, **kwargs)
+
+    def get_file_client(self, file  # type: Union[FileProperties, str]
+                        ):
+        # type: (...) -> DataLakeFileClient
+        """Get a client to interact with the specified file.
+
+        The file need not already exist.
+
+        :param file:
+            The file with which to interact. This can either be the name of the file,
+            or an instance of FileProperties. e.g. "directory/subdirectory/file"
+        :type file: str or ~azure.storage.filedatalake.FileProperties
+        :returns: A DataLakeFileClient.
+        :rtype: ~azure.storage.filedatalake.DataLakeFileClient
+        """
+        try:
+            file_path = file.get('name')
+        except AttributeError:
+            file_path = self.path_name + '/' + str(file)
+
+        _pipeline = Pipeline(
+            transport=TransportWrapper(self._pipeline._transport), # pylint: disable = protected-access
+            policies=self._pipeline._impl_policies # pylint: disable = protected-access
+        )
+        return DataLakeFileClient(
+            self.url, self.file_system_name, file_path=file_path, credential=self._raw_credential,
+            api_version=self.api_version,
+            _hosts=self._hosts, _configuration=self._config, _pipeline=_pipeline)
+
+    def get_sub_directory_client(self, sub_directory  # type: Union[DirectoryProperties, str]
+                                 ):
+        # type: (...) -> DataLakeDirectoryClient
+        """Get a client to interact with the specified subdirectory of the current directory.
+
+        The subdirectory need not already exist.
+
+        :param sub_directory:
+            The directory with which to interact. This can either be the name of the directory,
+            or an instance of DirectoryProperties.
+        :type sub_directory: str or ~azure.storage.filedatalake.DirectoryProperties
+        :returns: A DataLakeDirectoryClient.
+        :rtype: ~azure.storage.filedatalake.DataLakeDirectoryClient
+        """
+        try:
+            subdir_path = sub_directory.get('name')
+        except AttributeError:
+            subdir_path = self.path_name + '/' + str(sub_directory)
+
+        _pipeline = Pipeline(
+            transport=TransportWrapper(self._pipeline._transport), # pylint: disable = protected-access
+            policies=self._pipeline._impl_policies # pylint: disable = protected-access
+        )
+        return DataLakeDirectoryClient(
+            self.url, self.file_system_name, directory_name=subdir_path, credential=self._raw_credential,
+            api_version=self.api_version,
+            _hosts=self._hosts, _configuration=self._config, _pipeline=_pipeline)
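
The DataLakeDirectoryClient above layers directory-level operations on top of PathClient. A minimal usage sketch, assuming a placeholder connection string and hypothetical file system and directory names:

    from azure.storage.filedatalake import DataLakeDirectoryClient

    # Connection string and names are placeholders.
    directory = DataLakeDirectoryClient.from_connection_string(
        conn_str="<connection-string>",
        file_system_name="my-filesystem",
        directory_name="parent/child",
    )
    directory.create_directory()
    file_client = directory.create_file("data.csv")  # returns a DataLakeFileClient
    file_client.upload_data(b"col1,col2\n1,2\n", overwrite=True)
    # rename_directory expects "{filesystem}/{full directory path}".
    renamed = directory.rename_directory("my-filesystem/parent/child-renamed")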
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_data_lake_file_client.py b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_data_lake_file_client.py
new file mode 100644
index 00000000..386e8bf1
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_data_lake_file_client.py
@@ -0,0 +1,983 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+# pylint: disable=docstring-keyword-should-match-keyword-only
+
+from datetime import datetime
+from io import BytesIO
+from typing import (
+    Any, AnyStr, AsyncIterable, Dict, IO, Iterable, Optional, Union,
+    TYPE_CHECKING
+)
+from urllib.parse import quote, unquote
+
+from typing_extensions import Self
+
+from azure.core.exceptions import HttpResponseError
+from azure.core.tracing.decorator import distributed_trace
+from ._quick_query_helper import DataLakeFileQueryReader
+from ._shared.base_client import parse_connection_str
+from ._shared.request_handlers import get_length, read_length
+from ._shared.response_handlers import return_response_headers
+from ._shared.uploads import IterStreamer
+from ._shared.uploads_async import AsyncIterStreamer
+from ._upload_helper import upload_datalake_file
+from ._download import StorageStreamDownloader
+from ._path_client import PathClient
+from ._serialize import get_mod_conditions, get_path_http_headers, get_access_conditions, add_metadata_headers, \
+    convert_datetime_to_rfc1123, get_cpk_info, get_lease_action_properties
+from ._deserialize import process_storage_error, deserialize_file_properties
+from ._models import FileProperties, DataLakeFileQueryError
+
+if TYPE_CHECKING:
+    from azure.core.credentials import AzureNamedKeyCredential, AzureSasCredential, TokenCredential
+    from ._models import ContentSettings
+
+
+class DataLakeFileClient(PathClient):
+    """A client to interact with the DataLake file, even if the file may not yet exist.
+
+    :ivar str url:
+        The full endpoint URL to the file system, including SAS token if used.
+    :ivar str primary_endpoint:
+        The full primary endpoint URL.
+    :ivar str primary_hostname:
+        The hostname of the primary endpoint.
+    :param str account_url:
+        The URI to the storage account.
+    :param file_system_name:
+        The file system for the directory or files.
+    :type file_system_name: str
+    :param file_path:
+        The whole file path, used to interact with a specific file,
+        e.g. "{directory}/{subdirectory}/{file}".
+    :type file_path: str
+    :param credential:
+        The credentials with which to authenticate. This is optional if the
+        account URL already has a SAS token. The value can be a SAS token string,
+        an instance of an AzureSasCredential or AzureNamedKeyCredential from azure.core.credentials,
+        an account shared access key, or an instance of a TokenCredential class from azure.identity.
+        If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential
+        - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError.
+        If using an instance of AzureNamedKeyCredential, "name" should be the storage account name, and "key"
+        should be the storage account key.
+    :type credential:
+        ~azure.core.credentials.AzureNamedKeyCredential or
+        ~azure.core.credentials.AzureSasCredential or
+        ~azure.core.credentials.TokenCredential or
+        str or dict[str, str] or None
+    :keyword str api_version:
+        The Storage API version to use for requests. Default value is the most recent service version that is
+        compatible with the current SDK. Setting to an older version may result in reduced feature compatibility.
+    :keyword str audience: The audience to use when requesting tokens for Azure Active Directory
+        authentication. Only has an effect when credential is of type TokenCredential. The value could be
+        https://storage.azure.com/ (default) or https://<account>.blob.core.windows.net.
+
+    .. admonition:: Example:
+
+        .. literalinclude:: ../samples/datalake_samples_instantiate_client.py
+            :start-after: [START instantiate_file_client_from_conn_str]
+            :end-before: [END instantiate_file_client_from_conn_str]
+            :language: python
+            :dedent: 4
+            :caption: Creating the DataLakeFileClient from connection string.
+    """
+    def __init__(
+        self, account_url: str,
+        file_system_name: str,
+        file_path: str,
+        credential: Optional[Union[str, Dict[str, str], "AzureNamedKeyCredential", "AzureSasCredential", "TokenCredential"]] = None,  # pylint: disable=line-too-long
+        **kwargs: Any
+    ) -> None:
+        super(DataLakeFileClient, self).__init__(account_url, file_system_name, path_name=file_path,
+                                                 credential=credential, **kwargs)
+
+    @classmethod
+    def from_connection_string(
+            cls, conn_str: str,
+            file_system_name: str,
+            file_path: str,
+            credential: Optional[Union[str, Dict[str, str], "AzureNamedKeyCredential", "AzureSasCredential", "TokenCredential"]] = None,  # pylint: disable=line-too-long
+            **kwargs: Any
+        ) -> Self:
+        """
+        Create DataLakeFileClient from a Connection String.
+
+        :param str conn_str:
+            A connection string to an Azure Storage account.
+        :param file_system_name: The name of file system to interact with.
+        :type file_system_name: str
+        :param str file_path:
+            The whole file path, used to interact with a specific file,
+            e.g. "{directory}/{subdirectory}/{file}".
+        :param credential:
+            The credentials with which to authenticate. This is optional if the
+            account URL already has a SAS token, or the connection string already has shared
+            access key values. The value can be a SAS token string,
+            an instance of an AzureSasCredential or AzureNamedKeyCredential from azure.core.credentials,
+            an account shared access key, or an instance of a TokenCredential class from azure.identity.
+            Credentials provided here will take precedence over those in the connection string.
+            If using an instance of AzureNamedKeyCredential, "name" should be the storage account name, and "key"
+            should be the storage account key.
+        :type credential:
+            ~azure.core.credentials.AzureNamedKeyCredential or
+            ~azure.core.credentials.AzureSasCredential or
+            ~azure.core.credentials.TokenCredential or
+            str or dict[str, str] or None
+        :keyword str audience: The audience to use when requesting tokens for Azure Active Directory
+            authentication. Only has an effect when credential is of type TokenCredential. The value could be
+            https://storage.azure.com/ (default) or https://<account>.blob.core.windows.net.
+        :returns: A DataLakeFileClient.
+        :rtype: ~azure.storage.filedatalake.DataLakeFileClient
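+
+        A minimal sketch (the connection string and names below are
+        placeholders):
+
+        .. code-block:: python
+
+            file_client = DataLakeFileClient.from_connection_string(
+                "<your-connection-string>", file_system_name="my-fs",
+                file_path="dir/sub/file.txt")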
+        """
+        account_url, _, credential = parse_connection_str(conn_str, credential, 'dfs')
+        return cls(
+            account_url, file_system_name=file_system_name, file_path=file_path,
+            credential=credential, **kwargs)
+
+    @distributed_trace
+    def create_file(self, content_settings=None,  # type: Optional[ContentSettings]
+                    metadata=None,  # type: Optional[Dict[str, str]]
+                    **kwargs):
+        # type: (...) -> Dict[str, Union[str, datetime]]
+        """
+        Create a new file.
+
+        :param ~azure.storage.filedatalake.ContentSettings content_settings:
+            ContentSettings object used to set path properties.
+        :param metadata:
+            Name-value pairs associated with the file as metadata.
+        :type metadata: Optional[Dict[str, str]]
+        :keyword lease:
+            Required if the file has an active lease. Value can be a DataLakeLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
+        :keyword str umask:
+            Optional and only valid if Hierarchical Namespace is enabled for the account.
+            When creating a file or directory and the parent folder does not have a default ACL,
+            the umask restricts the permissions of the file or directory to be created.
+            The resulting permission is given by p & ^u, where p is the permission and u is the umask.
+            For example, if p is 0777 and u is 0057, then the resulting permission is 0720.
+            The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027.
+            The umask must be specified in 4-digit octal notation (e.g. 0766).
+        :keyword str owner:
+            The owner of the file or directory.
+        :keyword str group:
+            The owning group of the file or directory.
+        :keyword str acl:
+            Sets POSIX access control rights on files and directories. The value is a
+            comma-separated list of access control entries. Each access control entry (ACE) consists of a
+            scope, a type, a user or group identifier, and permissions in the format
+            "[scope:][type]:[id]:[permissions]".
+        :keyword str lease_id:
+            Proposed lease ID, in a GUID string format. The DataLake service returns
+            400 (Invalid request) if the proposed lease ID is not in the correct format.
+        :keyword int lease_duration:
+            Specifies the duration of the lease, in seconds, or negative one
+            (-1) for a lease that never expires. A non-infinite lease can be
+            between 15 and 60 seconds. A lease duration cannot be changed
+            using renew or change.
+        :keyword expires_on:
+            The time to set the file to expiry.
+            If the type of expires_on is an int, expiration time will be set
+            as the number of milliseconds elapsed from creation time.
+            If the type of expires_on is datetime, expiration time will be set
+            absolute to the time provided. If no time zone info is provided, this
+            will be interpreted as UTC.
+        :paramtype expires_on: datetime or int
+        :keyword str permissions:
+            Optional and only valid if Hierarchical Namespace
+            is enabled for the account. Sets POSIX access permissions for the file
+            owner, the file owning group, and others. Each class may be granted
+            read, write, or execute permission.  The sticky bit is also supported.
+            Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are
+            supported.
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword ~azure.storage.filedatalake.CustomerProvidedEncryptionKey cpk:
+            Encrypts the data on the service-side with the given key.
+            Use of customer-provided keys must be done over HTTPS.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :keyword str encryption_context:
+            Specifies the encryption context to set on the file.
+        :returns: response dict (Etag and last modified).
+        :rtype: dict[str, str] or dict[str, ~datetime.datetime]
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_upload_download.py
+                :start-after: [START create_file]
+                :end-before: [END create_file]
+                :language: python
+                :dedent: 4
+                :caption: Create file.
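+
+        A quick check of the umask arithmetic described in the ``umask``
+        keyword above (illustrative only):
+
+        .. code-block:: python
+
+            p, u = 0o777, 0o057
+            assert p & ~u == 0o720  # the resulting permission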
+        """
+        return self._create('file', content_settings=content_settings, metadata=metadata, **kwargs)
+
+    @distributed_trace
+    def delete_file(self, **kwargs):
+        # type: (...) -> None
+        """
+        Marks the specified file for deletion.
+
+        :keyword lease:
+            Required if the file has an active lease. Value can be a LeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :returns: None.
+        :rtype: None
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_upload_download.py
+                :start-after: [START delete_file]
+                :end-before: [END delete_file]
+                :language: python
+                :dedent: 4
+                :caption: Delete file.
+        """
+        return self._delete(**kwargs)
+
+    @distributed_trace
+    def get_file_properties(self, **kwargs):
+        # type: (**Any) -> FileProperties
+        """Returns all user-defined metadata, standard HTTP properties, and
+        system properties for the file. It does not return the content of the file.
+
+        :keyword lease:
+            Required if the directory or file has an active lease. Value can be a DataLakeLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword ~azure.storage.filedatalake.CustomerProvidedEncryptionKey cpk:
+            Decrypts the data on the service-side with the given key.
+            Use of customer-provided keys must be done over HTTPS.
+            Required if the file was created with a customer-provided key.
+        :keyword bool upn:
+            If True, the user identity values returned in the x-ms-owner, x-ms-group,
+            and x-ms-acl response headers will be transformed from Azure Active Directory Object IDs to User
+            Principal Names in the owner, group, and acl fields of
+            :class:`~azure.storage.filedatalake.FileProperties`. If False, the values will be returned
+            as Azure Active Directory Object IDs. The default value is False. Note that group and application
+            Object IDs are not translated because they do not have unique friendly names.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :returns: All user-defined metadata, standard HTTP properties, and system properties for the file.
+        :rtype: ~azure.storage.filedatalake.FileProperties
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_upload_download.py
+                :start-after: [START get_file_properties]
+                :end-before: [END get_file_properties]
+                :language: python
+                :dedent: 4
+                :caption: Getting the properties for a file.
+        """
+        upn = kwargs.pop('upn', None)
+        if upn:
+            headers = kwargs.pop('headers', {})
+            headers['x-ms-upn'] = str(upn)
+            kwargs['headers'] = headers
+        return self._get_path_properties(cls=deserialize_file_properties, **kwargs)
+
+    @distributed_trace
+    def set_file_expiry(self, expiry_options,  # type: str
+                        expires_on=None,   # type: Optional[Union[datetime, int]]
+                        **kwargs):
+        # type: (...) -> None
+        """Sets the time a file will expire and be deleted.
+
+        :param str expiry_options:
+            Required. Indicates mode of the expiry time.
+            Possible values include: 'NeverExpire', 'RelativeToCreation', 'RelativeToNow', 'Absolute'
+        :param datetime or int expires_on:
+            The time to set the file to expiry.
+            When expiry_options is RelativeTo*, expires_on should be an int in milliseconds.
+            If the type of expires_on is datetime, it should be in UTC time.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :rtype: None
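+
+        A minimal sketch of both modes (the values below are illustrative):
+
+        .. code-block:: python
+
+            from datetime import datetime, timedelta, timezone
+
+            # Relative mode: expires_on is an int in milliseconds.
+            file_client.set_file_expiry("RelativeToCreation", 24 * 60 * 60 * 1000)
+
+            # Absolute mode: expires_on is a UTC datetime.
+            file_client.set_file_expiry(
+                "Absolute", datetime.now(timezone.utc) + timedelta(days=1))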
+        """
+        if isinstance(expires_on, datetime):
+            expires_on = convert_datetime_to_rfc1123(expires_on)
+        elif expires_on is not None:
+            expires_on = str(expires_on)
+        self._datalake_client_for_blob_operation.path \
+            .set_expiry(expiry_options, expires_on=expires_on, **kwargs)
+
+    def _upload_options(
+            self, data: Union[bytes, str, Iterable[AnyStr], AsyncIterable[AnyStr], IO[AnyStr]],
+            length: Optional[int] = None,
+            **kwargs
+        ) -> Dict[str, Any]:
+
+        encoding = kwargs.pop('encoding', 'UTF-8')
+        if isinstance(data, str):
+            data = data.encode(encoding)
+        if length is None:
+            length = get_length(data)
+        if isinstance(data, bytes):
+            data = data[:length]
+
+        if isinstance(data, bytes):
+            stream = BytesIO(data)
+        elif hasattr(data, 'read'):
+            stream = data
+        elif hasattr(data, '__iter__'):
+            stream = IterStreamer(data, encoding=encoding)
+        elif hasattr(data, '__aiter__'):
+            stream = AsyncIterStreamer(data, encoding=encoding)
+        else:
+            raise TypeError(f"Unsupported data type: {type(data)}")
+
+        validate_content = kwargs.pop('validate_content', False)
+        content_settings = kwargs.pop('content_settings', None)
+        metadata = kwargs.pop('metadata', None)
+        max_concurrency = kwargs.pop('max_concurrency', 1)
+
+        kwargs['properties'] = add_metadata_headers(metadata)
+        kwargs['lease_access_conditions'] = get_access_conditions(kwargs.pop('lease', None))
+        kwargs['modified_access_conditions'] = get_mod_conditions(kwargs)
+        kwargs['cpk_info'] = get_cpk_info(self.scheme, kwargs)
+
+        if content_settings:
+            kwargs['path_http_headers'] = get_path_http_headers(content_settings)
+
+        kwargs['stream'] = stream
+        kwargs['length'] = length
+        kwargs['validate_content'] = validate_content
+        kwargs['max_concurrency'] = max_concurrency
+        kwargs['client'] = self._client.path
+        kwargs['file_settings'] = self._config
+
+        return kwargs
+
+    @distributed_trace
+    def upload_data(
+            self, data: Union[bytes, str, Iterable[AnyStr], IO[AnyStr]],
+            length: Optional[int] = None,
+            overwrite: Optional[bool] = False,
+            **kwargs
+        ) -> Dict[str, Any]:
+        """
+        Upload data to a file.
+
+        :param data: Content to be uploaded to file
+        :type data: bytes, str, Iterable[AnyStr], or IO[AnyStr]
+        :param int length: Size of the data in bytes.
+        :param bool overwrite: Whether to overwrite an existing file.
+        :keyword ~azure.storage.filedatalake.ContentSettings content_settings:
+            ContentSettings object used to set path properties.
+        :keyword metadata:
+            Name-value pairs associated with the file as metadata.
+        :paramtype metadata: Optional[Dict[str, str]]
+        :keyword ~azure.storage.filedatalake.DataLakeLeaseClient or str lease:
+            Required if the file has an active lease. Value can be a DataLakeLeaseClient object
+            or the lease ID as a string.
+        :keyword str umask: Optional and only valid if Hierarchical Namespace is enabled for the account.
+            When creating a file or directory and the parent folder does not have a default ACL,
+            the umask restricts the permissions of the file or directory to be created.
+            The resulting permission is given by p & ^u, where p is the permission and u is the umask.
+            For example, if p is 0777 and u is 0057, then the resulting permission is 0720.
+            The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027.
+            The umask must be specified in 4-digit octal notation (e.g. 0766).
+        :keyword str permissions: Optional and only valid if Hierarchical Namespace
+         is enabled for the account. Sets POSIX access permissions for the file
+         owner, the file owning group, and others. Each class may be granted
+         read, write, or execute permission.  The sticky bit is also supported.
+         Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are
+         supported.
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword bool validate_content:
+            If true, calculates an MD5 hash for each chunk of the file. The storage
+            service checks the hash of the content that has arrived with the hash
+            that was sent. This is primarily valuable for detecting bitflips on
+            the wire if using http instead of https, as https (the default) will
+            already validate. Note that this MD5 hash is not stored with the
+            blob. Also note that if enabled, the memory-efficient upload algorithm
+            will not be used because computing the MD5 hash requires buffering
+            entire blocks, and doing so defeats the purpose of the memory-efficient algorithm.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword ~azure.storage.filedatalake.CustomerProvidedEncryptionKey cpk:
+            Encrypts the data on the service-side with the given key.
+            Use of customer-provided keys must be done over HTTPS.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_. This method may make multiple calls to the service and
+            the timeout will apply to each call individually.
+        :keyword int max_concurrency:
+            Maximum number of parallel connections to use when transferring the file in chunks.
+            This option does not affect the underlying connection pool, and may
+            require a separate configuration of the connection pool.
+        :keyword int chunk_size:
+            The maximum chunk size for uploading a file in chunks.
+            Defaults to 100*1024*1024, or 100MB.
+        :keyword str encryption_context:
+            Specifies the encryption context to set on the file.
+        :returns: response dict (Etag and last modified).
+        :rtype: dict[str, Any]
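+
+        A minimal sketch (the local file name below is illustrative):
+
+        .. code-block:: python
+
+            with open("local.csv", "rb") as stream:
+                file_client.upload_data(stream, overwrite=True)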
+        """
+        options = self._upload_options(
+            data,
+            length=length,
+            overwrite=overwrite,
+            **kwargs)
+        return upload_datalake_file(**options)
+
+    @staticmethod
+    def _append_data_options(
+            data, # type: Union[bytes, str, Iterable[AnyStr], IO[AnyStr]]
+            offset, # type: int
+            scheme, # type: str
+            length=None, # type: Optional[int]
+            **kwargs
+        ):
+        # type: (...) -> Dict[str, Any]
+
+        if isinstance(data, str):
+            data = data.encode(kwargs.pop('encoding', 'UTF-8'))  # type: ignore
+        if length is None:
+            length = get_length(data)
+            if length is None:
+                length, data = read_length(data)
+        if isinstance(data, bytes):
+            data = data[:length]
+
+        cpk_info = get_cpk_info(scheme, kwargs)
+        kwargs.update(get_lease_action_properties(kwargs))
+
+        options = {
+            'body': data,
+            'position': offset,
+            'content_length': length,
+            'validate_content': kwargs.pop('validate_content', False),
+            'cpk_info': cpk_info,
+            'timeout': kwargs.pop('timeout', None),
+            'cls': return_response_headers}
+        options.update(kwargs)
+        return options
+
+    @distributed_trace
+    def append_data(self, data,  # type: Union[bytes, str, Iterable[AnyStr], IO[AnyStr]]
+                    offset,  # type: int
+                    length=None,  # type: Optional[int]
+                    **kwargs):
+        # type: (...) -> Dict[str, Union[str, datetime, int]]
+        """Append data to the file.
+
+        :param data: Content to be appended to file
+        :type data: bytes, str, Iterable[AnyStr], or IO[AnyStr]
+        :param int offset: start position of the data to be appended to.
+        :param length: Size of the data in bytes.
+        :type length: int or None
+        :keyword bool flush:
+            If true, will commit the data after it is appended.
+        :keyword bool validate_content:
+            If true, calculates an MD5 hash of the block content. The storage
+            service checks the hash of the content that has arrived
+            with the hash that was sent. This is primarily valuable for detecting
+            bitflips on the wire if using http instead of https as https (the default)
+            will already validate. Note that this MD5 hash is not stored with the
+            file.
+        :keyword lease_action:
+            Used to perform lease operations along with appending data.
+
+            "acquire" - Acquire a lease.
+            "auto-renew" - Re-new an existing lease.
+            "release" - Release the lease once the operation is complete. Requires `flush=True`.
+            "acquire-release" - Acquire a lease and release it once the operations is complete. Requires `flush=True`.
+        :paramtype lease_action: Literal["acquire", "auto-renew", "release", "acquire-release"]
+        :keyword int lease_duration:
+            Valid if `lease_action` is set to "acquire" or "acquire-release".
+
+            Specifies the duration of the lease, in seconds, or negative one
+            (-1) for a lease that never expires. A non-infinite lease can be
+            between 15 and 60 seconds. A lease duration cannot be changed
+            using renew or change. Default is -1 (infinite lease).
+        :keyword lease:
+            Required if the file has an active lease or if `lease_action` is set to "acquire" or "acquire-release".
+            If the file has an existing lease, this will be used to access the file. If acquiring a new lease,
+            this will be used as the new lease id.
+            Value can be a DataLakeLeaseClient object or the lease ID as a string.
+        :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
+        :keyword ~azure.storage.filedatalake.CustomerProvidedEncryptionKey cpk:
+            Encrypts the data on the service-side with the given key.
+            Use of customer-provided keys must be done over HTTPS.
+        :returns: A dict of the response headers.
+        :rtype: dict[str, str], dict[str, ~datetime.datetime], or dict[str, int]
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_upload_download.py
+                :start-after: [START append_data]
+                :end-before: [END append_data]
+                :language: python
+                :dedent: 4
+                :caption: Append data to the file.
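+
+        A sketch of the offset arithmetic when appending in several calls
+        (the data values below are illustrative):
+
+        .. code-block:: python
+
+            data1, data2 = b"abc", b"defg"
+            file_client.append_data(data1, offset=0, length=len(data1))
+            file_client.append_data(data2, offset=len(data1), length=len(data2))
+            # flush_data commits at the total appended length.
+            file_client.flush_data(len(data1) + len(data2))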
+        """
+        options = self._append_data_options(
+            data=data,
+            offset=offset,
+            scheme=self.scheme,
+            length=length,
+            **kwargs)
+        try:
+            return self._client.path.append_data(**options)
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    @staticmethod
+    def _flush_data_options(
+            offset, # type: int
+            scheme, # type: str
+            content_settings=None, # type: Optional[ContentSettings]
+            retain_uncommitted_data=False, # type: Optional[bool]
+            **kwargs
+        ):
+        # type: (...) -> Dict[str, Any]
+
+        mod_conditions = get_mod_conditions(kwargs)
+
+        path_http_headers = None
+        if content_settings:
+            path_http_headers = get_path_http_headers(content_settings)
+
+        cpk_info = get_cpk_info(scheme, kwargs)
+        kwargs.update(get_lease_action_properties(kwargs))
+
+        options = {
+            'position': offset,
+            'content_length': 0,
+            'path_http_headers': path_http_headers,
+            'retain_uncommitted_data': retain_uncommitted_data,
+            'close': kwargs.pop('close', False),
+            'modified_access_conditions': mod_conditions,
+            'cpk_info': cpk_info,
+            'timeout': kwargs.pop('timeout', None),
+            'cls': return_response_headers}
+        options.update(kwargs)
+        return options
+
+    @distributed_trace
+    def flush_data(self, offset,  # type: int
+                   retain_uncommitted_data=False,   # type: Optional[bool]
+                   **kwargs):
+        # type: (...) -> Dict[str, Union[str, datetime]]
+        """ Commit the previous appended data.
+
+        :param int offset: The offset is equal to the length of the file after committing
+            the previously appended data.
+        :param bool retain_uncommitted_data: Valid only for flush operations.  If
+            "true", uncommitted data is retained after the flush operation
+            completes; otherwise, the uncommitted data is deleted after the flush
+            operation.  The default is false.  Data at offsets less than the
+            specified position are written to the file when flush succeeds, but
+            this optional parameter allows data after the flush position to be
+            retained for a future flush operation.
+        :keyword ~azure.storage.filedatalake.ContentSettings content_settings:
+            ContentSettings object used to set path properties.
+        :keyword bool close: Azure Storage Events allow applications to receive
+            notifications when files change. When Azure Storage Events are
+            enabled, a file changed event is raised. This event has a property
+            indicating whether this is the final change to distinguish the
+            difference between an intermediate flush to a file stream and the
+            final close of a file stream. The close query parameter is valid only
+            when the action is "flush" and change notifications are enabled. If
+            the value of close is "true" and the flush operation completes
+            successfully, the service raises a file change notification with a
+            property indicating that this is the final update (the file stream has
+            been closed). If "false" a change notification is raised indicating
+            the file has changed. The default is false. This query parameter is
+            set to true by the Hadoop ABFS driver to indicate that the file stream
+            has been closed.
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword lease_action:
+            Used to perform lease operations along with appending data.
+
+            "acquire" - Acquire a lease.
+            "auto-renew" - Re-new an existing lease.
+            "release" - Release the lease once the operation is complete.
+            "acquire-release" - Acquire a lease and release it once the operations is complete.
+        :paramtype lease_action: Literal["acquire", "auto-renew", "release", "acquire-release"]
+        :keyword int lease_duration:
+            Valid if `lease_action` is set to "acquire" or "acquire-release".
+
+            Specifies the duration of the lease, in seconds, or negative one
+            (-1) for a lease that never expires. A non-infinite lease can be
+            between 15 and 60 seconds. A lease duration cannot be changed
+            using renew or change. Default is -1 (infinite lease).
+        :keyword lease:
+            Required if the file has an active lease or if `lease_action` is set to "acquire" or "acquire-release".
+            If the file has an existing lease, this will be used to access the file. If acquiring a new lease,
+            this will be used as the new lease id.
+            Value can be a DataLakeLeaseClient object or the lease ID as a string.
+        :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
+        :keyword ~azure.storage.filedatalake.CustomerProvidedEncryptionKey cpk:
+            Encrypts the data on the service-side with the given key.
+            Use of customer-provided keys must be done over HTTPS.
+        :returns: Response headers in a dict.
+        :rtype: dict[str, str] or dict[str, ~datetime.datetime]
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_file_system.py
+                :start-after: [START upload_file_to_file_system]
+                :end-before: [END upload_file_to_file_system]
+                :language: python
+                :dedent: 8
+                :caption: Commit the previous appended data.
+        """
+        options = self._flush_data_options(
+            offset,
+            self.scheme,
+            retain_uncommitted_data=retain_uncommitted_data, **kwargs)
+        try:
+            return self._client.path.flush_data(**options)
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    @distributed_trace
+    def download_file(self, offset=None, length=None, **kwargs):
+        # type: (Optional[int], Optional[int], Any) -> StorageStreamDownloader
+        """Downloads a file to the StorageStreamDownloader. The readall() method must
+        be used to read all the content, or readinto() must be used to download the file into
+        a stream. Using chunks() returns an iterator which allows the user to iterate over the content in chunks.
+
+        :param int offset:
+            Start of byte range to use for downloading a section of the file.
+            Must be set if length is provided.
+        :param int length:
+            Number of bytes to read from the stream. This is optional, but
+            should be supplied for optimal performance.
+        :keyword lease:
+            If specified, download only succeeds if the file's lease is active
+            and matches this ID. Required if the file has an active lease.
+        :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword ~azure.storage.filedatalake.CustomerProvidedEncryptionKey cpk:
+            Decrypts the data on the service-side with the given key.
+            Use of customer-provided keys must be done over HTTPS.
+            Required if the file was created with a Customer-Provided Key.
+        :keyword int max_concurrency:
+            Maximum number of parallel connections to use when transferring the file in chunks.
+            This option does not affect the underlying connection pool, and may
+            require a separate configuration of the connection pool.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_. This method may make multiple calls to the service and
+            the timeout will apply to each call individually.
+        :returns: A streaming object (StorageStreamDownloader)
+        :rtype: ~azure.storage.filedatalake.StorageStreamDownloader
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_upload_download.py
+                :start-after: [START read_file]
+                :end-before: [END read_file]
+                :language: python
+                :dedent: 4
+                :caption: Return the downloaded data.
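+
+        A sketch of the reader options mentioned above (the local file name
+        below is illustrative):
+
+        .. code-block:: python
+
+            downloader = file_client.download_file()
+            with open("local_copy.bin", "wb") as stream:
+                downloader.readinto(stream)
+
+            # Or iterate over the content chunk by chunk.
+            for chunk in file_client.download_file().chunks():
+                ...  # handle the chunk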
+        """
+        downloader = self._blob_client.download_blob(offset=offset, length=length, **kwargs)
+        return StorageStreamDownloader(downloader)
+
+    @distributed_trace
+    def exists(self, **kwargs):
+        # type: (**Any) -> bool
+        """
+        Returns True if a file exists and returns False otherwise.
+
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :returns: True if a file exists, otherwise returns False.
+        :rtype: bool
+        """
+        return self._exists(**kwargs)
+
+    @distributed_trace
+    def rename_file(self, new_name, **kwargs):
+        # type: (str, **Any) -> DataLakeFileClient
+        """
+        Rename the source file.
+
+        :param str new_name: The new file name to rename to.
+            The value must have the following format: "{filesystem}/{directory}/{subdirectory}/{file}".
+        :keyword ~azure.storage.filedatalake.ContentSettings content_settings:
+            ContentSettings object used to set path properties.
+        :keyword source_lease: A lease ID for the source path. If specified,
+         the source path must have an active lease and the lease ID must
+         match.
+        :paramtype source_lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
+        :keyword lease:
+            Required if the file/directory has an active lease. Value can be a LeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword ~datetime.datetime source_if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime source_if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str source_etag:
+            The source ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions source_match_condition:
+            The source match condition to use upon the etag.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :return: The renamed file client.
+        :rtype: DataLakeFileClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_upload_download.py
+                :start-after: [START rename_file]
+                :end-before: [END rename_file]
+                :language: python
+                :dedent: 4
+                :caption: Rename the source file.
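+
+        A sketch of the expected ``new_name`` format (the names below are
+        illustrative); note that the target file system is part of the value:
+
+        .. code-block:: python
+
+            renamed_client = file_client.rename_file(
+                file_client.file_system_name + "/newdir/newfile.txt")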
+        """
+        new_file_system, new_path, new_file_sas = self._parse_rename_path(new_name)
+
+        new_file_client = DataLakeFileClient(
+            f"{self.scheme}://{self.primary_hostname}", new_file_system, file_path=new_path,
+            credential=self._raw_credential or new_file_sas,
+            _hosts=self._hosts, _configuration=self._config, _pipeline=self._pipeline,
+            _location_mode=self._location_mode
+        )
+        new_file_client._rename_path(  # pylint: disable=protected-access
+            f'/{quote(unquote(self.file_system_name))}/{quote(unquote(self.path_name))}{self._query_str}', **kwargs)
+        return new_file_client
+
+    @distributed_trace
+    def query_file(self, query_expression, **kwargs):
+        # type: (str, **Any) -> DataLakeFileQueryReader
+        """
+        Enables users to select/project on datalake file data by providing simple query expressions.
+        This operation returns a DataLakeFileQueryReader; users need to call readall() or readinto() to get query data.
+
+        :param str query_expression:
+            Required. A query statement,
+            e.g. "Select * from DataLakeStorage".
+        :keyword Callable[~azure.storage.filedatalake.DataLakeFileQueryError] on_error:
+            A function to be called on any processing errors returned by the service.
+        :keyword file_format:
+            Optional. Defines the serialization of the data currently stored in the file. The default is to
+            treat the file data as CSV data formatted in the default dialect. This can be overridden with
+            a custom DelimitedTextDialect, or DelimitedJsonDialect or "ParquetDialect" (passed as a string or enum).
+            These dialects can be passed through their respective classes, the QuickQueryDialect enum or as a string.
+        :paramtype file_format:
+            ~azure.storage.filedatalake.DelimitedTextDialect or ~azure.storage.filedatalake.DelimitedJsonDialect or
+            ~azure.storage.filedatalake.QuickQueryDialect or str
+        :keyword output_format:
+            Optional. Defines the output serialization for the data stream. By default the data will be returned
+            as it is represented in the file. By providing an output format,
+            the file data will be reformatted according to that profile.
+            This value can be a DelimitedTextDialect or a DelimitedJsonDialect or ArrowDialect.
+            These dialects can be passed through their respective classes, the QuickQueryDialect enum or as a string.
+        :paramtype output_format:
+            ~azure.storage.filedatalake.DelimitedTextDialect or ~azure.storage.filedatalake.DelimitedJsonDialect
+            or list[~azure.storage.filedatalake.ArrowDialect] or ~azure.storage.filedatalake.QuickQueryDialect or str
+        :keyword lease:
+            Required if the file has an active lease. Value can be a DataLakeLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword ~azure.storage.filedatalake.CustomerProvidedEncryptionKey cpk:
+            Decrypts the data on the service-side with the given key.
+            Use of customer-provided keys must be done over HTTPS.
+            Required if the file was created with a Customer-Provided Key.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :returns: A streaming object (DataLakeFileQueryReader)
+        :rtype: ~azure.storage.filedatalake.DataLakeFileQueryReader
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_query.py
+                :start-after: [START query]
+                :end-before: [END query]
+                :language: python
+                :dedent: 4
+                :caption: Select/project on datalake file data by providing simple query expressions.
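+
+        A sketch with an explicit input dialect (the dialect settings below
+        are illustrative):
+
+        .. code-block:: python
+
+            from azure.storage.filedatalake import DelimitedTextDialect
+
+            input_format = DelimitedTextDialect(delimiter=",", has_header=True)
+            reader = file_client.query_file(
+                "Select * from DataLakeStorage", file_format=input_format)
+            content = reader.readall()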
+        """
+        query_expression = query_expression.replace("from DataLakeStorage", "from BlobStorage")
+        blob_quick_query_reader = self._blob_client.query_blob(query_expression,
+                                                               blob_format=kwargs.pop('file_format', None),
+                                                               error_cls=DataLakeFileQueryError,
+                                                               **kwargs)
+        return DataLakeFileQueryReader(blob_quick_query_reader)
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_data_lake_lease.py b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_data_lake_lease.py
new file mode 100644
index 00000000..0f65f1c7
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_data_lake_lease.py
@@ -0,0 +1,273 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+# pylint: disable=docstring-keyword-should-match-keyword-only
+
+import uuid
+
+from typing import (
+    Union, Optional, Any,
+    TypeVar, TYPE_CHECKING
+)
+from azure.core.tracing.decorator import distributed_trace
+from azure.storage.blob import BlobLeaseClient
+
+
+if TYPE_CHECKING:
+    from datetime import datetime
+    FileSystemClient = TypeVar("FileSystemClient")
+    DataLakeDirectoryClient = TypeVar("DataLakeDirectoryClient")
+    DataLakeFileClient = TypeVar("DataLakeFileClient")
+
+
+class DataLakeLeaseClient(object):  # pylint: disable=client-accepts-api-version-keyword
+    """Creates a new DataLakeLeaseClient.
+
+    This client provides lease operations on a FileSystemClient, DataLakeDirectoryClient or DataLakeFileClient.
+
+    :ivar str id:
+        The ID of the lease currently being maintained. This will be `None` if no
+        lease has yet been acquired.
+    :ivar str etag:
+        The ETag of the lease currently being maintained. This will be `None` if no
+        lease has yet been acquired or modified.
+    :ivar ~datetime.datetime last_modified:
+        The last modified timestamp of the lease currently being maintained.
+        This will be `None` if no lease has yet been acquired or modified.
+
+    :param client:
+        The client of the file system, directory, or file to lease.
+    :type client: ~azure.storage.filedatalake.FileSystemClient or
+        ~azure.storage.filedatalake.DataLakeDirectoryClient or ~azure.storage.filedatalake.DataLakeFileClient
+    :param str lease_id:
+        A string representing the lease ID of an existing lease. This value does not
+        need to be specified in order to acquire a new lease, or break one.
+    """
+    def __init__(
+            self, client, lease_id=None
+    ):  # pylint: disable=missing-client-constructor-parameter-credential,missing-client-constructor-parameter-kwargs
+        # type: (Union[FileSystemClient, DataLakeDirectoryClient, DataLakeFileClient], Optional[str]) -> None
+        self.id = lease_id or str(uuid.uuid4())
+        self.last_modified = None
+        self.etag = None
+
+        if hasattr(client, '_blob_client'):
+            _client = client._blob_client  # type: ignore
+        elif hasattr(client, '_container_client'):
+            _client = client._container_client  # type: ignore
+        else:
+            raise TypeError("Lease must use any of FileSystemClient DataLakeDirectoryClient, or DataLakeFileClient.")
+
+        self._blob_lease_client = BlobLeaseClient(_client, lease_id=lease_id)
+
+    def __enter__(self):
+        return self
+
+    def __exit__(self, *args):
+        self.release()
+
+    @distributed_trace
+    def acquire(self, lease_duration=-1, **kwargs):
+        # type: (int, **Any) -> None
+        """Requests a new lease.
+
+        If the file/file system does not have an active lease, the DataLake service creates a
+        lease on the file/file system and returns a new lease ID.
+
+        :param int lease_duration:
+            Specifies the duration of the lease, in seconds, or negative one
+            (-1) for a lease that never expires. A non-infinite lease can be
+            between 15 and 60 seconds. A lease duration cannot be changed
+            using renew or change. Default is -1 (infinite lease).
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :rtype: None
+        """
+        self._blob_lease_client.acquire(lease_duration=lease_duration, **kwargs)
+        self._update_lease_client_attributes()
+
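As a usage sketch (assuming an existing DataLakeFileClient named `file_client`; the append_data call is illustrative only):

    # Acquire a short lease and let the context-manager protocol above release it.
    with DataLakeLeaseClient(file_client) as lease:
        lease.acquire(lease_duration=15)
        file_client.append_data(b"data", offset=0, length=4, lease=lease)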
+    @distributed_trace
+    def renew(self, **kwargs):
+        # type: (**Any) -> None
+        """Renews the lease.
+
+        The lease can be renewed if the lease ID specified in the
+        lease client matches that associated with the file system or file. Note that
+        the lease may be renewed even if it has expired as long as the file system
+        or file has not been leased again since the expiration of that lease. When you
+        renew a lease, the lease duration clock resets.
+
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :return: None
+        """
+        self._blob_lease_client.renew(**kwargs)
+        self._update_lease_client_attributes()
+
+    @distributed_trace
+    def release(self, **kwargs):
+        # type: (**Any) -> None
+        """Release the lease.
+
+        The lease may be released if the client lease id specified matches
+        that associated with the file system or file. Releasing the lease allows another client
+        to immediately acquire the lease for the file system or file as soon as the release is complete.
+
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :return: None
+        """
+        self._blob_lease_client.release(**kwargs)
+        self._update_lease_client_attributes()
+
+    @distributed_trace
+    def change(self, proposed_lease_id, **kwargs):
+        # type: (str, **Any) -> None
+        """Change the lease ID of an active lease.
+
+        :param str proposed_lease_id:
+            Proposed lease ID, in a GUID string format. The DataLake service returns 400
+            (Invalid request) if the proposed lease ID is not in the correct format.
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :return: None
+        """
+        self._blob_lease_client.change(proposed_lease_id=proposed_lease_id, **kwargs)
+        self._update_lease_client_attributes()
+
+    @distributed_trace
+    def break_lease(self, lease_break_period=None, **kwargs):
+        # type: (Optional[int], **Any) -> int
+        """Break the lease, if the file system or file has an active lease.
+
+        Once a lease is broken, it cannot be renewed. Any authorized request can break the lease;
+        the request is not required to specify a matching lease ID. When a lease
+        is broken, the lease break period is allowed to elapse, during which time
+        no lease operation except break and release can be performed on the file system or file.
+        When a lease is successfully broken, the response indicates the interval
+        in seconds until a new lease can be acquired.
+
+        :param int lease_break_period:
+            This is the proposed duration of seconds that the lease
+            should continue before it is broken, between 0 and 60 seconds. This
+            break period is only used if it is shorter than the time remaining
+            on the lease. If longer, the time remaining on the lease is used.
+            A new lease will not be available before the break period has
+            expired, but the lease may be held for longer than the break
+            period. If this header does not appear with a break
+            operation, a fixed-duration lease breaks after the remaining lease
+            period elapses, and an infinite lease breaks immediately.
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :return: Approximate time remaining in the lease period, in seconds.
+        :rtype: int
+        """
+        return self._blob_lease_client.break_lease(lease_break_period=lease_break_period, **kwargs)
+
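A hedged sketch of breaking an active lease (continuing the `lease` object from the previous example):

    # Give current holders up to 10 seconds to finish; the return value is the
    # approximate wait, in seconds, before a new lease can be acquired.
    seconds_until_available = lease.break_lease(lease_break_period=10)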
+    def _update_lease_client_attributes(self):
+        self.id = self._blob_lease_client.id  # type: str
+        self.last_modified = self._blob_lease_client.last_modified  # type: datetime
+        self.etag = self._blob_lease_client.etag  # type: str
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_data_lake_service_client.py b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_data_lake_service_client.py
new file mode 100644
index 00000000..6ec34b94
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_data_lake_service_client.py
@@ -0,0 +1,633 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+# pylint: disable=docstring-keyword-should-match-keyword-only
+
+from typing import Any, Dict, Optional, Union, TYPE_CHECKING
+from urllib.parse import urlparse
+
+from typing_extensions import Self
+
+from azure.core.paging import ItemPaged
+from azure.core.pipeline import Pipeline
+from azure.core.tracing.decorator import distributed_trace
+from azure.storage.blob import BlobServiceClient
+from ._shared.base_client import TransportWrapper, StorageAccountHostsMixin, parse_query, parse_connection_str
+from ._deserialize import get_datalake_service_properties
+from ._file_system_client import FileSystemClient
+from ._data_lake_directory_client import DataLakeDirectoryClient
+from ._data_lake_file_client import DataLakeFileClient
+from ._models import UserDelegationKey, FileSystemPropertiesPaged, LocationMode
+from ._serialize import convert_dfs_url_to_blob_url, get_api_version
+from ._generated import AzureDataLakeStorageRESTAPI
+
+if TYPE_CHECKING:
+    from azure.core.credentials import AzureNamedKeyCredential, AzureSasCredential, TokenCredential
+
+
+class DataLakeServiceClient(StorageAccountHostsMixin):
+    """A client to interact with the DataLake Service at the account level.
+
+    This client provides operations to retrieve and configure the account properties
+    as well as list, create and delete file systems within the account.
+    For operations relating to a specific file system, directory or file, clients for those entities
+    can also be retrieved using the `get_client` functions.
+
+    :ivar str url:
+        The full endpoint URL to the datalake service endpoint.
+    :ivar str primary_endpoint:
+        The full primary endpoint URL.
+    :ivar str primary_hostname:
+        The hostname of the primary endpoint.
+    :param str account_url:
+        The URL to the DataLake storage account. Any other entities included
+        in the URL path (e.g. file system or file) will be discarded. This URL can be optionally
+        authenticated with a SAS token.
+    :param credential:
+        The credentials with which to authenticate. This is optional if the
+        account URL already has a SAS token. The value can be a SAS token string,
+        an instance of a AzureSasCredential or AzureNamedKeyCredential from azure.core.credentials,
+        an account shared access key, or an instance of a TokenCredentials class from azure.identity.
+        If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential
+        - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError.
+        If using an instance of AzureNamedKeyCredential, "name" should be the storage account name, and "key"
+        should be the storage account key.
+    :type credential:
+        ~azure.core.credentials.AzureNamedKeyCredential or
+        ~azure.core.credentials.AzureSasCredential or
+        ~azure.core.credentials.TokenCredential or
+        str or dict[str, str] or None
+    :keyword str api_version:
+        The Storage API version to use for requests. Default value is the most recent service version that is
+        compatible with the current SDK. Setting to an older version may result in reduced feature compatibility.
+    :keyword str audience: The audience to use when requesting tokens for Azure Active Directory
+        authentication. Only has an effect when credential is of type TokenCredential. The value could be
+        https://storage.azure.com/ (default) or https://<account>.blob.core.windows.net.
+
+
+    .. admonition:: Example:
+
+        .. literalinclude:: ../samples/datalake_samples_service.py
+            :start-after: [START create_datalake_service_client]
+            :end-before: [END create_datalake_service_client]
+            :language: python
+            :dedent: 8
+            :caption: Creating the DataLakeServiceClient from connection string.
+
+        .. literalinclude:: ../samples/datalake_samples_service.py
+            :start-after: [START create_datalake_service_client_oauth]
+            :end-before: [END create_datalake_service_client_oauth]
+            :language: python
+            :dedent: 8
+            :caption: Creating the DataLakeServiceClient with Azure Identity credentials.
+    """
+
+    def __init__(
+            self, account_url: str,
+            credential: Optional[Union[str, Dict[str, str], "AzureNamedKeyCredential", "AzureSasCredential", "TokenCredential"]] = None,  # pylint: disable=line-too-long
+            **kwargs: Any
+        ) -> None:
+        try:
+            if not account_url.lower().startswith('http'):
+                account_url = "https://" + account_url
+        except AttributeError as exc:
+            raise ValueError("Account URL must be a string.") from exc
+        parsed_url = urlparse(account_url.rstrip('/'))
+        if not parsed_url.netloc:
+            raise ValueError(f"Invalid URL: {account_url}")
+
+        blob_account_url = convert_dfs_url_to_blob_url(account_url)
+        self._blob_account_url = blob_account_url
+        self._blob_service_client = BlobServiceClient(blob_account_url, credential, **kwargs)
+        self._blob_service_client._hosts[LocationMode.SECONDARY] = ""
+
+        _, sas_token = parse_query(parsed_url.query)
+        self._query_str, self._raw_credential = self._format_query_string(sas_token, credential)
+
+        super(DataLakeServiceClient, self).__init__(parsed_url, service='dfs',
+                                                    credential=self._raw_credential, **kwargs)
+        # ADLS doesn't support a secondary endpoint, so make sure it's empty
+        self._hosts[LocationMode.SECONDARY] = ""
+
+        self._client = AzureDataLakeStorageRESTAPI(self.url, base_url=self.url, pipeline=self._pipeline)
+        self._client._config.version = get_api_version(kwargs)
+
+    def __enter__(self):
+        self._blob_service_client.__enter__()
+        return self
+
+    def __exit__(self, *args):
+        self._blob_service_client.close()
+        super(DataLakeServiceClient, self).__exit__(*args)
+
+    def close(self):
+        # type: () -> None
+        """ This method is to close the sockets opened by the client.
+        It need not be called when the client is used as a context manager.
+        """
+        self.__exit__()
+
+    def _format_url(self, hostname):
+        """Format the endpoint URL according to hostname.
+
+        :param str hostname: The hostname for the endpoint URL.
+        :returns: The formatted URL
+        :rtype: str
+        """
+        formatted_url = f"{self.scheme}://{hostname}/{self._query_str}"
+        return formatted_url
+
+    @classmethod
+    def from_connection_string(
+            cls, conn_str: str,
+            credential: Optional[Union[str, Dict[str, str], "AzureNamedKeyCredential", "AzureSasCredential", "TokenCredential"]] = None,  # pylint: disable=line-too-long
+            **kwargs: Any
+        ) -> Self:
+        """
+        Create DataLakeServiceClient from a Connection String.
+
+        :param str conn_str:
+            A connection string to an Azure Storage account.
+        :param credential:
+            The credentials with which to authenticate. This is optional if the
+            account URL already has a SAS token, or the connection string already has shared
+            access key values. The value can be a SAS token string,
+            an instance of a AzureSasCredential from azure.core.credentials, an account shared access
+            key, or an instance of a TokenCredentials class from azure.identity.
+            Credentials provided here will take precedence over those in the connection string.
+        :type credential:
+            ~azure.core.credentials.AzureNamedKeyCredential or
+            ~azure.core.credentials.AzureSasCredential or
+            ~azure.core.credentials.TokenCredential or
+            str or dict[str, str] or None
+        :keyword str audience: The audience to use when requesting tokens for Azure Active Directory
+            authentication. Only has an effect when credential is of type TokenCredential. The value could be
+            https://storage.azure.com/ (default) or https://<account>.blob.core.windows.net.
+        :returns: A DataLakeServiceClient.
+        :rtype: ~azure.storage.filedatalake.DataLakeServiceClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_file_system.py
+                :start-after: [START create_data_lake_service_client_from_conn_str]
+                :end-before: [END create_data_lake_service_client_from_conn_str]
+                :language: python
+                :dedent: 8
+                :caption: Creating the DataLakeServiceClient from a connection string.
+        """
+        account_url, _, credential = parse_connection_str(conn_str, credential, 'dfs')
+        return cls(account_url, credential=credential, **kwargs)
+
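For example (the connection string below is a placeholder; real values come from the portal or configuration):

    from azure.storage.filedatalake import DataLakeServiceClient

    conn_str = "DefaultEndpointsProtocol=https;AccountName=<account>;AccountKey=<key>;EndpointSuffix=core.windows.net"
    service_client = DataLakeServiceClient.from_connection_string(conn_str)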
+    @distributed_trace
+    def get_user_delegation_key(self, key_start_time,  # type: datetime
+                                key_expiry_time,  # type: datetime
+                                **kwargs  # type: Any
+                                ):
+        # type: (...) -> UserDelegationKey
+        """
+        Obtain a user delegation key for the purpose of signing SAS tokens.
+        A token credential must be present on the service object for this request to succeed.
+
+        :param ~datetime.datetime key_start_time:
+            A DateTime value. Indicates when the key becomes valid.
+        :param ~datetime.datetime key_expiry_time:
+            A DateTime value. Indicates when the key stops being valid.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :return: The user delegation key.
+        :rtype: ~azure.storage.filedatalake.UserDelegationKey
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_service.py
+                :start-after: [START get_user_delegation_key]
+                :end-before: [END get_user_delegation_key]
+                :language: python
+                :dedent: 8
+                :caption: Get user delegation key from datalake service client.
+        """
+        delegation_key = self._blob_service_client.get_user_delegation_key(key_start_time=key_start_time,
+                                                                           key_expiry_time=key_expiry_time,
+                                                                           **kwargs)
+        return UserDelegationKey._from_generated(delegation_key)  # pylint: disable=protected-access
+
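A minimal sketch, assuming `service_client` was constructed with a TokenCredential (for example, one from azure.identity):

    from datetime import datetime, timedelta, timezone

    start = datetime.now(timezone.utc)
    delegation_key = service_client.get_user_delegation_key(
        key_start_time=start,
        key_expiry_time=start + timedelta(hours=1))
    # `delegation_key` can then serve as the credential when generating user delegation SAS tokens.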
+    @distributed_trace
+    def list_file_systems(self, name_starts_with=None,  # type: Optional[str]
+                          include_metadata=None,  # type: Optional[bool]
+                          **kwargs):
+        # type: (...) -> ItemPaged[FileSystemProperties]
+        """Returns a generator to list the file systems under the specified account.
+
+        The generator will lazily follow the continuation tokens returned by
+        the service and stop when all file systems have been returned.
+
+        :param str name_starts_with:
+            Filters the results to return only file systems whose names
+            begin with the specified prefix.
+        :param bool include_metadata:
+            Specifies that file system metadata be returned in the response.
+            The default value is `False`.
+        :keyword int results_per_page:
+            The maximum number of file system names to retrieve per API
+            call. If the request does not specify, the server will return up to 5,000 items per page.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :keyword bool include_deleted:
+            Specifies that deleted file systems be returned in the response. This is only for
+            accounts enabled for file system restore. The default value is `False`.
+
+            .. versionadded:: 12.3.0
+
+        :keyword bool include_system:
+            Flag specifying that system filesystems should be included.
+
+            .. versionadded:: 12.6.0
+
+        :returns: An iterable (auto-paging) of FileSystemProperties.
+        :rtype: ~azure.core.paging.ItemPaged[~azure.storage.filedatalake.FileSystemProperties]
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_service.py
+                :start-after: [START list_file_systems]
+                :end-before: [END list_file_systems]
+                :language: python
+                :dedent: 8
+                :caption: Listing the file systems in the datalake service.
+        """
+        item_paged = self._blob_service_client.list_containers(name_starts_with=name_starts_with,
+                                                               include_metadata=include_metadata,
+                                                               **kwargs)
+        item_paged._page_iterator_class = FileSystemPropertiesPaged  # pylint: disable=protected-access
+        return item_paged
+
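Usage sketch (the prefix is an assumption):

    # Pages are fetched lazily as the generator is consumed.
    for fs in service_client.list_file_systems(name_starts_with="logs-", include_metadata=True):
        print(fs.name, fs.metadata)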
+    @distributed_trace
+    def create_file_system(self, file_system,  # type: Union[FileSystemProperties, str]
+                           metadata=None,  # type: Optional[Dict[str, str]]
+                           public_access=None,  # type: Optional[PublicAccess]
+                           **kwargs):
+        # type: (...) -> FileSystemClient
+        """Creates a new file system under the specified account.
+
+        If the file system with the same name already exists, a ResourceExistsError will
+        be raised. This method returns a client with which to interact with the newly
+        created file system.
+
+        :param str file_system:
+            The name of the file system to create.
+        :param metadata:
+            A dict with name-value pairs to associate with the
+            file system as metadata. Example: `{'Category':'test'}`
+        :type metadata: dict(str, str)
+        :param public_access:
+            Specifies whether data in the file system may be accessed publicly
+            and the level of access. Possible values include: file system, file.
+        :type public_access: ~azure.storage.filedatalake.PublicAccess
+        :keyword encryption_scope_options:
+            Specifies the default encryption scope to set on the file system and use for
+            all future writes.
+
+            .. versionadded:: 12.9.0
+
+        :paramtype encryption_scope_options: dict or ~azure.storage.filedatalake.EncryptionScopeOptions
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :returns: A FileSystemClient with newly created file system.
+        :rtype: ~azure.storage.filedatalake.FileSystemClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_service.py
+                :start-after: [START create_file_system_from_service_client]
+                :end-before: [END create_file_system_from_service_client]
+                :language: python
+                :dedent: 8
+                :caption: Creating a file system in the datalake service.
+        """
+        file_system_client = self.get_file_system_client(file_system)
+        file_system_client.create_file_system(metadata=metadata, public_access=public_access, **kwargs)
+        return file_system_client
+
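A hedged sketch of creating a file system and tolerating the documented ResourceExistsError (the file system name is an assumption):

    from azure.core.exceptions import ResourceExistsError

    try:
        fs_client = service_client.create_file_system("raw-data", metadata={"Category": "test"})
    except ResourceExistsError:
        fs_client = service_client.get_file_system_client("raw-data")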
+    def _rename_file_system(self, name, new_name, **kwargs):
+        # type: (str, str, **Any) -> FileSystemClient
+        """Renames a filesystem.
+
+        Operation is successful only if the source filesystem exists.
+
+        :param str name:
+            The name of the filesystem to rename.
+        :param str new_name:
+            The new filesystem name the user wants to rename to.
+        :keyword lease:
+            Specify this to perform only if the lease ID given
+            matches the active lease ID of the source filesystem.
+        :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :returns: A FileSystemClient with the specified file system renamed.
+        :rtype: ~azure.storage.filedatalake.FileSystemClient
+        """
+        self._blob_service_client._rename_container(name, new_name, **kwargs)   # pylint: disable=protected-access
+        renamed_file_system = self.get_file_system_client(new_name)
+        return renamed_file_system
+
+    @distributed_trace
+    def undelete_file_system(self, name, deleted_version, **kwargs):
+        # type: (str, str, **Any) -> FileSystemClient
+        """Restores soft-deleted filesystem.
+
+        Operation will only be successful if used within the specified number of days
+        set in the delete retention policy.
+
+        .. versionadded:: 12.3.0
+            This operation was introduced in API version '2019-12-12'.
+
+        :param str name:
+            Specifies the name of the deleted filesystem to restore.
+        :param str deleted_version:
+            Specifies the version of the deleted filesystem to restore.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :returns: The restored soft-deleted FileSystemClient.
+        :rtype: ~azure.storage.filedatalake.FileSystemClient
+        """
+        new_name = kwargs.pop('new_name', None)
+        file_system = self.get_file_system_client(new_name or name)
+        self._blob_service_client.undelete_container(
+            name, deleted_version, new_name=new_name, **kwargs)
+        return file_system
+
+    @distributed_trace
+    def delete_file_system(self, file_system,  # type: Union[FileSystemProperties, str]
+                           **kwargs):
+        # type: (...) -> FileSystemClient
+        """Marks the specified file system for deletion.
+
+        The file system and any files contained within it are later deleted during garbage collection.
+        If the file system is not found, a ResourceNotFoundError will be raised.
+
+        :param file_system:
+            The file system to delete. This can either be the name of the file system,
+            or an instance of FileSystemProperties.
+        :type file_system: str or ~azure.storage.filedatalake.FileSystemProperties
+        :keyword lease:
+            If specified, delete_file_system only succeeds if the
+            file system's lease is active and matches this ID.
+            Required if the file system has an active lease.
+        :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :returns: A FileSystemClient with the specified file system deleted.
+        :rtype: ~azure.storage.filedatalake.FileSystemClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_service.py
+                :start-after: [START delete_file_system_from_service_client]
+                :end-before: [END delete_file_system_from_service_client]
+                :language: python
+                :dedent: 8
+                :caption: Deleting a file system in the datalake service.
+        """
+        file_system_client = self.get_file_system_client(file_system)
+        file_system_client.delete_file_system(**kwargs)
+        return file_system_client
+
+    def get_file_system_client(self, file_system  # type: Union[FileSystemProperties, str]
+                               ):
+        # type: (...) -> FileSystemClient
+        """Get a client to interact with the specified file system.
+
+        The file system need not already exist.
+
+        :param file_system:
+            The file system. This can either be the name of the file system,
+            or an instance of FileSystemProperties.
+        :type file_system: str or ~azure.storage.filedatalake.FileSystemProperties
+        :returns: A FileSystemClient.
+        :rtype: ~azure.storage.filedatalake.FileSystemClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_file_system.py
+                :start-after: [START create_file_system_client_from_service]
+                :end-before: [END create_file_system_client_from_service]
+                :language: python
+                :dedent: 8
+                :caption: Getting the file system client to interact with a specific file system.
+        """
+        try:
+            file_system_name = file_system.name
+        except AttributeError:
+            file_system_name = file_system
+
+        _pipeline = Pipeline(
+            transport=TransportWrapper(self._pipeline._transport), # pylint: disable = protected-access
+            policies=self._pipeline._impl_policies # pylint: disable = protected-access
+        )
+        return FileSystemClient(self.url, file_system_name, credential=self._raw_credential,
+                                api_version=self.api_version,
+                                _configuration=self._config,
+                                _pipeline=_pipeline, _hosts=self._hosts)
+
+    def get_directory_client(self, file_system,  # type: Union[FileSystemProperties, str]
+                             directory  # type: Union[DirectoryProperties, str]
+                             ):
+        # type: (...) -> DataLakeDirectoryClient
+        """Get a client to interact with the specified directory.
+
+        The directory need not already exist.
+
+        :param file_system:
+            The file system that the directory is in. This can either be the name of the file system,
+            or an instance of FileSystemProperties.
+        :type file_system: str or ~azure.storage.filedatalake.FileSystemProperties
+        :param directory:
+            The directory with which to interact. This can either be the name of the directory,
+            or an instance of DirectoryProperties.
+        :type directory: str or ~azure.storage.filedatalake.DirectoryProperties
+        :returns: A DataLakeDirectoryClient.
+        :rtype: ~azure.storage.filedatalake.DataLakeDirectoryClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_service.py
+                :start-after: [START get_directory_client_from_service_client]
+                :end-before: [END get_directory_client_from_service_client]
+                :language: python
+                :dedent: 8
+                :caption: Getting the directory client to interact with a specific directory.
+        """
+        try:
+            file_system_name = file_system.name
+        except AttributeError:
+            file_system_name = file_system
+        try:
+            directory_name = directory.name
+        except AttributeError:
+            directory_name = directory
+
+        _pipeline = Pipeline(
+            transport=TransportWrapper(self._pipeline._transport), # pylint: disable = protected-access
+            policies=self._pipeline._impl_policies # pylint: disable = protected-access
+        )
+        return DataLakeDirectoryClient(self.url, file_system_name, directory_name=directory_name,
+                                       credential=self._raw_credential,
+                                       api_version=self.api_version,
+                                       _configuration=self._config, _pipeline=_pipeline,
+                                       _hosts=self._hosts)
+
+    def get_file_client(self, file_system,  # type: Union[FileSystemProperties, str]
+                        file_path  # type: Union[FileProperties, str]
+                        ):
+        # type: (...) -> DataLakeFileClient
+        """Get a client to interact with the specified file.
+
+        The file need not already exist.
+
+        :param file_system:
+            The file system that the file is in. This can either be the name of the file system,
+            or an instance of FileSystemProperties.
+        :type file_system: str or ~azure.storage.filedatalake.FileSystemProperties
+        :param file_path:
+            The file with which to interact. This can either be the full path of the file
+            (from the root directory), e.g. directory/subdirectory/file, or an instance of FileProperties.
+        :type file_path: str or ~azure.storage.filedatalake.FileProperties
+        :returns: A DataLakeFileClient.
+        :rtype: ~azure.storage.filedatalake.DataLakeFileClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_service.py
+                :start-after: [START get_file_client_from_service_client]
+                :end-before: [END get_file_client_from_service_client]
+                :language: python
+                :dedent: 8
+                :caption: Getting the file client to interact with a specific file.
+        """
+        try:
+            file_system_name = file_system.name
+        except AttributeError:
+            file_system_name = file_system
+        try:
+            file_path = file_path.name
+        except AttributeError:
+            pass
+
+        _pipeline = Pipeline(
+            transport=TransportWrapper(self._pipeline._transport), # pylint: disable = protected-access
+            policies=self._pipeline._impl_policies # pylint: disable = protected-access
+        )
+        return DataLakeFileClient(
+            self.url, file_system_name, file_path=file_path, credential=self._raw_credential,
+            api_version=self.api_version,
+            _hosts=self._hosts, _configuration=self._config, _pipeline=_pipeline)
+
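Putting the three accessors together (file system and path names are assumptions; no service calls are made, since these accessors only build clients that reuse this client's pipeline):

    fs_client = service_client.get_file_system_client("my-filesystem")
    dir_client = service_client.get_directory_client("my-filesystem", "dir/subdir")
    file_client = service_client.get_file_client("my-filesystem", "dir/subdir/data.csv")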
+    @distributed_trace
+    def set_service_properties(self, **kwargs):
+        # type: (**Any) -> None
+        """Sets the properties of a storage account's Datalake service, including
+        Azure Storage Analytics.
+
+        .. versionadded:: 12.4.0
+            This operation was introduced in API version '2020-06-12'.
+
+        If an element (e.g. analytics_logging) is left as None, the
+        existing settings on the service for that functionality are preserved.
+
+        :keyword analytics_logging:
+            Groups the Azure Analytics Logging settings.
+        :type analytics_logging: ~azure.storage.filedatalake.AnalyticsLogging
+        :keyword hour_metrics:
+            The hour metrics settings provide a summary of request
+            statistics grouped by API in hourly aggregates.
+        :type hour_metrics: ~azure.storage.filedatalake.Metrics
+        :keyword minute_metrics:
+            The minute metrics settings provide request statistics
+            for each minute.
+        :type minute_metrics: ~azure.storage.filedatalake.Metrics
+        :keyword cors:
+            You can include up to five CorsRule elements in the
+            list. If an empty list is specified, all CORS rules will be deleted,
+            and CORS will be disabled for the service.
+        :type cors: list[~azure.storage.filedatalake.CorsRule]
+        :keyword str target_version:
+            Indicates the default version to use for requests if an incoming
+            request's version is not specified.
+        :keyword delete_retention_policy:
+            The delete retention policy specifies whether to retain deleted files/directories.
+            It also specifies the number of days and versions of file/directory to keep.
+        :type delete_retention_policy: ~azure.storage.filedatalake.RetentionPolicy
+        :keyword static_website:
+            Specifies whether the static website feature is enabled,
+            and if yes, indicates the index document and 404 error document to use.
+        :type static_website: ~azure.storage.filedatalake.StaticWebsite
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :rtype: None
+        """
+        return self._blob_service_client.set_service_properties(**kwargs)
+
+    @distributed_trace
+    def get_service_properties(self, **kwargs):
+        # type: (**Any) -> Dict[str, Any]
+        """Gets the properties of a storage account's datalake service, including
+        Azure Storage Analytics.
+
+        .. versionadded:: 12.4.0
+            This operation was introduced in API version '2020-06-12'.
+
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :returns: An object containing datalake service properties such as
+            analytics logging, hour/minute metrics, cors rules, etc.
+        :rtype: dict[str, Any]
+        """
+        props = self._blob_service_client.get_service_properties(**kwargs)
+        return get_datalake_service_properties(props)
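A round-trip sketch of the two methods above: because unspecified elements are preserved, a partial update is safe.

    props = service_client.get_service_properties()
    # The dictionary keys mirror the keyword arguments of set_service_properties.
    retention = props["delete_retention_policy"]
    service_client.set_service_properties(delete_retention_policy=retention)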
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_deserialize.py b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_deserialize.py
new file mode 100644
index 00000000..9ebaa641
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_deserialize.py
@@ -0,0 +1,241 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+import logging
+from typing import NoReturn, TYPE_CHECKING
+from xml.etree.ElementTree import Element
+
+from azure.core.pipeline.policies import ContentDecodePolicy
+from azure.core.exceptions import (
+    HttpResponseError,
+    DecodeError,
+    ResourceModifiedError,
+    ClientAuthenticationError,
+    ResourceNotFoundError,
+    ResourceExistsError
+)
+from ._models import (
+    FileProperties,
+    DirectoryProperties,
+    LeaseProperties,
+    DeletedPathProperties,
+    StaticWebsite,
+    RetentionPolicy,
+    Metrics,
+    AnalyticsLogging,
+    PathProperties
+)
+from ._shared.models import StorageErrorCode
+from ._shared.response_handlers import deserialize_metadata
+
+if TYPE_CHECKING:
+    pass
+
+_LOGGER = logging.getLogger(__name__)
+
+
+def deserialize_dir_properties(response, obj, headers):
+    metadata = deserialize_metadata(response, obj, headers)
+    dir_properties = DirectoryProperties(
+        metadata=metadata,
+        owner=response.headers.get('x-ms-owner'),
+        group=response.headers.get('x-ms-group'),
+        permissions=response.headers.get('x-ms-permissions'),
+        acl=response.headers.get('x-ms-acl'),
+        **headers
+    )
+    return dir_properties
+
+
+def deserialize_file_properties(response, obj, headers):
+    metadata = deserialize_metadata(response, obj, headers)
+    # DataLake specific headers that are not deserialized in blob are pulled directly from the raw response header
+    file_properties = FileProperties(
+        metadata=metadata,
+        encryption_context=response.headers.get('x-ms-encryption-context'),
+        owner=response.headers.get('x-ms-owner'),
+        group=response.headers.get('x-ms-group'),
+        permissions=response.headers.get('x-ms-permissions'),
+        acl=response.headers.get('x-ms-acl'),
+        **headers
+    )
+    if 'Content-Range' in headers:
+        if 'x-ms-blob-content-md5' in headers:
+            file_properties.content_settings.content_md5 = headers['x-ms-blob-content-md5']
+        else:
+            file_properties.content_settings.content_md5 = None
+    return file_properties
+
+
+def deserialize_path_properties(path_list):
+    return [PathProperties._from_generated(path) for path in path_list] # pylint: disable=protected-access
+
+
+def return_headers_and_deserialized_path_list(response, deserialized, response_headers):  # pylint: disable=name-too-long, unused-argument
+    return deserialized.paths or [], normalize_headers(response_headers)
+
+
+def get_deleted_path_properties_from_generated_code(generated):  # pylint: disable=name-too-long
+    deleted_path = DeletedPathProperties()
+    deleted_path.name = generated.name
+    deleted_path.deleted_time = generated.properties.deleted_time
+    deleted_path.remaining_retention_days = generated.properties.remaining_retention_days
+    deleted_path.deletion_id = generated.deletion_id
+    return deleted_path
+
+
+def is_file_path(_, __, headers):
+    return headers['x-ms-resource-type'] == "file"
+
+
+def get_datalake_service_properties(datalake_properties):
+    datalake_properties["analytics_logging"] = AnalyticsLogging._from_generated(    # pylint: disable=protected-access
+        datalake_properties["analytics_logging"])
+    datalake_properties["hour_metrics"] = Metrics._from_generated(datalake_properties["hour_metrics"])  # pylint: disable=protected-access
+    datalake_properties["minute_metrics"] = Metrics._from_generated(    # pylint: disable=protected-access
+        datalake_properties["minute_metrics"])
+    datalake_properties["delete_retention_policy"] = RetentionPolicy._from_generated(   # pylint: disable=protected-access
+        datalake_properties["delete_retention_policy"])
+    datalake_properties["static_website"] = StaticWebsite._from_generated(  # pylint: disable=protected-access
+        datalake_properties["static_website"])
+    return datalake_properties
+
+
+def from_blob_properties(blob_properties, **additional_args):
+    file_props = FileProperties()
+    file_props.name = blob_properties.name
+    file_props.etag = blob_properties.etag
+    file_props.deleted = blob_properties.deleted
+    file_props.metadata = blob_properties.metadata
+    file_props.lease = blob_properties.lease
+    file_props.lease.__class__ = LeaseProperties
+    file_props.last_modified = blob_properties.last_modified
+    file_props.creation_time = blob_properties.creation_time
+    file_props.size = blob_properties.size
+    file_props.deleted_time = blob_properties.deleted_time
+    file_props.remaining_retention_days = blob_properties.remaining_retention_days
+    file_props.content_settings = blob_properties.content_settings
+
+    # Parse additional Datalake-only properties
+    file_props.encryption_context = additional_args.pop('encryption_context', None)
+    file_props.owner = additional_args.pop('owner', None)
+    file_props.group = additional_args.pop('group', None)
+    file_props.permissions = additional_args.pop('permissions', None)
+    file_props.acl = additional_args.pop('acl', None)
+
+    return file_props
+
+
+def normalize_headers(headers):
+    normalized = {}
+    for key, value in headers.items():
+        if key.startswith('x-ms-'):
+            key = key[5:]
+        normalized[key.lower().replace('-', '_')] = value
+    return normalized
+
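A worked example of the normalization above:

    # normalize_headers({'x-ms-lease-state': 'leased', 'Content-Length': '42'})
    # returns {'lease_state': 'leased', 'content_length': '42'}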
+
+def process_storage_error(storage_error) -> NoReturn:  # pylint:disable=too-many-statements
+    raise_error = HttpResponseError
+    serialized = False
+    if not storage_error.response:
+        raise storage_error
+    # If it is one of these three, it has already been serialized by the generated layer.
+    if isinstance(storage_error, (ResourceNotFoundError, ClientAuthenticationError, ResourceExistsError)):
+        serialized = True
+    error_code = storage_error.response.headers.get('x-ms-error-code')
+    error_message = storage_error.message
+    additional_data = {}
+    error_dict = {}
+    try:
+        error_body = ContentDecodePolicy.deserialize_from_http_generics(storage_error.response)
+        # If it is an XML response
+        if isinstance(error_body, Element):
+            error_dict = {
+                child.tag.lower(): child.text
+                for child in error_body
+            }
+        # If it is a JSON response
+        elif isinstance(error_body, dict):
+            error_dict = error_body.get('error', {})
+        elif not error_code:
+            _LOGGER.warning(
+                'Unexpected return type %s from ContentDecodePolicy.deserialize_from_http_generics.', type(error_body))
+            error_dict = {'message': str(error_body)}
+
+        # If we extracted from a Json or XML response
+        if error_dict:
+            error_code = error_dict.get('code')
+            error_message = error_dict.get('message')
+            additional_data = {k: v for k, v in error_dict.items() if k not in {'code', 'message'}}
+
+    except DecodeError:
+        pass
+
+    try:
+        # This check would be unnecessary if we have already serialized the error.
+        if error_code and not serialized:
+            error_code = StorageErrorCode(error_code)
+            if error_code in [StorageErrorCode.condition_not_met]:
+                raise_error = ResourceModifiedError
+            if error_code in [StorageErrorCode.invalid_authentication_info,
+                              StorageErrorCode.authentication_failed]:
+                raise_error = ClientAuthenticationError
+            if error_code in [StorageErrorCode.resource_not_found,
+                              StorageErrorCode.invalid_property_name,
+                              StorageErrorCode.invalid_source_uri,
+                              StorageErrorCode.source_path_not_found,
+                              StorageErrorCode.lease_name_mismatch,
+                              StorageErrorCode.file_system_not_found,
+                              StorageErrorCode.path_not_found,
+                              StorageErrorCode.parent_not_found,
+                              StorageErrorCode.invalid_destination_path,
+                              StorageErrorCode.invalid_rename_source_path,
+                              StorageErrorCode.lease_is_already_broken,
+                              StorageErrorCode.invalid_source_or_destination_resource_type,
+                              StorageErrorCode.rename_destination_parent_path_not_found]:
+                raise_error = ResourceNotFoundError
+            if error_code in [StorageErrorCode.account_already_exists,
+                              StorageErrorCode.account_being_created,
+                              StorageErrorCode.resource_already_exists,
+                              StorageErrorCode.resource_type_mismatch,
+                              StorageErrorCode.source_path_is_being_deleted,
+                              StorageErrorCode.path_already_exists,
+                              StorageErrorCode.destination_path_is_being_deleted,
+                              StorageErrorCode.file_system_already_exists,
+                              StorageErrorCode.file_system_being_deleted,
+                              StorageErrorCode.path_conflict]:
+                raise_error = ResourceExistsError
+    except ValueError:
+        # Got an unknown error code
+        pass
+
+    # Error message should include all the error properties
+    try:
+        error_message += f"\nErrorCode:{error_code.value}"
+    except AttributeError:
+        error_message += f"\nErrorCode:{error_code}"
+    for name, info in additional_data.items():
+        error_message += f"\n{name}:{info}"
+
+    # No need to create an instance if it has already been serialized by the generated layer
+    if serialized:
+        storage_error.message = error_message
+        error = storage_error
+    else:
+        error = raise_error(message=error_message, response=storage_error.response)
+    # Ensure these properties are stored in the error instance as well (not just the error message)
+    error.error_code = error_code
+    error.additional_info = additional_data
+    # error.args is what's surfaced on the traceback - show error message in all cases
+    error.args = (error.message,)
+
+    try:
+        # `from None` prevents us from double printing the exception (suppresses generated layer error context)
+        exec("raise error from None")   # pylint: disable=exec-used # nosec
+    except SyntaxError as exc:
+        raise error from exc
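+
+# Hedged usage sketch: client methods typically funnel generated-layer failures
+# through process_storage_error so callers get the most specific azure.core
+# exception available (the call below is illustrative):
+#
+#     try:
+#         self._client.path.delete(**options)
+#     except HttpResponseError as error:
+#         process_storage_error(error)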
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_download.py b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_download.py
new file mode 100644
index 00000000..da8c879a
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_download.py
@@ -0,0 +1,82 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+from typing import IO, Iterator, Optional
+
+from ._deserialize import from_blob_properties
+
+
+class StorageStreamDownloader(object):
+    """A streaming object to download from Azure Storage.
+
+    :ivar str name:
+        The name of the file being downloaded.
+    :ivar ~azure.storage.filedatalake.FileProperties properties:
+        The properties of the file being downloaded. If only a range of the data is being
+        downloaded, this will be reflected in the properties.
+    :ivar int size:
+        The size of the total data in the stream. This will be the byte range if specified,
+        otherwise the total size of the file.
+    """
+
+    def __init__(self, downloader):
+        self._downloader = downloader
+        self.name = self._downloader.name
+
+        # Parse additional Datalake-only properties
+        encryption_context = self._downloader._response.response.headers.get('x-ms-encryption-context')
+        acl = self._downloader._response.response.headers.get('x-ms-acl')
+
+        self.properties = from_blob_properties(
+            self._downloader.properties,
+            encryption_context=encryption_context,
+            acl=acl)
+        self.size = self._downloader.size
+
+    def __len__(self):
+        return self.size
+
+    def chunks(self) -> Iterator[bytes]:
+        """Iterate over chunks in the download stream.
+
+        :returns: An iterator containing the chunks in the download stream.
+        :rtype: Iterator[bytes]
+        """
+        return self._downloader.chunks()
+
+    def read(self, size: Optional[int] = -1) -> bytes:
+        """
+        Read up to size bytes from the stream and return them. If size
+        is unspecified or is -1, all bytes will be read.
+
+        :param int size:
+            The number of bytes to download from the stream. Leave unspecified
+            or set to -1 to download all bytes.
+        :returns:
+            The requested data as bytes. If the return value is empty, there is no more data to read.
+        :rtype: bytes
+        """
+        return self._downloader.read(size)
+
+    def readall(self) -> bytes:
+        """Download the contents of this file.
+
+        This operation is blocking until all data is downloaded.
+
+        :returns: The contents of the specified file.
+        :rtype: bytes
+        """
+        return self._downloader.readall()
+
+    def readinto(self, stream: IO[bytes]) -> int:
+        """Download the contents of this file to a stream.
+
+        :param IO[bytes] stream:
+            The stream to download to. This can be an open file-handle,
+            or any writable stream. The stream must be seekable if the download
+            uses more than one parallel connection.
+        :returns: The number of bytes read.
+        :rtype: int
+        """
+        return self._downloader.readinto(stream)
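+
+# Hedged usage sketch (illustrative only): a StorageStreamDownloader is normally
+# obtained from DataLakeFileClient.download_file() and consumed through one of
+# the read APIs:
+#
+#     downloader = file_client.download_file()
+#     data = downloader.readall()              # whole file as bytes
+#     with open('out.bin', 'wb') as handle:    # or stream into a file handle
+#         downloader.readinto(handle)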
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_file_system_client.py b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_file_system_client.py
new file mode 100644
index 00000000..7017527f
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_file_system_client.py
@@ -0,0 +1,1074 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+# pylint: disable=too-many-lines, docstring-keyword-should-match-keyword-only
+
+import functools
+from typing import Any, Dict, Optional, Union, TYPE_CHECKING
+from urllib.parse import urlparse, quote, unquote
+
+from typing_extensions import Self
+
+from azure.core.pipeline import Pipeline
+from azure.core.exceptions import HttpResponseError
+from azure.core.paging import ItemPaged
+from azure.core.tracing.decorator import distributed_trace
+from azure.storage.blob import ContainerClient
+from ._shared.base_client import TransportWrapper, StorageAccountHostsMixin, parse_query, parse_connection_str
+from ._serialize import convert_dfs_url_to_blob_url, get_api_version
+from ._list_paths_helper import DeletedPathPropertiesPaged, PathPropertiesPaged
+from ._models import LocationMode, FileSystemProperties, PublicAccess, DeletedPathProperties, FileProperties, \
+    DirectoryProperties
+from ._data_lake_file_client import DataLakeFileClient
+from ._data_lake_directory_client import DataLakeDirectoryClient
+from ._data_lake_lease import DataLakeLeaseClient
+from ._generated import AzureDataLakeStorageRESTAPI
+from ._generated.models import ListBlobsIncludeItem
+from ._deserialize import process_storage_error, is_file_path
+
+if TYPE_CHECKING:
+    from azure.core.credentials import AzureNamedKeyCredential, AzureSasCredential, TokenCredential
+    from datetime import datetime
+    from ._models import AccessPolicy, PathProperties
+
+
+class FileSystemClient(StorageAccountHostsMixin):
+    """A client to interact with a specific file system, even if that file system
+    may not yet exist.
+
+    For operations relating to a specific directory or file within this file system, a directory client or file client
+    can be retrieved using the :func:`~get_directory_client` or :func:`~get_file_client` functions.
+
+    :ivar str url:
+        The full endpoint URL to the file system, including SAS token if used.
+    :ivar str primary_endpoint:
+        The full primary endpoint URL.
+    :ivar str primary_hostname:
+        The hostname of the primary endpoint.
+    :param str account_url:
+        The URI to the storage account.
+    :param file_system_name:
+        The file system for the directory or files.
+    :type file_system_name: str
+    :param credential:
+        The credentials with which to authenticate. This is optional if the
+        account URL already has a SAS token. The value can be a SAS token string,
+        an instance of a AzureSasCredential or AzureNamedKeyCredential from azure.core.credentials,
+        an account shared access key, or an instance of a TokenCredentials class from azure.identity.
+        If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential
+        - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError.
+        If using an instance of AzureNamedKeyCredential, "name" should be the storage account name, and "key"
+        should be the storage account key.
+    :type credential:
+        ~azure.core.credentials.AzureNamedKeyCredential or
+        ~azure.core.credentials.AzureSasCredential or
+        ~azure.core.credentials.TokenCredential or
+        str or dict[str, str] or None
+    :keyword str api_version:
+        The Storage API version to use for requests. Default value is the most recent service version that is
+        compatible with the current SDK. Setting to an older version may result in reduced feature compatibility.
+    :keyword str audience: The audience to use when requesting tokens for Azure Active Directory
+        authentication. Only has an effect when credential is of type TokenCredential. The value could be
+        https://storage.azure.com/ (default) or https://<account>.blob.core.windows.net.
+
+    .. admonition:: Example:
+
+        .. literalinclude:: ../samples/datalake_samples_file_system.py
+            :start-after: [START create_file_system_client_from_service]
+            :end-before: [END create_file_system_client_from_service]
+            :language: python
+            :dedent: 8
+            :caption: Get a FileSystemClient from an existing DataLakeServiceClient.
+    """
+    def __init__(
+        self, account_url: str,
+        file_system_name: str,
+        credential: Optional[Union[str, Dict[str, str], "AzureNamedKeyCredential", "AzureSasCredential", "TokenCredential"]] = None,  # pylint: disable=line-too-long
+        **kwargs: Any
+    ) -> None:
+        try:
+            if not account_url.lower().startswith('http'):
+                account_url = "https://" + account_url
+        except AttributeError as exc:
+            raise ValueError("account URL must be a string.") from exc
+        parsed_url = urlparse(account_url.rstrip('/'))
+        if not file_system_name:
+            raise ValueError("Please specify a file system name.")
+        if not parsed_url.netloc:
+            raise ValueError(f"Invalid URL: {account_url}")
+
+        blob_account_url = convert_dfs_url_to_blob_url(account_url)
+        # TODO: add self.account_url to base_client and remove _blob_account_url
+        self._blob_account_url = blob_account_url
+
+        datalake_hosts = kwargs.pop('_hosts', None)
+        blob_hosts = None
+        if datalake_hosts:
+            blob_primary_account_url = convert_dfs_url_to_blob_url(datalake_hosts[LocationMode.PRIMARY])
+            blob_hosts = {LocationMode.PRIMARY: blob_primary_account_url, LocationMode.SECONDARY: ""}
+        self._container_client = ContainerClient(blob_account_url, file_system_name,
+                                                 credential=credential, _hosts=blob_hosts, **kwargs)
+
+        _, sas_token = parse_query(parsed_url.query)
+        self.file_system_name = file_system_name
+        self._query_str, self._raw_credential = self._format_query_string(sas_token, credential)
+
+        super(FileSystemClient, self).__init__(parsed_url, service='dfs', credential=self._raw_credential,
+                                               _hosts=datalake_hosts, **kwargs)
+        # ADLS doesn't support secondary endpoint, make sure it's empty
+        self._hosts[LocationMode.SECONDARY] = ""
+        self._client = AzureDataLakeStorageRESTAPI(self.url, base_url=self.url,
+                                                   file_system=file_system_name, pipeline=self._pipeline)
+        api_version = get_api_version(kwargs)
+        self._client._config.version = api_version
+        self._datalake_client_for_blob_operation = AzureDataLakeStorageRESTAPI(self._container_client.url,
+                                                                               base_url=self._container_client.url,
+                                                                               file_system=file_system_name,
+                                                                               pipeline=self._pipeline)
+        self._datalake_client_for_blob_operation._config.version = api_version
+
+    def _format_url(self, hostname):
+        file_system_name = self.file_system_name
+        if isinstance(file_system_name, str):
+            file_system_name = file_system_name.encode('UTF-8')
+        return f"{self.scheme}://{hostname}/{quote(file_system_name)}{self._query_str}"
+
+    def __exit__(self, *args):
+        self._container_client.close()
+        self._datalake_client_for_blob_operation.close()
+        super(FileSystemClient, self).__exit__(*args)
+
+    def close(self):
+        # type: () -> None
+        """ This method is to close the sockets opened by the client.
+        It need not be used when using with a context manager.
+        """
+        self.__exit__()
+
+    @classmethod
+    def from_connection_string(
+            cls, conn_str: str,
+            file_system_name: str,
+            credential: Optional[Union[str, Dict[str, str], "AzureNamedKeyCredential", "AzureSasCredential", "TokenCredential"]] = None,  # pylint: disable=line-too-long
+            **kwargs: Any
+        ) -> Self:
+        """
+        Create FileSystemClient from a Connection String.
+
+        :param str conn_str:
+            A connection string to an Azure Storage account.
+        :param file_system_name: The name of file system to interact with.
+        :type file_system_name: str
+        :param credential:
+            The credentials with which to authenticate. This is optional if the
+            account URL already has a SAS token, or the connection string already has shared
+            access key values. The value can be a SAS token string,
+            an instance of a AzureSasCredential or AzureNamedKeyCredential from azure.core.credentials,
+            an account shared access key, or an instance of a TokenCredentials class from azure.identity.
+            Credentials provided here will take precedence over those in the connection string.
+            If using an instance of AzureNamedKeyCredential, "name" should be the storage account name, and "key"
+            should be the storage account key.
+        :type credential:
+            ~azure.core.credentials.AzureNamedKeyCredential or
+            ~azure.core.credentials.AzureSasCredential or
+            ~azure.core.credentials.TokenCredential or
+            str or dict[str, str] or None
+        :keyword str audience: The audience to use when requesting tokens for Azure Active Directory
+            authentication. Only has an effect when credential is of type TokenCredential. The value could be
+            https://storage.azure.com/ (default) or https://<account>.blob.core.windows.net.
+        :returns: A FileSystemClient.
+        :rtype: ~azure.storage.filedatalake.FileSystemClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_file_system.py
+                :start-after: [START create_file_system_client_from_connection_string]
+                :end-before: [END create_file_system_client_from_connection_string]
+                :language: python
+                :dedent: 8
+                :caption: Create FileSystemClient from connection string
+        """
+        account_url, _, credential = parse_connection_str(conn_str, credential, 'dfs')
+        return cls(
+            account_url, file_system_name=file_system_name, credential=credential, **kwargs)
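+
+    # Hedged inline sketch (the connection string below is a placeholder):
+    #
+    #     fs_client = FileSystemClient.from_connection_string(
+    #         conn_str="DefaultEndpointsProtocol=https;AccountName=...;AccountKey=...",
+    #         file_system_name="myfilesystem")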
+
+    @distributed_trace
+    def acquire_lease(
+        self, lease_duration=-1,  # type: int
+        lease_id=None,  # type: Optional[str]
+        **kwargs
+    ):
+        # type: (...) -> DataLakeLeaseClient
+        """
+        Requests a new lease. If the file system does not have an active lease,
+        the DataLake service creates a lease on the file system and returns a new
+        lease ID.
+
+        :param int lease_duration:
+            Specifies the duration of the lease, in seconds, or negative one
+            (-1) for a lease that never expires. A non-infinite lease can be
+            between 15 and 60 seconds. A lease duration cannot be changed
+            using renew or change. Default is -1 (infinite lease).
+        :param str lease_id:
+            Proposed lease ID, in a GUID string format. The DataLake service returns
+            400 (Invalid request) if the proposed lease ID is not in the correct format.
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :returns: A DataLakeLeaseClient object, that can be run in a context manager.
+        :rtype: ~azure.storage.filedatalake.DataLakeLeaseClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_file_system.py
+                :start-after: [START acquire_lease_on_file_system]
+                :end-before: [END acquire_lease_on_file_system]
+                :language: python
+                :dedent: 8
+                :caption: Acquiring a lease on the file system.
+        """
+        lease = DataLakeLeaseClient(self, lease_id=lease_id)
+        lease.acquire(lease_duration=lease_duration, **kwargs)
+        return lease
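+
+    # Hedged inline sketch: take a 30-second lease and release it when done.
+    #
+    #     lease = file_system_client.acquire_lease(lease_duration=30)
+    #     lease.release()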
+
+    @distributed_trace
+    def create_file_system(self, metadata=None,  # type: Optional[Dict[str, str]]
+                           public_access=None,  # type: Optional[PublicAccess]
+                           **kwargs):
+        # type: (...) ->  Dict[str, Union[str, datetime]]
+        """Creates a new file system under the specified account.
+
+        If a file system with the same name already exists, a ResourceExistsError will
+        be raised. This method returns a dictionary of response headers for the newly
+        created file system.
+
+        :param metadata:
+            A dict with name-value pairs to associate with the
+            file system as metadata. Example: `{'Category':'test'}`
+        :type metadata: dict(str, str)
+        :param public_access:
+            To specify whether data in the file system may be accessed publicly and the level of access.
+        :type public_access: ~azure.storage.filedatalake.PublicAccess
+        :keyword encryption_scope_options:
+            Specifies the default encryption scope to set on the file system and use for
+            all future writes.
+
+            .. versionadded:: 12.9.0
+
+        :paramtype encryption_scope_options: dict or ~azure.storage.filedatalake.EncryptionScopeOptions
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :returns: A dictionary of response headers.
+        :rtype: dict[str, Union[str, datetime]]
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_file_system.py
+                :start-after: [START create_file_system]
+                :end-before: [END create_file_system]
+                :language: python
+                :dedent: 12
+                :caption: Creating a file system in the datalake service.
+        """
+        encryption_scope_options = kwargs.pop('encryption_scope_options', None)
+        return self._container_client.create_container(metadata=metadata,
+                                                       public_access=public_access,
+                                                       container_encryption_scope=encryption_scope_options,
+                                                       **kwargs)
+
+    @distributed_trace
+    def exists(self, **kwargs):
+        # type: (**Any) -> bool
+        """
+        Returns True if the file system exists, False otherwise.
+
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :returns: True if the file system exists, False otherwise.
+        :rtype: bool
+        """
+        return self._container_client.exists(**kwargs)
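+
+    # Hedged inline sketch: a common check-then-create pattern.
+    #
+    #     if not file_system_client.exists():
+    #         file_system_client.create_file_system()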
+
+    def _rename_file_system(self, new_name, **kwargs):
+        # type: (str, **Any) -> FileSystemClient
+        """Renames a filesystem.
+
+        Operation is successful only if the source filesystem exists.
+
+        :param str new_name:
+            The new filesystem name the user wants to rename to.
+        :keyword lease:
+            Specify this to perform only if the lease ID given
+            matches the active lease ID of the source filesystem.
+        :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :returns: FileSystemClient with renamed properties.
+        :rtype: ~azure.storage.filedatalake.FileSystemClient
+        """
+        self._container_client._rename_container(new_name, **kwargs)   # pylint: disable=protected-access
+        # TODO: self._raw_credential would not work with SAS tokens
+        renamed_file_system = FileSystemClient(
+                f"{self.scheme}://{self.primary_hostname}", file_system_name=new_name,
+                credential=self._raw_credential, api_version=self.api_version, _configuration=self._config,
+                _pipeline=self._pipeline, _location_mode=self._location_mode, _hosts=self._hosts)
+        return renamed_file_system
+
+    @distributed_trace
+    def delete_file_system(self, **kwargs):
+        # type: (Any) -> None
+        """Marks the specified file system for deletion.
+
+        The file system and any files contained within it are later deleted during garbage collection.
+        If the file system is not found, a ResourceNotFoundError will be raised.
+
+        :keyword str or ~azure.storage.filedatalake.DataLakeLeaseClient lease:
+            If specified, delete_file_system only succeeds if the
+            file system's lease is active and matches this ID.
+            Required if the file system has an active lease.
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :rtype: None
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_file_system.py
+                :start-after: [START delete_file_system]
+                :end-before: [END delete_file_system]
+                :language: python
+                :dedent: 12
+                :caption: Deleting a file system in the datalake service.
+        """
+        self._container_client.delete_container(**kwargs)
+
+    @distributed_trace
+    def get_file_system_properties(self, **kwargs):
+        # type: (Any) -> FileSystemProperties
+        """Returns all user-defined metadata and system properties for the specified
+        file system. The data returned does not include the file system's list of paths.
+
+        :keyword str or ~azure.storage.filedatalake.DataLakeLeaseClient lease:
+            If specified, get_file_system_properties only succeeds if the
+            file system's lease is active and matches this ID.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :return: Properties for the specified file system within a file system object.
+        :rtype: ~azure.storage.filedatalake.FileSystemProperties
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_file_system.py
+                :start-after: [START get_file_system_properties]
+                :end-before: [END get_file_system_properties]
+                :language: python
+                :dedent: 12
+                :caption: Getting properties on the file system.
+        """
+        container_properties = self._container_client.get_container_properties(**kwargs)
+        return FileSystemProperties._convert_from_container_props(container_properties)  # pylint: disable=protected-access
+
+    @distributed_trace
+    def set_file_system_metadata(  # type: ignore
+        self, metadata,  # type: Dict[str, str]
+        **kwargs
+    ):
+        # type: (...) -> Dict[str, Union[str, datetime]]
+        """Sets one or more user-defined name-value pairs for the specified
+        file system. Each call to this operation replaces all existing metadata
+        attached to the file system. To remove all metadata from the file system,
+        call this operation with no metadata dict.
+
+        :param metadata:
+            A dict containing name-value pairs to associate with the file system as
+            metadata. Example: {'category':'test'}
+        :type metadata: dict[str, str]
+        :keyword str or ~azure.storage.filedatalake.DataLakeLeaseClient lease:
+            If specified, set_file_system_metadata only succeeds if the
+            file system's lease is active and matches this ID.
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :returns: filesystem-updated property dict (Etag and last modified).
+        :rtype: dict[str, str] or dict[str, ~datetime.datetime]
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_file_system.py
+                :start-after: [START set_file_system_metadata]
+                :end-before: [END set_file_system_metadata]
+                :language: python
+                :dedent: 12
+                :caption: Setting metadata on the file system.
+        """
+        return self._container_client.set_container_metadata(metadata=metadata, **kwargs)
+
+    @distributed_trace
+    def set_file_system_access_policy(
+            self, signed_identifiers,  # type: Dict[str, AccessPolicy]
+            public_access=None,  # type: Optional[Union[str, PublicAccess]]
+            **kwargs
+    ):  # type: (...) -> Dict[str, Union[str, datetime]]
+        """Sets the permissions for the specified file system or stored access
+        policies that may be used with Shared Access Signatures. The permissions
+        indicate whether files in a file system may be accessed publicly.
+
+        :param signed_identifiers:
+            A dictionary of access policies to associate with the file system. The
+            dictionary may contain up to 5 elements. An empty dictionary
+            will clear the access policies set on the service.
+        :type signed_identifiers: dict[str, ~azure.storage.filedatalake.AccessPolicy]
+        :param ~azure.storage.filedatalake.PublicAccess public_access:
+            To specify whether data in the file system may be accessed publicly and the level of access.
+        :keyword lease:
+            Required if the file system has an active lease. Value can be a DataLakeLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A datetime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified date/time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A datetime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :returns: Filesystem-updated property dict (Etag and last modified).
+        :rtype: dict[str, str] or dict[str, ~datetime.datetime]
+        """
+        return self._container_client.set_container_access_policy(signed_identifiers,
+                                                                  public_access=public_access, **kwargs)
+
+    @distributed_trace
+    def get_file_system_access_policy(self, **kwargs):
+        # type: (Any) -> Dict[str, Any]
+        """Gets the permissions for the specified file system.
+        The permissions indicate whether file system data may be accessed publicly.
+
+        :keyword lease:
+            If specified, the operation only succeeds if the
+            file system's lease is active and matches this ID.
+        :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :returns: Access policy information in a dict.
+        :rtype: dict[str, Any]
+        """
+        access_policy = self._container_client.get_container_access_policy(**kwargs)
+        return {
+            'public_access': PublicAccess._from_generated(access_policy['public_access']),  # pylint: disable=protected-access
+            'signed_identifiers': access_policy['signed_identifiers']
+        }
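+
+    # Hedged usage sketch covering both access-policy methods; AccessPolicy and
+    # FileSystemSasPermissions are assumed to come from azure.storage.filedatalake.
+    #
+    #     from datetime import datetime, timedelta
+    #     policy = AccessPolicy(permission=FileSystemSasPermissions(read=True),
+    #                           expiry=datetime.utcnow() + timedelta(hours=1))
+    #     file_system_client.set_file_system_access_policy({'read-only': policy})
+    #     current = file_system_client.get_file_system_access_policy()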
+
+    @distributed_trace
+    def get_paths(
+        self, path: Optional[str] = None,
+        recursive: Optional[bool] = True,
+        max_results: Optional[int] = None,
+        **kwargs: Any
+    ) -> ItemPaged["PathProperties"]:
+        """Returns a generator to list the paths(could be files or directories) under the specified file system.
+        The generator will lazily follow the continuation tokens returned by
+        the service.
+
+        :param Optional[str] path:
+            Filters the results to return only paths under the specified path.
+        :param Optional[bool] recursive: Set True for a recursive listing, False to list only the immediate children. Default is True.
+        :param Optional[int] max_results: An optional value that specifies the maximum
+            number of items to return per page. If omitted or greater than 5,000, the
+            response will include up to 5,000 items per page.
+        :keyword bool upn:
+            If True, the user identity values returned in the x-ms-owner, x-ms-group,
+            and x-ms-acl response headers will be transformed from Azure Active Directory Object IDs to User
+            Principal Names in the owner, group, and acl fields of
+            :class:`~azure.storage.filedatalake.PathProperties`. If False, the values will be returned
+            as Azure Active Directory Object IDs. The default value is False. Note that group and application
+            Object IDs are not translated because they do not have unique friendly names.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :returns: An iterable (auto-paging) response of PathProperties.
+        :rtype: ~azure.core.paging.ItemPaged[~azure.storage.filedatalake.PathProperties]
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_file_system.py
+                :start-after: [START get_paths_in_file_system]
+                :end-before: [END get_paths_in_file_system]
+                :language: python
+                :dedent: 8
+                :caption: List the paths in the file system.
+        """
+        timeout = kwargs.pop('timeout', None)
+        command = functools.partial(
+            self._client.file_system.list_paths,
+            path=path,
+            timeout=timeout,
+            **kwargs)
+        return ItemPaged(
+            command, recursive, path=path, max_results=max_results,
+            page_iterator_class=PathPropertiesPaged, **kwargs)
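+
+    # Hedged inline sketch: walk everything under 'raw/' and print each entry.
+    #
+    #     for path in file_system_client.get_paths(path='raw', recursive=True):
+    #         print(path.name, path.is_directory)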
+
+    @distributed_trace
+    def create_directory(self, directory,  # type: Union[DirectoryProperties, str]
+                         metadata=None,  # type: Optional[Dict[str, str]]
+                         **kwargs):
+        # type: (...) -> DataLakeDirectoryClient
+        """
+        Create a directory in the file system.
+
+        :param directory:
+            The directory with which to interact. This can either be the name of the directory,
+            or an instance of DirectoryProperties.
+        :type directory: str or ~azure.storage.filedatalake.DirectoryProperties
+        :param metadata:
+            Name-value pairs associated with the file as metadata.
+        :type metadata: dict(str, str)
+        :keyword ~azure.storage.filedatalake.ContentSettings content_settings:
+            ContentSettings object used to set path properties.
+        :keyword lease:
+            Required if the directory has an active lease. Value can be a DataLakeLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
+        :keyword str umask:
+            Optional and only valid if Hierarchical Namespace is enabled for the account.
+            When creating a file or directory and the parent folder does not have a default ACL,
+            the umask restricts the permissions of the file or directory to be created.
+            The resulting permission is given by p & ^u, where p is the permission and u is the umask.
+            For example, if p is 0777 and u is 0057, then the resulting permission is 0720.
+            The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027.
+            The umask must be specified in 4-digit octal notation (e.g. 0766).
+        :keyword str owner:
+            The owner of the file or directory.
+        :keyword str group:
+            The owning group of the file or directory.
+        :keyword str acl:
+            Sets POSIX access control rights on files and directories. The value is a
+            comma-separated list of access control entries. Each access control entry (ACE) consists of a
+            scope, a type, a user or group identifier, and permissions in the format
+            "[scope:][type]:[id]:[permissions]".
+        :keyword str lease_id:
+            Proposed lease ID, in a GUID string format. The DataLake service returns
+            400 (Invalid request) if the proposed lease ID is not in the correct format.
+        :keyword int lease_duration:
+            Specifies the duration of the lease, in seconds, or negative one
+            (-1) for a lease that never expires. A non-infinite lease can be
+            between 15 and 60 seconds. A lease duration cannot be changed
+            using renew or change.
+        :keyword str permissions:
+            Optional and only valid if Hierarchical Namespace
+            is enabled for the account. Sets POSIX access permissions for the file
+            owner, the file owning group, and others. Each class may be granted
+            read, write, or execute permission.  The sticky bit is also supported.
+            Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are
+            supported.
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :returns: DataLakeDirectoryClient with the new directory and metadata.
+        :rtype: ~azure.storage.filedatalake.DataLakeDirectoryClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_file_system.py
+                :start-after: [START create_directory_from_file_system]
+                :end-before: [END create_directory_from_file_system]
+                :language: python
+                :dedent: 8
+                :caption: Create directory in the file system.
+        """
+        directory_client = self.get_directory_client(directory)
+        directory_client.create_directory(metadata=metadata, **kwargs)
+        return directory_client
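+
+    # Hedged inline sketch:
+    #
+    #     dir_client = file_system_client.create_directory("logs/2024")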
+
+    @distributed_trace
+    def delete_directory(self, directory,  # type: Union[DirectoryProperties, str]
+                         **kwargs):
+        # type: (...) -> DataLakeDirectoryClient
+        """
+        Marks the specified directory for deletion.
+
+        :param directory:
+            The directory with which to interact. This can either be the name of the directory,
+            or an instance of DirectoryProperties.
+        :type directory: str or ~azure.storage.filedatalake.DirectoryProperties
+        :keyword lease:
+            Required if the directory has an active lease. Value can be a DataLakeLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :returns: DataLakeDirectoryClient after deleting the specified directory.
+        :rtype: ~azure.storage.filedatalake.DataLakeDirectoryClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_file_system.py
+                :start-after: [START delete_directory_from_file_system]
+                :end-before: [END delete_directory_from_file_system]
+                :language: python
+                :dedent: 8
+                :caption: Delete directory in the file system.
+        """
+        directory_client = self.get_directory_client(directory)
+        directory_client.delete_directory(**kwargs)
+        return directory_client
+
+    @distributed_trace
+    def create_file(self, file,  # type: Union[FileProperties, str]
+                    **kwargs):
+        # type: (...) -> DataLakeFileClient
+        """
+        Create a file in the file system.
+
+        :param file:
+            The file with which to interact. This can either be the name of the file,
+            or an instance of FileProperties.
+        :type file: str or ~azure.storage.filedatalake.FileProperties
+        :keyword ~azure.storage.filedatalake.ContentSettings content_settings:
+            ContentSettings object used to set path properties.
+        :keyword metadata:
+            Name-value pairs associated with the file as metadata.
+        :paramtype metadata: dict[str, str]
+        :keyword lease:
+            Required if the file has an active lease. Value can be a DataLakeLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
+        :keyword str umask:
+            Optional and only valid if Hierarchical Namespace is enabled for the account.
+            When creating a file or directory and the parent folder does not have a default ACL,
+            the umask restricts the permissions of the file or directory to be created.
+            The resulting permission is given by p & ^u, where p is the permission and u is the umask.
+            For example, if p is 0777 and u is 0057, then the resulting permission is 0720.
+            The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027.
+            The umask must be specified in 4-digit octal notation (e.g. 0766).
+        :keyword str owner:
+            The owner of the file or directory.
+        :keyword str group:
+            The owning group of the file or directory.
+        :keyword str acl:
+            Sets POSIX access control rights on files and directories. The value is a
+            comma-separated list of access control entries. Each access control entry (ACE) consists of a
+            scope, a type, a user or group identifier, and permissions in the format
+            "[scope:][type]:[id]:[permissions]".
+        :keyword str lease_id:
+            Proposed lease ID, in a GUID string format. The DataLake service returns
+            400 (Invalid request) if the proposed lease ID is not in the correct format.
+        :keyword int lease_duration:
+            Specifies the duration of the lease, in seconds, or negative one
+            (-1) for a lease that never expires. A non-infinite lease can be
+            between 15 and 60 seconds. A lease duration cannot be changed
+            using renew or change.
+        :keyword expires_on:
+            The time to set the file to expire.
+            If the type of expires_on is an int, expiration time will be set
+            as the number of milliseconds elapsed from creation time.
+            If the type of expires_on is datetime, expiration time will be set
+            absolute to the time provided. If no time zone info is provided, this
+            will be interpreted as UTC.
+        :paramtype expires_on: datetime or int
+        :keyword str permissions:
+            Optional and only valid if Hierarchical Namespace
+            is enabled for the account. Sets POSIX access permissions for the file
+            owner, the file owning group, and others. Each class may be granted
+            read, write, or execute permission.  The sticky bit is also supported.
+            Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are
+            supported.
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :returns: A DataLakeFileClient for the newly created file.
+        :rtype: ~azure.storage.filedatalake.DataLakeFileClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_file_system.py
+                :start-after: [START create_file_from_file_system]
+                :end-before: [END create_file_from_file_system]
+                :language: python
+                :dedent: 8
+                :caption: Create file in the file system.
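+
+        A minimal inline sketch (paths and data are illustrative; assumes
+        ``file_system_client`` is an authenticated FileSystemClient):
+
+        .. code-block:: python
+
+            file_client = file_system_client.create_file(
+                "dir/newfile.txt", permissions="0777", umask="0022")
+            file_client.upload_data(b"hello world", overwrite=True)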
+        """
+        file_client = self.get_file_client(file)
+        file_client.create_file(**kwargs)
+        return file_client
+
+    @distributed_trace
+    def delete_file(self, file,  # type: Union[FileProperties, str]
+                    **kwargs):
+        # type: (...) -> DataLakeFileClient
+        """
+        Marks the specified file for deletion.
+
+        :param file:
+            The file with which to interact. This can either be the name of the file,
+            or an instance of FileProperties.
+        :type file: str or ~azure.storage.filedatalake.FileProperties
+        :keyword lease:
+            Required if the file has an active lease. Value can be a LeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :return: DataLakeFileClient after deleting the specified file.
+        :rtype: ~azure.storage.filedatalake.DataLakeFileClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_file_system.py
+                :start-after: [START delete_file_from_file_system]
+                :end-before: [END delete_file_from_file_system]
+                :language: python
+                :dedent: 8
+                :caption: Delete file in the file system.
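+
+        A minimal inline sketch (the path is illustrative; assumes
+        ``file_system_client`` is an authenticated FileSystemClient):
+
+        .. code-block:: python
+
+            from azure.core import MatchConditions
+
+            props = file_system_client.get_file_client("dir/file.txt").get_file_properties()
+            file_system_client.delete_file(
+                "dir/file.txt",
+                etag=props.etag,
+                match_condition=MatchConditions.IfNotModified)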
+        """
+        file_client = self.get_file_client(file)
+        file_client.delete_file(**kwargs)
+        return file_client
+
+    def _undelete_path_options(self, deleted_path_name, deletion_id):
+        quoted_path = quote(unquote(deleted_path_name.strip('/')))
+
+        url_and_token = self.url.replace('.dfs.', '.blob.').split('?')
+        try:
+            # Re-insert the '?' dropped by split('?') so any SAS token stays intact.
+            url = url_and_token[0] + '/' + quoted_path + '?' + url_and_token[1]
+        except IndexError:
+            url = url_and_token[0] + '/' + quoted_path
+
+        undelete_source = quoted_path + f'?deletionid={deletion_id}' if deletion_id else None
+
+        return quoted_path, url, undelete_source
+
+    def _undelete_path(self, deleted_path_name, deletion_id, **kwargs):
+        # type: (str, str, **Any) -> Union[DataLakeDirectoryClient, DataLakeFileClient]
+        """Restores soft-deleted path.
+
+        Operation will only be successful if used within the specified number of days
+        set in the delete retention policy.
+
+        .. versionadded:: 12.4.0
+            This operation was introduced in API version '2020-06-12'.
+
+        :param str deleted_path_name:
+            Specifies the path (file or directory) to restore.
+        :param str deletion_id:
+            Specifies the version of the deleted path to restore.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :returns: The DataLake client for the restored soft-deleted path.
+        :rtype: ~azure.storage.filedatalake.DataLakeDirectoryClient or
+            ~azure.storage.filedatalake.DataLakeFileClient
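+
+        A minimal sketch (illustrative; since this helper is private, the
+        deletion id would typically come from list_deleted_paths):
+
+        .. code-block:: python
+
+            deleted = next(iter(file_system_client.list_deleted_paths()))
+            restored = file_system_client._undelete_path(deleted.name, deleted.deletion_id)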
+        """
+        _, url, undelete_source = self._undelete_path_options(deleted_path_name, deletion_id)
+
+        pipeline = Pipeline(
+            transport=TransportWrapper(self._pipeline._transport), # pylint: disable = protected-access
+            policies=self._pipeline._impl_policies # pylint: disable = protected-access
+        )
+        path_client = AzureDataLakeStorageRESTAPI(
+            url, filesystem=self.file_system_name, path=deleted_path_name, pipeline=pipeline)
+        try:
+            is_file = path_client.path.undelete(undelete_source=undelete_source, cls=is_file_path, **kwargs)
+            if is_file:
+                return self.get_file_client(deleted_path_name)
+            return self.get_directory_client(deleted_path_name)
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    def _get_root_directory_client(self):
+        # type: () -> DataLakeDirectoryClient
+        """Get a client to interact with the root directory.
+
+        :returns: A DataLakeDirectoryClient.
+        :rtype: ~azure.storage.filedatalake.DataLakeDirectoryClient
+        """
+        return self.get_directory_client('/')
+
+    def get_directory_client(self, directory  # type: Union[DirectoryProperties, str]
+                             ):
+        # type: (...) -> DataLakeDirectoryClient
+        """Get a client to interact with the specified directory.
+
+        The directory need not already exist.
+
+        :param directory:
+            The directory with which to interact. This can either be the name of the directory,
+            or an instance of DirectoryProperties.
+        :type directory: str or ~azure.storage.filedatalake.DirectoryProperties
+        :returns: A DataLakeDirectoryClient.
+        :rtype: ~azure.storage.filedatalake.DataLakeDirectoryClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_file_system.py
+                :start-after: [START get_directory_client_from_file_system]
+                :end-before: [END get_directory_client_from_file_system]
+                :language: python
+                :dedent: 8
+                :caption: Getting the directory client to interact with a specific directory.
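+
+        A minimal inline sketch (the directory name is illustrative):
+
+        .. code-block:: python
+
+            directory_client = file_system_client.get_directory_client("my-directory")
+            directory_client.create_directory()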
+        """
+        try:
+            directory_name = directory.get('name')
+        except AttributeError:
+            directory_name = str(directory)
+        _pipeline = Pipeline(
+            transport=TransportWrapper(self._pipeline._transport), # pylint: disable = protected-access
+            policies=self._pipeline._impl_policies # pylint: disable = protected-access
+        )
+        return DataLakeDirectoryClient(self.url, self.file_system_name, directory_name=directory_name,
+                                       credential=self._raw_credential,
+                                       api_version=self.api_version,
+                                       _configuration=self._config, _pipeline=_pipeline,
+                                       _hosts=self._hosts)
+
+    def get_file_client(self, file_path  # type: Union[FileProperties, str]
+                        ):
+        # type: (...) -> DataLakeFileClient
+        """Get a client to interact with the specified file.
+
+        The file need not already exist.
+
+        :param file_path:
+            The file with which to interact. This can either be the path of the file
+            (relative to the root directory, e.g. directory/subdirectory/file),
+            or an instance of FileProperties.
+        :type file_path: str or ~azure.storage.filedatalake.FileProperties
+        :returns: A DataLakeFileClient.
+        :rtype: ~azure.storage.filedatalake.DataLakeFileClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_file_system.py
+                :start-after: [START get_file_client_from_file_system]
+                :end-before: [END get_file_client_from_file_system]
+                :language: python
+                :dedent: 8
+                :caption: Getting the file client to interact with a specific file.
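+
+        A minimal inline sketch (the path is illustrative; assumes the file exists):
+
+        .. code-block:: python
+
+            file_client = file_system_client.get_file_client("dir/sub/file.txt")
+            contents = file_client.download_file().readall()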
+        """
+        try:
+            file_path = file_path.get('name')
+        except AttributeError:
+            file_path = str(file_path)
+        _pipeline = Pipeline(
+            transport=TransportWrapper(self._pipeline._transport), # pylint: disable = protected-access
+            policies=self._pipeline._impl_policies # pylint: disable = protected-access
+        )
+        return DataLakeFileClient(
+            self.url, self.file_system_name, file_path=file_path, credential=self._raw_credential,
+            api_version=self.api_version,
+            _hosts=self._hosts, _configuration=self._config, _pipeline=_pipeline)
+
+    @distributed_trace
+    def list_deleted_paths(self, **kwargs):
+        # type: (Any) -> ItemPaged[DeletedPathProperties]
+        """Returns a generator to list the deleted (file or directory) paths under the specified file system.
+        The generator will lazily follow the continuation tokens returned by
+        the service.
+
+        .. versionadded:: 12.4.0
+            This operation was introduced in API version '2020-06-12'.
+
+        :keyword str path_prefix:
+            Filters the results to return only paths under the specified path.
+        :keyword int results_per_page:
+            An optional value that specifies the maximum number of items to return per page.
+            If omitted or greater than 5,000, the response will include up to 5,000 items per page.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :returns: An iterable (auto-paging) response of DeletedPathProperties.
+        :rtype:
+            ~azure.core.paging.ItemPaged[~azure.storage.filedatalake.DeletedPathProperties]
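+
+        A minimal inline sketch (illustrative; assumes soft delete is enabled
+        on the account):
+
+        .. code-block:: python
+
+            for deleted in file_system_client.list_deleted_paths(path_prefix="logs/"):
+                print(deleted.name, deleted.deletion_id)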
+        """
+        path_prefix = kwargs.pop('path_prefix', None)
+        timeout = kwargs.pop('timeout', None)
+        results_per_page = kwargs.pop('results_per_page', None)
+        command = functools.partial(
+            self._datalake_client_for_blob_operation.file_system.list_blob_hierarchy_segment,
+            showonly=ListBlobsIncludeItem.deleted,
+            timeout=timeout,
+            **kwargs)
+        return ItemPaged(
+            command, prefix=path_prefix, page_iterator_class=DeletedPathPropertiesPaged,
+            results_per_page=results_per_page, **kwargs)
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/__init__.py b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/__init__.py
new file mode 100644
index 00000000..8a9f7149
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/__init__.py
@@ -0,0 +1,29 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+# pylint: disable=wrong-import-position
+
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from ._patch import *  # pylint: disable=unused-wildcard-import
+
+from ._azure_data_lake_storage_restapi import AzureDataLakeStorageRESTAPI  # type: ignore
+
+try:
+    from ._patch import __all__ as _patch_all
+    from ._patch import *
+except ImportError:
+    _patch_all = []
+from ._patch import patch_sdk as _patch_sdk
+
+__all__ = [
+    "AzureDataLakeStorageRESTAPI",
+]
+__all__.extend([p for p in _patch_all if p not in __all__])  # pyright: ignore
+
+_patch_sdk()
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/_azure_data_lake_storage_restapi.py b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/_azure_data_lake_storage_restapi.py
new file mode 100644
index 00000000..ae1c9c2d
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/_azure_data_lake_storage_restapi.py
@@ -0,0 +1,112 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+from copy import deepcopy
+from typing import Any, Optional
+from typing_extensions import Self
+
+from azure.core import PipelineClient
+from azure.core.pipeline import policies
+from azure.core.rest import HttpRequest, HttpResponse
+
+from . import models as _models
+from ._configuration import AzureDataLakeStorageRESTAPIConfiguration
+from ._serialization import Deserializer, Serializer
+from .operations import FileSystemOperations, PathOperations, ServiceOperations
+
+
+class AzureDataLakeStorageRESTAPI:  # pylint: disable=client-accepts-api-version-keyword
+    """Azure Data Lake Storage provides storage for Hadoop and other big data workloads.
+
+    :ivar service: ServiceOperations operations
+    :vartype service: azure.storage.filedatalake.operations.ServiceOperations
+    :ivar file_system: FileSystemOperations operations
+    :vartype file_system: azure.storage.filedatalake.operations.FileSystemOperations
+    :ivar path: PathOperations operations
+    :vartype path: azure.storage.filedatalake.operations.PathOperations
+    :param url: The URL of the service account, container, or blob that is the target of the
+     desired operation. Required.
+    :type url: str
+    :param base_url: Service URL. Required. Default value is "".
+    :type base_url: str
+    :param x_ms_lease_duration: The lease duration is required to acquire a lease, and specifies
+     the duration of the lease in seconds.  The lease duration must be between 15 and 60 seconds or
+     -1 for infinite lease. Default value is None.
+    :type x_ms_lease_duration: int
+    :keyword resource: The value must be "filesystem" for all filesystem operations. Default value
+     is "filesystem". Note that overriding this default value may result in unsupported behavior.
+    :paramtype resource: str
+    :keyword version: Specifies the version of the operation to use for this request. Default value
+     is "2025-01-05". Note that overriding this default value may result in unsupported behavior.
+    :paramtype version: str
+    """
+
+    def __init__(  # pylint: disable=missing-client-constructor-parameter-credential
+        self, url: str, base_url: str = "", x_ms_lease_duration: Optional[int] = None, **kwargs: Any
+    ) -> None:
+        self._config = AzureDataLakeStorageRESTAPIConfiguration(
+            url=url, x_ms_lease_duration=x_ms_lease_duration, **kwargs
+        )
+        _policies = kwargs.pop("policies", None)
+        if _policies is None:
+            _policies = [
+                policies.RequestIdPolicy(**kwargs),
+                self._config.headers_policy,
+                self._config.user_agent_policy,
+                self._config.proxy_policy,
+                policies.ContentDecodePolicy(**kwargs),
+                self._config.redirect_policy,
+                self._config.retry_policy,
+                self._config.authentication_policy,
+                self._config.custom_hook_policy,
+                self._config.logging_policy,
+                policies.DistributedTracingPolicy(**kwargs),
+                policies.SensitiveHeaderCleanupPolicy(**kwargs) if self._config.redirect_policy else None,
+                self._config.http_logging_policy,
+            ]
+        self._client: PipelineClient = PipelineClient(base_url=base_url, policies=_policies, **kwargs)
+
+        client_models = {k: v for k, v in _models.__dict__.items() if isinstance(v, type)}
+        self._serialize = Serializer(client_models)
+        self._deserialize = Deserializer(client_models)
+        self._serialize.client_side_validation = False
+        self.service = ServiceOperations(self._client, self._config, self._serialize, self._deserialize)
+        self.file_system = FileSystemOperations(self._client, self._config, self._serialize, self._deserialize)
+        self.path = PathOperations(self._client, self._config, self._serialize, self._deserialize)
+
+    def _send_request(self, request: HttpRequest, *, stream: bool = False, **kwargs: Any) -> HttpResponse:
+        """Runs the network request through the client's chained policies.
+
+        >>> from azure.core.rest import HttpRequest
+        >>> request = HttpRequest("GET", "https://www.example.org/")
+        >>> request
+        <HttpRequest [GET], url: 'https://www.example.org/'>
+        >>> response = client._send_request(request)
+        >>> response
+        <HttpResponse: 200 OK>
+
+        For more information on this code flow, see https://aka.ms/azsdk/dpcodegen/python/send_request
+
+        :param request: The network request you want to make. Required.
+        :type request: ~azure.core.rest.HttpRequest
+        :keyword bool stream: Whether the response payload will be streamed. Defaults to False.
+        :return: The response of your network call. Does not do error handling on your response.
+        :rtype: ~azure.core.rest.HttpResponse
+        """
+
+        request_copy = deepcopy(request)
+        request_copy.url = self._client.format_url(request_copy.url)
+        return self._client.send_request(request_copy, stream=stream, **kwargs)  # type: ignore
+
+    def close(self) -> None:
+        self._client.close()
+
+    def __enter__(self) -> Self:
+        self._client.__enter__()
+        return self
+
+    def __exit__(self, *exc_details: Any) -> None:
+        self._client.__exit__(*exc_details)
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/_configuration.py b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/_configuration.py
new file mode 100644
index 00000000..ce7d9c28
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/_configuration.py
@@ -0,0 +1,61 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+from typing import Any, Literal, Optional
+
+from azure.core.pipeline import policies
+
+VERSION = "unknown"
+
+
+class AzureDataLakeStorageRESTAPIConfiguration:  # pylint: disable=too-many-instance-attributes
+    """Configuration for AzureDataLakeStorageRESTAPI.
+
+    Note that all parameters used to create this instance are saved as instance
+    attributes.
+
+    :param url: The URL of the service account, container, or blob that is the target of the
+     desired operation. Required.
+    :type url: str
+    :param x_ms_lease_duration: The lease duration is required to acquire a lease, and specifies
+     the duration of the lease in seconds.  The lease duration must be between 15 and 60 seconds or
+     -1 for infinite lease. Default value is None.
+    :type x_ms_lease_duration: int
+    :keyword resource: The value must be "filesystem" for all filesystem operations. Default value
+     is "filesystem". Note that overriding this default value may result in unsupported behavior.
+    :paramtype resource: str
+    :keyword version: Specifies the version of the operation to use for this request. Default value
+     is "2025-01-05". Note that overriding this default value may result in unsupported behavior.
+    :paramtype version: str
+    """
+
+    def __init__(self, url: str, x_ms_lease_duration: Optional[int] = None, **kwargs: Any) -> None:
+        resource: Literal["filesystem"] = kwargs.pop("resource", "filesystem")
+        version: Literal["2025-01-05"] = kwargs.pop("version", "2025-01-05")
+
+        if url is None:
+            raise ValueError("Parameter 'url' must not be None.")
+
+        self.url = url
+        self.x_ms_lease_duration = x_ms_lease_duration
+        self.resource = resource
+        self.version = version
+        kwargs.setdefault("sdk_moniker", "azuredatalakestoragerestapi/{}".format(VERSION))
+        self.polling_interval = kwargs.get("polling_interval", 30)
+        self._configure(**kwargs)
+
+    def _configure(self, **kwargs: Any) -> None:
+        self.user_agent_policy = kwargs.get("user_agent_policy") or policies.UserAgentPolicy(**kwargs)
+        self.headers_policy = kwargs.get("headers_policy") or policies.HeadersPolicy(**kwargs)
+        self.proxy_policy = kwargs.get("proxy_policy") or policies.ProxyPolicy(**kwargs)
+        self.logging_policy = kwargs.get("logging_policy") or policies.NetworkTraceLoggingPolicy(**kwargs)
+        self.http_logging_policy = kwargs.get("http_logging_policy") or policies.HttpLoggingPolicy(**kwargs)
+        self.custom_hook_policy = kwargs.get("custom_hook_policy") or policies.CustomHookPolicy(**kwargs)
+        self.redirect_policy = kwargs.get("redirect_policy") or policies.RedirectPolicy(**kwargs)
+        self.retry_policy = kwargs.get("retry_policy") or policies.RetryPolicy(**kwargs)
+        self.authentication_policy = kwargs.get("authentication_policy")
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/_patch.py b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/_patch.py
new file mode 100644
index 00000000..f7dd3251
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/_patch.py
@@ -0,0 +1,20 @@
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------
+"""Customize generated code here.
+
+Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize
+"""
+from typing import List
+
+__all__: List[str] = []  # Add all objects you want publicly available to users at this package level
+
+
+def patch_sdk():
+    """Do not remove from this file.
+
+    `patch_sdk` is a last resort escape hatch that allows you to do customizations
+    you can't accomplish using the techniques described in
+    https://aka.ms/azsdk/python/dpcodegen/python/customize
+    """
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/_serialization.py b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/_serialization.py
new file mode 100644
index 00000000..a066e16a
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/_serialization.py
@@ -0,0 +1,2050 @@
+# pylint: disable=too-many-lines
+# --------------------------------------------------------------------------
+#
+# Copyright (c) Microsoft Corporation. All rights reserved.
+#
+# The MIT License (MIT)
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the ""Software""), to
+# deal in the Software without restriction, including without limitation the
+# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+# sell copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+# --------------------------------------------------------------------------
+
+# pyright: reportUnnecessaryTypeIgnoreComment=false
+
+from base64 import b64decode, b64encode
+import calendar
+import datetime
+import decimal
+import email
+from enum import Enum
+import json
+import logging
+import re
+import sys
+import codecs
+from typing import (
+    Dict,
+    Any,
+    cast,
+    Optional,
+    Union,
+    AnyStr,
+    IO,
+    Mapping,
+    Callable,
+    MutableMapping,
+    List,
+)
+
+try:
+    from urllib import quote  # type: ignore
+except ImportError:
+    from urllib.parse import quote
+import xml.etree.ElementTree as ET
+
+import isodate  # type: ignore
+from typing_extensions import Self
+
+from azure.core.exceptions import DeserializationError, SerializationError
+from azure.core.serialization import NULL as CoreNull
+
+_BOM = codecs.BOM_UTF8.decode(encoding="utf-8")
+
+JSON = MutableMapping[str, Any]
+
+
+class RawDeserializer:
+
+    # Accept "text" because we're open minded people...
+    JSON_REGEXP = re.compile(r"^(application|text)/([a-z+.]+\+)?json$")
+
+    # Name used in context
+    CONTEXT_NAME = "deserialized_data"
+
+    @classmethod
+    def deserialize_from_text(cls, data: Optional[Union[AnyStr, IO]], content_type: Optional[str] = None) -> Any:
+        """Decode data according to content-type.
+
+        Accepts a stream of data as well, but it will be loaded into memory at once for now.
+
+        If no content-type is given, returns the string version (not bytes, not a stream).
+
+        :param data: Input, could be bytes or stream (will be decoded with UTF8) or text
+        :type data: str or bytes or IO
+        :param str content_type: The content type.
+        :return: The deserialized data.
+        :rtype: object
+        """
+        if hasattr(data, "read"):
+            # Assume a stream
+            data = cast(IO, data).read()
+
+        if isinstance(data, bytes):
+            data_as_str = data.decode(encoding="utf-8-sig")
+        else:
+            # Explain to mypy the correct type.
+            data_as_str = cast(str, data)
+
+            # Remove Byte Order Mark if present in string
+            data_as_str = data_as_str.lstrip(_BOM)
+
+        if content_type is None:
+            return data
+
+        if cls.JSON_REGEXP.match(content_type):
+            try:
+                return json.loads(data_as_str)
+            except ValueError as err:
+                raise DeserializationError("JSON is invalid: {}".format(err), err) from err
+        elif "xml" in (content_type or []):
+            try:
+
+                try:
+                    if isinstance(data, unicode):  # type: ignore
+                        # On Python 2.7, "fromstring" will scream if given a unicode XML string
+                        data_as_str = data_as_str.encode(encoding="utf-8")  # type: ignore
+                except NameError:
+                    pass
+
+                return ET.fromstring(data_as_str)  # nosec
+            except ET.ParseError as err:
+                # It might be because the server has an issue, and returned JSON with
+                # content-type XML....
+                # So let's try a JSON load, and if it's still broken
+                # let's flow the initial exception
+                def _json_attempt(data):
+                    try:
+                        return True, json.loads(data)
+                    except ValueError:
+                        return False, None  # Don't care about this one
+
+                success, json_result = _json_attempt(data)
+                if success:
+                    return json_result
+                # If I'm here, it's not JSON, it's not XML, let's scream
+                # and raise the last context in this block (the XML exception)
+                # The function hack is because Py2.7 messes up with exception
+                # context otherwise.
+                _LOGGER.critical("Wasn't XML nor JSON, failing")
+                raise DeserializationError("XML is invalid") from err
+        elif content_type.startswith("text/"):
+            return data_as_str
+        raise DeserializationError("Cannot deserialize content-type: {}".format(content_type))
+
+    @classmethod
+    def deserialize_from_http_generics(cls, body_bytes: Optional[Union[AnyStr, IO]], headers: Mapping) -> Any:
+        """Deserialize from HTTP response.
+
+        Use bytes and headers so as NOT to depend on requests/aiohttp or any other
+        specific implementation.
+        Headers will be tested for "content-type".
+
+        :param bytes body_bytes: The body of the response.
+        :param dict headers: The headers of the response.
+        :returns: The deserialized data.
+        :rtype: object
+        """
+        # Try to use content-type from headers if available
+        content_type = None
+        if "content-type" in headers:
+            content_type = headers["content-type"].split(";")[0].strip().lower()
+        # Ouch, this server did not declare what it sent...
+        # Let's guess it's JSON...
+        # Also, since Autorest was considering that an empty body was a valid JSON,
+        # need that test as well....
+        else:
+            content_type = "application/json"
+
+        if body_bytes:
+            return cls.deserialize_from_text(body_bytes, content_type)
+        return None
+
+
+_LOGGER = logging.getLogger(__name__)
+
+try:
+    _long_type = long  # type: ignore
+except NameError:
+    _long_type = int
+
+TZ_UTC = datetime.timezone.utc
+
+_FLATTEN = re.compile(r"(?<!\\)\.")
+
+
+def attribute_transformer(key, attr_desc, value):  # pylint: disable=unused-argument
+    """A key transformer that returns the Python attribute.
+
+    :param str key: The attribute name
+    :param dict attr_desc: The attribute metadata
+    :param object value: The value
+    :returns: A key using attribute name
+    :rtype: str
+    """
+    return (key, value)
+
+
+def full_restapi_key_transformer(key, attr_desc, value):  # pylint: disable=unused-argument
+    """A key transformer that returns the full RestAPI key path.
+
+    :param str key: The attribute name
+    :param dict attr_desc: The attribute metadata
+    :param object value: The value
+    :returns: A list of keys using RestAPI syntax.
+    :rtype: list
+    """
+    keys = _FLATTEN.split(attr_desc["key"])
+    return ([_decode_attribute_map_key(k) for k in keys], value)
+
+
+def last_restapi_key_transformer(key, attr_desc, value):
+    """A key transformer that returns the last RestAPI key.
+
+    :param str key: The attribute name
+    :param dict attr_desc: The attribute metadata
+    :param object value: The value
+    :returns: The last RestAPI key.
+    :rtype: str
+    """
+    key, value = full_restapi_key_transformer(key, attr_desc, value)
+    return (key[-1], value)
+
+
+def _create_xml_node(tag, prefix=None, ns=None):
+    """Create a XML node.
+
+    :param str tag: The tag name
+    :param str prefix: The prefix
+    :param str ns: The namespace
+    :return: The XML node
+    :rtype: xml.etree.ElementTree.Element
+    """
+    if prefix and ns:
+        ET.register_namespace(prefix, ns)
+    if ns:
+        return ET.Element("{" + ns + "}" + tag)
+    return ET.Element(tag)
+
+
+class Model:
+    """Mixin for all client request body/response body models to support
+    serialization and deserialization.
+    """
+
+    _subtype_map: Dict[str, Dict[str, Any]] = {}
+    _attribute_map: Dict[str, Dict[str, Any]] = {}
+    _validation: Dict[str, Dict[str, Any]] = {}
+
+    def __init__(self, **kwargs: Any) -> None:
+        self.additional_properties: Optional[Dict[str, Any]] = {}
+        for k in kwargs:  # pylint: disable=consider-using-dict-items
+            if k not in self._attribute_map:
+                _LOGGER.warning("%s is not a known attribute of class %s and will be ignored", k, self.__class__)
+            elif k in self._validation and self._validation[k].get("readonly", False):
+                _LOGGER.warning("Readonly attribute %s will be ignored in class %s", k, self.__class__)
+            else:
+                setattr(self, k, kwargs[k])
+
+    def __eq__(self, other: Any) -> bool:
+        """Compare objects by comparing all attributes.
+
+        :param object other: The object to compare
+        :returns: True if objects are equal
+        :rtype: bool
+        """
+        if isinstance(other, self.__class__):
+            return self.__dict__ == other.__dict__
+        return False
+
+    def __ne__(self, other: Any) -> bool:
+        """Compare objects by comparing all attributes.
+
+        :param object other: The object to compare
+        :returns: True if objects are not equal
+        :rtype: bool
+        """
+        return not self.__eq__(other)
+
+    def __str__(self) -> str:
+        return str(self.__dict__)
+
+    @classmethod
+    def enable_additional_properties_sending(cls) -> None:
+        cls._attribute_map["additional_properties"] = {"key": "", "type": "{object}"}
+
+    @classmethod
+    def is_xml_model(cls) -> bool:
+        try:
+            cls._xml_map  # type: ignore
+        except AttributeError:
+            return False
+        return True
+
+    @classmethod
+    def _create_xml_node(cls):
+        """Create XML node.
+
+        :returns: The XML node
+        :rtype: xml.etree.ElementTree.Element
+        """
+        try:
+            xml_map = cls._xml_map  # type: ignore
+        except AttributeError:
+            xml_map = {}
+
+        return _create_xml_node(xml_map.get("name", cls.__name__), xml_map.get("prefix", None), xml_map.get("ns", None))
+
+    def serialize(self, keep_readonly: bool = False, **kwargs: Any) -> JSON:
+        """Return the JSON that would be sent to server from this model.
+
+        This is an alias to `as_dict(full_restapi_key_transformer, keep_readonly=False)`.
+
+        If you want XML serialization, you can pass the kwargs is_xml=True.
+
+        :param bool keep_readonly: If you want to serialize the readonly attributes
+        :returns: A dict JSON compatible object
+        :rtype: dict
+        """
+        serializer = Serializer(self._infer_class_models())
+        return serializer._serialize(  # type: ignore # pylint: disable=protected-access
+            self, keep_readonly=keep_readonly, **kwargs
+        )
+
+    def as_dict(
+        self,
+        keep_readonly: bool = True,
+        key_transformer: Callable[[str, Dict[str, Any], Any], Any] = attribute_transformer,
+        **kwargs: Any
+    ) -> JSON:
+        """Return a dict that can be serialized using json.dump.
+
+        Advanced usage might optionally use a callback as parameter:
+
+        .. code:: python
+
+            def my_key_transformer(key, attr_desc, value):
+                return key
+
+        Key is the attribute name used in Python. Attr_desc
+        is a dict of metadata. Currently contains 'type' with the
+        msrest type and 'key' with the RestAPI encoded key.
+        Value is the current value in this object.
+
+        The string returned will be used to serialize the key.
+        If the return type is a list, this is considered a hierarchical
+        result dict.
+
+        See the three examples in this file:
+
+        - attribute_transformer
+        - full_restapi_key_transformer
+        - last_restapi_key_transformer
+
+        If you want XML serialization, you can pass the kwarg is_xml=True.
+
+        :param bool keep_readonly: If you want to serialize the readonly attributes
+        :param function key_transformer: A key transformer function.
+        :returns: A dict JSON compatible object
+        :rtype: dict
+        """
+        serializer = Serializer(self._infer_class_models())
+        return serializer._serialize(  # type: ignore # pylint: disable=protected-access
+            self, key_transformer=key_transformer, keep_readonly=keep_readonly, **kwargs
+        )
+
+    @classmethod
+    def _infer_class_models(cls):
+        try:
+            str_models = cls.__module__.rsplit(".", 1)[0]
+            models = sys.modules[str_models]
+            client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
+            if cls.__name__ not in client_models:
+                raise ValueError("Not Autorest generated code")
+        except Exception:  # pylint: disable=broad-exception-caught
+            # Assume it's not Autorest generated (tests?). Add ourselves as dependencies.
+            client_models = {cls.__name__: cls}
+        return client_models
+
+    @classmethod
+    def deserialize(cls, data: Any, content_type: Optional[str] = None) -> Self:
+        """Parse a str using the RestAPI syntax and return a model.
+
+        :param str data: A str using RestAPI structure. JSON by default.
+        :param str content_type: JSON by default, set application/xml if XML.
+        :returns: An instance of this model
+        :raises DeserializationError: if something went wrong
+        :rtype: Self
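+
+        A minimal sketch (``MyModel`` stands for an illustrative generated
+        Model subclass):
+
+        .. code-block:: python
+
+            my_model = MyModel.deserialize('{"name": "foo"}', content_type="application/json")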
+        """
+        deserializer = Deserializer(cls._infer_class_models())
+        return deserializer(cls.__name__, data, content_type=content_type)  # type: ignore
+
+    @classmethod
+    def from_dict(
+        cls,
+        data: Any,
+        key_extractors: Optional[Callable[[str, Dict[str, Any], Any], Any]] = None,
+        content_type: Optional[str] = None,
+    ) -> Self:
+        """Parse a dict using given key extractor return a model.
+
+        By default consider key
+        extractors (rest_key_case_insensitive_extractor, attribute_key_case_insensitive_extractor
+        and last_rest_key_case_insensitive_extractor)
+
+        :param dict data: A dict using RestAPI structure
+        :param function key_extractors: A key extractor function.
+        :param str content_type: JSON by default, set application/xml if XML.
+        :returns: An instance of this model
+        :raises DeserializationError: if something went wrong
+        :rtype: Self
+        """
+        deserializer = Deserializer(cls._infer_class_models())
+        deserializer.key_extractors = (  # type: ignore
+            [  # type: ignore
+                attribute_key_case_insensitive_extractor,
+                rest_key_case_insensitive_extractor,
+                last_rest_key_case_insensitive_extractor,
+            ]
+            if key_extractors is None
+            else key_extractors
+        )
+        return deserializer(cls.__name__, data, content_type=content_type)  # type: ignore
+
+    @classmethod
+    def _flatten_subtype(cls, key, objects):
+        if "_subtype_map" not in cls.__dict__:
+            return {}
+        result = dict(cls._subtype_map[key])
+        for valuetype in cls._subtype_map[key].values():
+            result.update(objects[valuetype]._flatten_subtype(key, objects))  # pylint: disable=protected-access
+        return result
+
+    @classmethod
+    def _classify(cls, response, objects):
+        """Check the class _subtype_map for any child classes.
+        We want to ignore any inherited _subtype_maps.
+
+        :param dict response: The initial data
+        :param dict objects: The class objects
+        :returns: The class to be used
+        :rtype: class
+        """
+        for subtype_key in cls.__dict__.get("_subtype_map", {}).keys():
+            subtype_value = None
+
+            if not isinstance(response, ET.Element):
+                rest_api_response_key = cls._get_rest_key_parts(subtype_key)[-1]
+                subtype_value = response.get(rest_api_response_key, None) or response.get(subtype_key, None)
+            else:
+                subtype_value = xml_key_extractor(subtype_key, cls._attribute_map[subtype_key], response)
+            if subtype_value:
+                # Try to match base class. Can be class name only
+                # (bug to fix in Autorest to support x-ms-discriminator-name)
+                if cls.__name__ == subtype_value:
+                    return cls
+                flatten_mapping_type = cls._flatten_subtype(subtype_key, objects)
+                try:
+                    return objects[flatten_mapping_type[subtype_value]]  # type: ignore
+                except KeyError:
+                    _LOGGER.warning(
+                        "Subtype value %s has no mapping, use base class %s.",
+                        subtype_value,
+                        cls.__name__,
+                    )
+                    break
+            else:
+                _LOGGER.warning("Discriminator %s is absent or null, use base class %s.", subtype_key, cls.__name__)
+                break
+        return cls
+
+    @classmethod
+    def _get_rest_key_parts(cls, attr_key):
+        """Get the RestAPI key of this attr, split it and decode part
+        :param str attr_key: Attribute key must be in attribute_map.
+        :returns: A list of RestAPI part
+        :rtype: list
+        """
+        rest_split_key = _FLATTEN.split(cls._attribute_map[attr_key]["key"])
+        return [_decode_attribute_map_key(key_part) for key_part in rest_split_key]
+
+
+def _decode_attribute_map_key(key):
+    """This decode a key in an _attribute_map to the actual key we want to look at
+    inside the received data.
+
+    :param str key: A key string from the generated code
+    :returns: The decoded key
+    :rtype: str
+    """
+    return key.replace("\\.", ".")
+
+
+class Serializer:  # pylint: disable=too-many-public-methods
+    """Request object model serializer."""
+
+    basic_types = {str: "str", int: "int", bool: "bool", float: "float"}
+
+    _xml_basic_types_serializers = {"bool": lambda x: str(x).lower()}
+    days = {0: "Mon", 1: "Tue", 2: "Wed", 3: "Thu", 4: "Fri", 5: "Sat", 6: "Sun"}
+    months = {
+        1: "Jan",
+        2: "Feb",
+        3: "Mar",
+        4: "Apr",
+        5: "May",
+        6: "Jun",
+        7: "Jul",
+        8: "Aug",
+        9: "Sep",
+        10: "Oct",
+        11: "Nov",
+        12: "Dec",
+    }
+    validation = {
+        "min_length": lambda x, y: len(x) < y,
+        "max_length": lambda x, y: len(x) > y,
+        "minimum": lambda x, y: x < y,
+        "maximum": lambda x, y: x > y,
+        "minimum_ex": lambda x, y: x <= y,
+        "maximum_ex": lambda x, y: x >= y,
+        "min_items": lambda x, y: len(x) < y,
+        "max_items": lambda x, y: len(x) > y,
+        "pattern": lambda x, y: not re.match(y, x, re.UNICODE),
+        "unique": lambda x, y: len(x) != len(set(x)),
+        "multiple": lambda x, y: x % y != 0,
+    }
+
+    def __init__(self, classes: Optional[Mapping[str, type]] = None) -> None:
+        self.serialize_type = {
+            "iso-8601": Serializer.serialize_iso,
+            "rfc-1123": Serializer.serialize_rfc,
+            "unix-time": Serializer.serialize_unix,
+            "duration": Serializer.serialize_duration,
+            "date": Serializer.serialize_date,
+            "time": Serializer.serialize_time,
+            "decimal": Serializer.serialize_decimal,
+            "long": Serializer.serialize_long,
+            "bytearray": Serializer.serialize_bytearray,
+            "base64": Serializer.serialize_base64,
+            "object": self.serialize_object,
+            "[]": self.serialize_iter,
+            "{}": self.serialize_dict,
+        }
+        self.dependencies: Dict[str, type] = dict(classes) if classes else {}
+        self.key_transformer = full_restapi_key_transformer
+        self.client_side_validation = True
+
+    def _serialize(  # pylint: disable=too-many-nested-blocks, too-many-branches, too-many-statements, too-many-locals
+        self, target_obj, data_type=None, **kwargs
+    ):
+        """Serialize data into a string according to type.
+
+        :param object target_obj: The data to be serialized.
+        :param str data_type: The type to be serialized from.
+        :rtype: str, dict
+        :raises SerializationError: if serialization fails.
+        :returns: The serialized data.
+        """
+        key_transformer = kwargs.get("key_transformer", self.key_transformer)
+        keep_readonly = kwargs.get("keep_readonly", False)
+        if target_obj is None:
+            return None
+
+        attr_name = None
+        class_name = target_obj.__class__.__name__
+
+        if data_type:
+            return self.serialize_data(target_obj, data_type, **kwargs)
+
+        if not hasattr(target_obj, "_attribute_map"):
+            data_type = type(target_obj).__name__
+            if data_type in self.basic_types.values():
+                return self.serialize_data(target_obj, data_type, **kwargs)
+
+        # Force "is_xml" kwargs if we detect a XML model
+        try:
+            is_xml_model_serialization = kwargs["is_xml"]
+        except KeyError:
+            is_xml_model_serialization = kwargs.setdefault("is_xml", target_obj.is_xml_model())
+
+        serialized = {}
+        if is_xml_model_serialization:
+            serialized = target_obj._create_xml_node()  # pylint: disable=protected-access
+        try:
+            attributes = target_obj._attribute_map  # pylint: disable=protected-access
+            for attr, attr_desc in attributes.items():
+                attr_name = attr
+                if not keep_readonly and target_obj._validation.get(  # pylint: disable=protected-access
+                    attr_name, {}
+                ).get("readonly", False):
+                    continue
+
+                if attr_name == "additional_properties" and attr_desc["key"] == "":
+                    if target_obj.additional_properties is not None:
+                        serialized.update(target_obj.additional_properties)
+                    continue
+                try:
+
+                    orig_attr = getattr(target_obj, attr)
+                    if is_xml_model_serialization:
+                        pass  # Don't provide "transformer" for XML for now. Keep "orig_attr"
+                    else:  # JSON
+                        keys, orig_attr = key_transformer(attr, attr_desc.copy(), orig_attr)
+                        keys = keys if isinstance(keys, list) else [keys]
+
+                    kwargs["serialization_ctxt"] = attr_desc
+                    new_attr = self.serialize_data(orig_attr, attr_desc["type"], **kwargs)
+
+                    if is_xml_model_serialization:
+                        xml_desc = attr_desc.get("xml", {})
+                        xml_name = xml_desc.get("name", attr_desc["key"])
+                        xml_prefix = xml_desc.get("prefix", None)
+                        xml_ns = xml_desc.get("ns", None)
+                        if xml_desc.get("attr", False):
+                            if xml_ns:
+                                ET.register_namespace(xml_prefix, xml_ns)
+                                xml_name = "{{{}}}{}".format(xml_ns, xml_name)
+                            serialized.set(xml_name, new_attr)  # type: ignore
+                            continue
+                        if xml_desc.get("text", False):
+                            serialized.text = new_attr  # type: ignore
+                            continue
+                        if isinstance(new_attr, list):
+                            serialized.extend(new_attr)  # type: ignore
+                        elif isinstance(new_attr, ET.Element):
+                            # If the nested XML has no XML/Name,
+                            # we MUST replace the tag with the local tag, but keep the namespaces.
+                            if "name" not in getattr(orig_attr, "_xml_map", {}):
+                                splitted_tag = new_attr.tag.split("}")
+                                if len(splitted_tag) == 2:  # Namespace
+                                    new_attr.tag = "}".join([splitted_tag[0], xml_name])
+                                else:
+                                    new_attr.tag = xml_name
+                            serialized.append(new_attr)  # type: ignore
+                        else:  # That's a basic type
+                            # Integrate namespace if necessary
+                            local_node = _create_xml_node(xml_name, xml_prefix, xml_ns)
+                            local_node.text = str(new_attr)
+                            serialized.append(local_node)  # type: ignore
+                    else:  # JSON
+                        for k in reversed(keys):  # type: ignore
+                            new_attr = {k: new_attr}
+
+                        _new_attr = new_attr
+                        _serialized = serialized
+                        for k in keys:  # type: ignore
+                            if k not in _serialized:
+                                _serialized.update(_new_attr)  # type: ignore
+                            _new_attr = _new_attr[k]  # type: ignore
+                            _serialized = _serialized[k]
+                except ValueError as err:
+                    if isinstance(err, SerializationError):
+                        raise
+
+        except (AttributeError, KeyError, TypeError) as err:
+            msg = "Attribute {} in object {} cannot be serialized.\n{}".format(attr_name, class_name, str(target_obj))
+            raise SerializationError(msg) from err
+        return serialized
+
+    def body(self, data, data_type, **kwargs):
+        """Serialize data intended for a request body.
+
+        :param object data: The data to be serialized.
+        :param str data_type: The type to be serialized from.
+        :rtype: dict
+        :raises SerializationError: if serialization fails.
+        :raises ValueError: if data is None
+        :returns: The serialized request body
+        """
+
+        # Just in case this is a dict
+        internal_data_type_str = data_type.strip("[]{}")
+        internal_data_type = self.dependencies.get(internal_data_type_str, None)
+        try:
+            is_xml_model_serialization = kwargs["is_xml"]
+        except KeyError:
+            if internal_data_type and issubclass(internal_data_type, Model):
+                is_xml_model_serialization = kwargs.setdefault("is_xml", internal_data_type.is_xml_model())
+            else:
+                is_xml_model_serialization = False
+        if internal_data_type and not isinstance(internal_data_type, Enum):
+            try:
+                deserializer = Deserializer(self.dependencies)
+                # Since we're serializing, it's almost certain the format is not REST JSON
+                # We're not able to deal with additional properties for now.
+                deserializer.additional_properties_detection = False
+                if is_xml_model_serialization:
+                    deserializer.key_extractors = [  # type: ignore
+                        attribute_key_case_insensitive_extractor,
+                    ]
+                else:
+                    deserializer.key_extractors = [
+                        rest_key_case_insensitive_extractor,
+                        attribute_key_case_insensitive_extractor,
+                        last_rest_key_case_insensitive_extractor,
+                    ]
+                data = deserializer._deserialize(data_type, data)  # pylint: disable=protected-access
+            except DeserializationError as err:
+                raise SerializationError("Unable to build a model: " + str(err)) from err
+
+        return self._serialize(data, data_type, **kwargs)
+
+    def url(self, name, data, data_type, **kwargs):
+        """Serialize data intended for a URL path.
+
+        :param str name: The name of the URL path parameter.
+        :param object data: The data to be serialized.
+        :param str data_type: The type to be serialized from.
+        :rtype: str
+        :returns: The serialized URL path
+        :raises TypeError: if serialization fails.
+        :raises ValueError: if data is None
+        """
+        try:
+            output = self.serialize_data(data, data_type, **kwargs)
+            if data_type == "bool":
+                output = json.dumps(output)
+
+            if kwargs.get("skip_quote") is True:
+                output = str(output)
+                output = output.replace("{", quote("{")).replace("}", quote("}"))
+            else:
+                output = quote(str(output), safe="")
+        except SerializationError as exc:
+            raise TypeError("{} must be type {}.".format(name, data_type)) from exc
+        return output
+
+    def query(self, name, data, data_type, **kwargs):
+        """Serialize data intended for a URL query.
+
+        :param str name: The name of the query parameter.
+        :param object data: The data to be serialized.
+        :param str data_type: The type to be serialized from.
+        :rtype: str, list
+        :raises TypeError: if serialization fails.
+        :raises ValueError: if data is None
+        :returns: The serialized query parameter
+        """
+        try:
+            # Handle lists separately, since we don't want to encode the div separator
+            if data_type.startswith("["):
+                internal_data_type = data_type[1:-1]
+                do_quote = not kwargs.get("skip_quote", False)
+                return self.serialize_iter(data, internal_data_type, do_quote=do_quote, **kwargs)
+
+            # Not a list, regular serialization
+            output = self.serialize_data(data, data_type, **kwargs)
+            if data_type == "bool":
+                output = json.dumps(output)
+            if kwargs.get("skip_quote") is True:
+                output = str(output)
+            else:
+                output = quote(str(output), safe="")
+        except SerializationError as exc:
+            raise TypeError("{} must be type {}.".format(name, data_type)) from exc
+        return str(output)
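+
+    # Illustrative usage (editor's sketch): list parameters are serialized
+    # element by element and, when ``div`` is passed through kwargs, joined
+    # without encoding the separator itself, e.g.
+    #   >>> Serializer().query("ids", ["a b", "c"], "[str]", div=",")
+    #   'a%20b,c'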
+
+    def header(self, name, data, data_type, **kwargs):
+        """Serialize data intended for a request header.
+
+        :param str name: The name of the header.
+        :param object data: The data to be serialized.
+        :param str data_type: The type to be serialized from.
+        :rtype: str
+        :raises TypeError: if serialization fails.
+        :raises ValueError: if data is None
+        :returns: The serialized header
+        """
+        try:
+            if data_type in ["[str]"]:
+                data = ["" if d is None else d for d in data]
+
+            output = self.serialize_data(data, data_type, **kwargs)
+            if data_type == "bool":
+                output = json.dumps(output)
+        except SerializationError as exc:
+            raise TypeError("{} must be type {}.".format(name, data_type)) from exc
+        return str(output)
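+
+    # Illustrative usage (editor's sketch): booleans are JSON-encoded, so the
+    # header value comes out lowercase, e.g.
+    #   >>> Serializer().header("x-ms-exists", True, "bool")
+    #   'true'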
+
+    def serialize_data(self, data, data_type, **kwargs):
+        """Serialize generic data according to supplied data type.
+
+        :param object data: The data to be serialized.
+        :param str data_type: The type to be serialized from.
+        :raises AttributeError: if required data is None.
+        :raises ValueError: if data is None
+        :raises SerializationError: if serialization fails.
+        :returns: The serialized data.
+        :rtype: str, int, float, bool, dict, list
+        """
+        if data is None:
+            raise ValueError("No value for given attribute")
+
+        try:
+            if data is CoreNull:
+                return None
+            if data_type in self.basic_types.values():
+                return self.serialize_basic(data, data_type, **kwargs)
+
+            if data_type in self.serialize_type:
+                return self.serialize_type[data_type](data, **kwargs)
+
+            # If dependencies is empty, try with current data class
+            # It has to be a subclass of Enum anyway
+            enum_type = self.dependencies.get(data_type, data.__class__)
+            if issubclass(enum_type, Enum):
+                return Serializer.serialize_enum(data, enum_obj=enum_type)
+
+            iter_type = data_type[0] + data_type[-1]
+            if iter_type in self.serialize_type:
+                return self.serialize_type[iter_type](data, data_type[1:-1], **kwargs)
+
+        except (ValueError, TypeError) as err:
+            msg = "Unable to serialize value: {!r} as type: {!r}."
+            raise SerializationError(msg.format(data, data_type)) from err
+        return self._serialize(data, **kwargs)
+
+    @classmethod
+    def _get_custom_serializers(cls, data_type, **kwargs):  # pylint: disable=inconsistent-return-statements
+        custom_serializer = kwargs.get("basic_types_serializers", {}).get(data_type)
+        if custom_serializer:
+            return custom_serializer
+        if kwargs.get("is_xml", False):
+            return cls._xml_basic_types_serializers.get(data_type)
+
+    @classmethod
+    def serialize_basic(cls, data, data_type, **kwargs):
+        """Serialize basic builting data type.
+        Serializes objects to str, int, float or bool.
+
+        Possible kwargs:
+        - basic_types_serializers dict[str, callable] : If set, use the callable as serializer
+        - is_xml bool : If set, use xml_basic_types_serializers
+
+        :param obj data: Object to be serialized.
+        :param str data_type: Type of object in the iterable.
+        :rtype: str, int, float, bool
+        :return: serialized object
+        """
+        custom_serializer = cls._get_custom_serializers(data_type, **kwargs)
+        if custom_serializer:
+            return custom_serializer(data)
+        if data_type == "str":
+            return cls.serialize_unicode(data)
+        return eval(data_type)(data)  # nosec # pylint: disable=eval-used
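+
+    # Editor's note: the final fallback resolves the builtin named by
+    # ``data_type`` via eval, so only "int", "float" and "bool" reach it, e.g.
+    #   >>> Serializer.serialize_basic("1.5", "float")
+    #   1.5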
+
+    @classmethod
+    def serialize_unicode(cls, data):
+        """Special handling for serializing unicode strings in Py2.
+        Return unicode strings as-is, unwrap enums to their value, otherwise cast to str.
+
+        :param str data: Object to be serialized.
+        :rtype: str
+        :return: serialized object
+        """
+        try:  # If I received an enum, return its value
+            return data.value
+        except AttributeError:
+            pass
+
+        try:
+            if isinstance(data, unicode):  # type: ignore
+                # Don't change it, JSON and XML ElementTree are totally able
+                # to serialize correctly u'' strings
+                return data
+        except NameError:
+            return str(data)
+        return str(data)
+
+    def serialize_iter(self, data, iter_type, div=None, **kwargs):
+        """Serialize iterable.
+
+        Supported kwargs:
+        - serialization_ctxt dict : The current entry of _attribute_map, or same format.
+          serialization_ctxt['type'] should be same as data_type.
+        - is_xml bool : If set, serialize as XML
+
+        :param list data: Object to be serialized.
+        :param str iter_type: Type of object in the iterable.
+        :param str div: If set, this string is used to join the serialized
+         elements into a single combined string. Defaults to None.
+        :rtype: list, str
+        :return: serialized iterable
+        """
+        if isinstance(data, str):
+            raise SerializationError("Refuse str type as a valid iter type.")
+
+        serialization_ctxt = kwargs.get("serialization_ctxt", {})
+        is_xml = kwargs.get("is_xml", False)
+
+        serialized = []
+        for d in data:
+            try:
+                serialized.append(self.serialize_data(d, iter_type, **kwargs))
+            except ValueError as err:
+                if isinstance(err, SerializationError):
+                    raise
+                serialized.append(None)
+
+        if kwargs.get("do_quote", False):
+            serialized = ["" if s is None else quote(str(s), safe="") for s in serialized]
+
+        if div:
+            serialized = ["" if s is None else str(s) for s in serialized]
+            serialized = div.join(serialized)
+
+        if "xml" in serialization_ctxt or is_xml:
+            # XML serialization is more complicated
+            xml_desc = serialization_ctxt.get("xml", {})
+            xml_name = xml_desc.get("name")
+            if not xml_name:
+                xml_name = serialization_ctxt["key"]
+
+            # Create a wrap node if necessary (use the fact that Element and list have "append")
+            is_wrapped = xml_desc.get("wrapped", False)
+            node_name = xml_desc.get("itemsName", xml_name)
+            if is_wrapped:
+                final_result = _create_xml_node(xml_name, xml_desc.get("prefix", None), xml_desc.get("ns", None))
+            else:
+                final_result = []
+            # All list elements to "local_node"
+            for el in serialized:
+                if isinstance(el, ET.Element):
+                    el_node = el
+                else:
+                    el_node = _create_xml_node(node_name, xml_desc.get("prefix", None), xml_desc.get("ns", None))
+                    if el is not None:  # Otherwise it writes "None" :-p
+                        el_node.text = str(el)
+                final_result.append(el_node)
+            return final_result
+        return serialized
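+
+    # Illustrative usage (editor's sketch): elements that fail to serialize
+    # become None, and a ``div`` separator joins the results into one string:
+    #   >>> Serializer().serialize_iter(["a", None, "b"], "str", div="|")
+    #   'a||b'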
+
+    def serialize_dict(self, attr, dict_type, **kwargs):
+        """Serialize a dictionary of objects.
+
+        :param dict attr: Object to be serialized.
+        :param str dict_type: Type of object in the dictionary.
+        :rtype: dict
+        :return: serialized dictionary
+        """
+        serialization_ctxt = kwargs.get("serialization_ctxt", {})
+        serialized = {}
+        for key, value in attr.items():
+            try:
+                serialized[self.serialize_unicode(key)] = self.serialize_data(value, dict_type, **kwargs)
+            except ValueError as err:
+                if isinstance(err, SerializationError):
+                    raise
+                serialized[self.serialize_unicode(key)] = None
+
+        if "xml" in serialization_ctxt:
+            # XML serialization is more complicated
+            xml_desc = serialization_ctxt["xml"]
+            xml_name = xml_desc["name"]
+
+            final_result = _create_xml_node(xml_name, xml_desc.get("prefix", None), xml_desc.get("ns", None))
+            for key, value in serialized.items():
+                ET.SubElement(final_result, key).text = value
+            return final_result
+
+        return serialized
+
+    def serialize_object(self, attr, **kwargs):  # pylint: disable=too-many-return-statements
+        """Serialize a generic object.
+        This will be handled as a dictionary. If object passed in is not
+        a basic type (str, int, float, dict, list) it will simply be
+        cast to str.
+
+        :param dict attr: Object to be serialized.
+        :rtype: dict or str
+        :return: serialized object
+        """
+        if attr is None:
+            return None
+        if isinstance(attr, ET.Element):
+            return attr
+        obj_type = type(attr)
+        if obj_type in self.basic_types:
+            return self.serialize_basic(attr, self.basic_types[obj_type], **kwargs)
+        if obj_type is _long_type:
+            return self.serialize_long(attr)
+        if obj_type is str:
+            return self.serialize_unicode(attr)
+        if obj_type is datetime.datetime:
+            return self.serialize_iso(attr)
+        if obj_type is datetime.date:
+            return self.serialize_date(attr)
+        if obj_type is datetime.time:
+            return self.serialize_time(attr)
+        if obj_type is datetime.timedelta:
+            return self.serialize_duration(attr)
+        if obj_type is decimal.Decimal:
+            return self.serialize_decimal(attr)
+
+        # If it's a model or I know this dependency, serialize as a Model
+        if obj_type in self.dependencies.values() or isinstance(attr, Model):
+            return self._serialize(attr)
+
+        if obj_type == dict:
+            serialized = {}
+            for key, value in attr.items():
+                try:
+                    serialized[self.serialize_unicode(key)] = self.serialize_object(value, **kwargs)
+                except ValueError:
+                    serialized[self.serialize_unicode(key)] = None
+            return serialized
+
+        if obj_type == list:
+            serialized = []
+            for obj in attr:
+                try:
+                    serialized.append(self.serialize_object(obj, **kwargs))
+                except ValueError:
+                    pass
+            return serialized
+        return str(attr)
+
+    @staticmethod
+    def serialize_enum(attr, enum_obj=None):
+        """Serialize an enum member, or a raw value, into its enum string value.
+
+        :param attr: Enum member or raw value to be serialized.
+        :param Enum enum_obj: Enum class used to validate the value.
+        :rtype: str
+        :return: serialized enum value
+        :raises SerializationError: if the value is not valid for the enum.
+        """
+        try:
+            result = attr.value
+        except AttributeError:
+            result = attr
+        try:
+            enum_obj(result)  # type: ignore
+            return result
+        except ValueError as exc:
+            for enum_value in enum_obj:  # type: ignore
+                if enum_value.value.lower() == str(attr).lower():
+                    return enum_value.value
+            error = "{!r} is not valid value for enum {!r}"
+            raise SerializationError(error.format(attr, enum_obj)) from exc
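+
+    # Illustrative usage (editor's sketch; ``Color`` is a hypothetical enum
+    # with member RED = "Red"): matching is case-insensitive on the value:
+    #   >>> Serializer.serialize_enum("red", enum_obj=Color)
+    #   'Red'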
+
+    @staticmethod
+    def serialize_bytearray(attr, **kwargs):  # pylint: disable=unused-argument
+        """Serialize bytearray into base-64 string.
+
+        :param str attr: Object to be serialized.
+        :rtype: str
+        :return: serialized base64
+        """
+        return b64encode(attr).decode()
+
+    @staticmethod
+    def serialize_base64(attr, **kwargs):  # pylint: disable=unused-argument
+        """Serialize str into base-64 string.
+
+        :param str attr: Object to be serialized.
+        :rtype: str
+        :return: serialized base64
+        """
+        encoded = b64encode(attr).decode("ascii")
+        return encoded.strip("=").replace("+", "-").replace("/", "_")
+
+    @staticmethod
+    def serialize_decimal(attr, **kwargs):  # pylint: disable=unused-argument
+        """Serialize Decimal object to float.
+
+        :param decimal attr: Object to be serialized.
+        :rtype: float
+        :return: serialized decimal
+        """
+        return float(attr)
+
+    @staticmethod
+    def serialize_long(attr, **kwargs):  # pylint: disable=unused-argument
+        """Serialize long (Py2) or int (Py3).
+
+        :param int attr: Object to be serialized.
+        :rtype: int/long
+        :return: serialized long
+        """
+        return _long_type(attr)
+
+    @staticmethod
+    def serialize_date(attr, **kwargs):  # pylint: disable=unused-argument
+        """Serialize Date object into ISO-8601 formatted string.
+
+        :param Date attr: Object to be serialized.
+        :rtype: str
+        :return: serialized date
+        """
+        if isinstance(attr, str):
+            attr = isodate.parse_date(attr)
+        t = "{:04}-{:02}-{:02}".format(attr.year, attr.month, attr.day)
+        return t
+
+    @staticmethod
+    def serialize_time(attr, **kwargs):  # pylint: disable=unused-argument
+        """Serialize Time object into ISO-8601 formatted string.
+
+        :param datetime.time attr: Object to be serialized.
+        :rtype: str
+        :return: serialized time
+        """
+        if isinstance(attr, str):
+            attr = isodate.parse_time(attr)
+        t = "{:02}:{:02}:{:02}".format(attr.hour, attr.minute, attr.second)
+        if attr.microsecond:
+            t += ".{:02}".format(attr.microsecond)
+        return t
+
+    @staticmethod
+    def serialize_duration(attr, **kwargs):  # pylint: disable=unused-argument
+        """Serialize TimeDelta object into ISO-8601 formatted string.
+
+        :param TimeDelta attr: Object to be serialized.
+        :rtype: str
+        :return: serialized duration
+        """
+        if isinstance(attr, str):
+            attr = isodate.parse_duration(attr)
+        return isodate.duration_isoformat(attr)
+
+    @staticmethod
+    def serialize_rfc(attr, **kwargs):  # pylint: disable=unused-argument
+        """Serialize Datetime object into RFC-1123 formatted string.
+
+        :param Datetime attr: Object to be serialized.
+        :rtype: str
+        :raises TypeError: if format invalid.
+        :return: serialized rfc
+        """
+        try:
+            if not attr.tzinfo:
+                _LOGGER.warning("Datetime with no tzinfo will be considered UTC.")
+            utc = attr.utctimetuple()
+        except AttributeError as exc:
+            raise TypeError("RFC1123 object must be valid Datetime object.") from exc
+
+        return "{}, {:02} {} {:04} {:02}:{:02}:{:02} GMT".format(
+            Serializer.days[utc.tm_wday],
+            utc.tm_mday,
+            Serializer.months[utc.tm_mon],
+            utc.tm_year,
+            utc.tm_hour,
+            utc.tm_min,
+            utc.tm_sec,
+        )
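+
+    # Illustrative output (editor's sketch):
+    #   >>> Serializer.serialize_rfc(datetime.datetime(2024, 1, 1, tzinfo=datetime.timezone.utc))
+    #   'Mon, 01 Jan 2024 00:00:00 GMT'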
+
+    @staticmethod
+    def serialize_iso(attr, **kwargs):  # pylint: disable=unused-argument
+        """Serialize Datetime object into ISO-8601 formatted string.
+
+        :param Datetime attr: Object to be serialized.
+        :rtype: str
+        :raises SerializationError: if format invalid.
+        :return: serialized iso
+        """
+        if isinstance(attr, str):
+            attr = isodate.parse_datetime(attr)
+        try:
+            if not attr.tzinfo:
+                _LOGGER.warning("Datetime with no tzinfo will be considered UTC.")
+            utc = attr.utctimetuple()
+            if utc.tm_year > 9999 or utc.tm_year < 1:
+                raise OverflowError("Hit max or min date")
+
+            microseconds = str(attr.microsecond).rjust(6, "0").rstrip("0").ljust(3, "0")
+            if microseconds:
+                microseconds = "." + microseconds
+            date = "{:04}-{:02}-{:02}T{:02}:{:02}:{:02}".format(
+                utc.tm_year, utc.tm_mon, utc.tm_mday, utc.tm_hour, utc.tm_min, utc.tm_sec
+            )
+            return date + microseconds + "Z"
+        except (ValueError, OverflowError) as err:
+            msg = "Unable to serialize datetime object."
+            raise SerializationError(msg) from err
+        except AttributeError as err:
+            msg = "ISO-8601 object must be valid Datetime object."
+            raise TypeError(msg) from err
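+
+    # Illustrative output (editor's sketch): fractions are always emitted with
+    # at least three digits and the result is suffixed with 'Z':
+    #   >>> Serializer.serialize_iso(datetime.datetime(2024, 1, 1, 12, 30, tzinfo=datetime.timezone.utc))
+    #   '2024-01-01T12:30:00.000Z'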
+
+    @staticmethod
+    def serialize_unix(attr, **kwargs):  # pylint: disable=unused-argument
+        """Serialize Datetime object into IntTime format.
+        This is represented as seconds.
+
+        :param Datetime attr: Object to be serialized.
+        :rtype: int
+        :raises SerializationError: if format invalid
+        :return: serialized unix
+        """
+        if isinstance(attr, int):
+            return attr
+        try:
+            if not attr.tzinfo:
+                _LOGGER.warning("Datetime with no tzinfo will be considered UTC.")
+            return int(calendar.timegm(attr.utctimetuple()))
+        except AttributeError as exc:
+            raise TypeError("Unix time object must be valid Datetime object.") from exc
+
+
+def rest_key_extractor(attr, attr_desc, data):  # pylint: disable=unused-argument
+    key = attr_desc["key"]
+    working_data = data
+
+    while "." in key:
+        # Need the cast, as for some reason "split" is typed as list[str | Any]
+        dict_keys = cast(List[str], _FLATTEN.split(key))
+        if len(dict_keys) == 1:
+            key = _decode_attribute_map_key(dict_keys[0])
+            break
+        working_key = _decode_attribute_map_key(dict_keys[0])
+        working_data = working_data.get(working_key, data)
+        if working_data is None:
+            # If we see None at any point while following the flattened JSON path,
+            # it means all properties below it are None as well
+            return None
+        key = ".".join(dict_keys[1:])
+
+    return working_data.get(key)
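+
+# Illustrative behaviour (editor's sketch): a flattened key such as
+# "properties.name" walks the nested dicts:
+#   >>> rest_key_extractor(None, {"key": "properties.name"}, {"properties": {"name": "foo"}})
+#   'foo'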
+
+
+def rest_key_case_insensitive_extractor(  # pylint: disable=unused-argument, inconsistent-return-statements
+    attr, attr_desc, data
+):
+    key = attr_desc["key"]
+    working_data = data
+
+    while "." in key:
+        dict_keys = _FLATTEN.split(key)
+        if len(dict_keys) == 1:
+            key = _decode_attribute_map_key(dict_keys[0])
+            break
+        working_key = _decode_attribute_map_key(dict_keys[0])
+        working_data = attribute_key_case_insensitive_extractor(working_key, None, working_data)
+        if working_data is None:
+            # If we see None at any point while following the flattened JSON path,
+            # it means all properties below it are None as well
+            return None
+        key = ".".join(dict_keys[1:])
+
+    if working_data:
+        return attribute_key_case_insensitive_extractor(key, None, working_data)
+
+
+def last_rest_key_extractor(attr, attr_desc, data):  # pylint: disable=unused-argument
+    """Extract the attribute in "data" based on the last part of the JSON path key.
+
+    :param str attr: The attribute to extract
+    :param dict attr_desc: The attribute description
+    :param dict data: The data to extract from
+    :rtype: object
+    :returns: The extracted attribute
+    """
+    key = attr_desc["key"]
+    dict_keys = _FLATTEN.split(key)
+    return attribute_key_extractor(dict_keys[-1], None, data)
+
+
+def last_rest_key_case_insensitive_extractor(attr, attr_desc, data):  # pylint: disable=unused-argument
+    """Extract the attribute in "data" based on the last part of the JSON path key.
+
+    This is the case insensitive version of "last_rest_key_extractor"
+    :param str attr: The attribute to extract
+    :param dict attr_desc: The attribute description
+    :param dict data: The data to extract from
+    :rtype: object
+    :returns: The extracted attribute
+    """
+    key = attr_desc["key"]
+    dict_keys = _FLATTEN.split(key)
+    return attribute_key_case_insensitive_extractor(dict_keys[-1], None, data)
+
+
+def attribute_key_extractor(attr, _, data):
+    return data.get(attr)
+
+
+def attribute_key_case_insensitive_extractor(attr, _, data):
+    found_key = None
+    lower_attr = attr.lower()
+    for key in data:
+        if lower_attr == key.lower():
+            found_key = key
+            break
+
+    return data.get(found_key)
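+
+# Illustrative behaviour (editor's sketch):
+#   >>> attribute_key_case_insensitive_extractor("name", None, {"Name": "x"})
+#   'x'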
+
+
+def _extract_name_from_internal_type(internal_type):
+    """Given an internal type XML description, extract correct XML name with namespace.
+
+    :param type internal_type: A model type
+    :rtype: str
+    :returns: The XML name, qualified with its namespace when one is declared
+    """
+    internal_type_xml_map = getattr(internal_type, "_xml_map", {})
+    xml_name = internal_type_xml_map.get("name", internal_type.__name__)
+    xml_ns = internal_type_xml_map.get("ns", None)
+    if xml_ns:
+        xml_name = "{{{}}}{}".format(xml_ns, xml_name)
+    return xml_name
+
+
+def xml_key_extractor(attr, attr_desc, data):  # pylint: disable=unused-argument,too-many-return-statements
+    if isinstance(data, dict):
+        return None
+
+    # Test if this model is XML ready first
+    if not isinstance(data, ET.Element):
+        return None
+
+    xml_desc = attr_desc.get("xml", {})
+    xml_name = xml_desc.get("name", attr_desc["key"])
+
+    # Look for children
+    is_iter_type = attr_desc["type"].startswith("[")
+    is_wrapped = xml_desc.get("wrapped", False)
+    internal_type = attr_desc.get("internalType", None)
+    internal_type_xml_map = getattr(internal_type, "_xml_map", {})
+
+    # Integrate namespace if necessary
+    xml_ns = xml_desc.get("ns", internal_type_xml_map.get("ns", None))
+    if xml_ns:
+        xml_name = "{{{}}}{}".format(xml_ns, xml_name)
+
+    # If it's an attribute, that's simple
+    if xml_desc.get("attr", False):
+        return data.get(xml_name)
+
+    # If it's x-ms-text, that's simple too
+    if xml_desc.get("text", False):
+        return data.text
+
+    # Scenario where I take the local name:
+    # - Wrapped node
+    # - Internal type is an enum (considered basic types)
+    # - Internal type has no XML/Name node
+    if is_wrapped or (internal_type and (issubclass(internal_type, Enum) or "name" not in internal_type_xml_map)):
+        children = data.findall(xml_name)
+    # If internal type has a local name and it's not a list, I use that name
+    elif not is_iter_type and internal_type and "name" in internal_type_xml_map:
+        xml_name = _extract_name_from_internal_type(internal_type)
+        children = data.findall(xml_name)
+    # That's an array
+    else:
+        if internal_type:  # Complex type, ignore itemsName and use the complex type name
+            items_name = _extract_name_from_internal_type(internal_type)
+        else:
+            items_name = xml_desc.get("itemsName", xml_name)
+        children = data.findall(items_name)
+
+    if len(children) == 0:
+        if is_iter_type:
+            if is_wrapped:
+                return None  # is_wrapped no node, we want None
+            return []  # not wrapped, assume empty list
+        return None  # Assume it's not there, maybe an optional node.
+
+    # If is_iter_type and not wrapped, return all found children
+    if is_iter_type:
+        if not is_wrapped:
+            return children
+        # Iter and wrapped, should have found one node only (the wrap one)
+        if len(children) != 1:
+            raise DeserializationError(
+                "Tried to deserialize an array not wrapped, and found several nodes '{}'. Maybe you should declare this array as wrapped?".format(  # pylint: disable=line-too-long
+                    xml_name
+                )
+            )
+        return list(children[0])  # Might be empty list and that's ok.
+
+    # Not an iter type here; we should have found at most one element
+    if len(children) > 1:
+        raise DeserializationError("Find several XML '{}' where it was not expected".format(xml_name))
+    return children[0]
+
+
+class Deserializer:
+    """Response object model deserializer.
+
+    :param dict classes: Class type dictionary for deserializing complex types.
+    :ivar list key_extractors: Ordered list of extractors to be used by this deserializer.
+    """
+
+    basic_types = {str: "str", int: "int", bool: "bool", float: "float"}
+
+    valid_date = re.compile(r"\d{4}[-]\d{2}[-]\d{2}T\d{2}:\d{2}:\d{2}\.?\d*Z?[-+]?[\d{2}]?:?[\d{2}]?")
+
+    def __init__(self, classes: Optional[Mapping[str, type]] = None) -> None:
+        self.deserialize_type = {
+            "iso-8601": Deserializer.deserialize_iso,
+            "rfc-1123": Deserializer.deserialize_rfc,
+            "unix-time": Deserializer.deserialize_unix,
+            "duration": Deserializer.deserialize_duration,
+            "date": Deserializer.deserialize_date,
+            "time": Deserializer.deserialize_time,
+            "decimal": Deserializer.deserialize_decimal,
+            "long": Deserializer.deserialize_long,
+            "bytearray": Deserializer.deserialize_bytearray,
+            "base64": Deserializer.deserialize_base64,
+            "object": self.deserialize_object,
+            "[]": self.deserialize_iter,
+            "{}": self.deserialize_dict,
+        }
+        self.deserialize_expected_types = {
+            "duration": (isodate.Duration, datetime.timedelta),
+            "iso-8601": (datetime.datetime),
+        }
+        self.dependencies: Dict[str, type] = dict(classes) if classes else {}
+        self.key_extractors = [rest_key_extractor, xml_key_extractor]
+        # Additional properties only work if "rest_key_extractor" is used to
+        # extract the keys. Making this work with any key extractor would be too
+        # complicated, with no real scenario for now.
+        # Hence this flag to disable additional-properties detection. Set it if
+        # you expect the deserialization to NOT come from JSON REST syntax;
+        # otherwise, results are unexpected.
+        self.additional_properties_detection = True
+
+    def __call__(self, target_obj, response_data, content_type=None):
+        """Call the deserializer to process a REST response.
+
+        :param str target_obj: Target data type to deserialize to.
+        :param requests.Response response_data: REST response object.
+        :param str content_type: Swagger "produces" if available.
+        :raises DeserializationError: if deserialization fails.
+        :return: Deserialized object.
+        :rtype: object
+        """
+        data = self._unpack_content(response_data, content_type)
+        return self._deserialize(target_obj, data)
+
+    def _deserialize(self, target_obj, data):  # pylint: disable=inconsistent-return-statements
+        """Call the deserializer on a model.
+
+        Data needs to be already deserialized as JSON or XML ElementTree
+
+        :param str target_obj: Target data type to deserialize to.
+        :param object data: Object to deserialize.
+        :raises DeserializationError: if deserialization fails.
+        :return: Deserialized object.
+        :rtype: object
+        """
+        # This is already a model, go recursive just in case
+        if hasattr(data, "_attribute_map"):
+            constants = [name for name, config in getattr(data, "_validation", {}).items() if config.get("constant")]
+            try:
+                for attr, mapconfig in data._attribute_map.items():  # pylint: disable=protected-access
+                    if attr in constants:
+                        continue
+                    value = getattr(data, attr)
+                    if value is None:
+                        continue
+                    local_type = mapconfig["type"]
+                    internal_data_type = local_type.strip("[]{}")
+                    if internal_data_type not in self.dependencies or isinstance(internal_data_type, Enum):
+                        continue
+                    setattr(data, attr, self._deserialize(local_type, value))
+                return data
+            except AttributeError:
+                return
+
+        response, class_name = self._classify_target(target_obj, data)
+
+        if isinstance(response, str):
+            return self.deserialize_data(data, response)
+        if isinstance(response, type) and issubclass(response, Enum):
+            return self.deserialize_enum(data, response)
+
+        if data is None or data is CoreNull:
+            return data
+        try:
+            attributes = response._attribute_map  # type: ignore # pylint: disable=protected-access
+            d_attrs = {}
+            for attr, attr_desc in attributes.items():
+                # Check empty string. If it's not empty, someone has a real "additionalProperties"...
+                if attr == "additional_properties" and attr_desc["key"] == "":
+                    continue
+                raw_value = None
+                # Enhance attr_desc with some dynamic data
+                attr_desc = attr_desc.copy()  # Do a copy, do not change the real one
+                internal_data_type = attr_desc["type"].strip("[]{}")
+                if internal_data_type in self.dependencies:
+                    attr_desc["internalType"] = self.dependencies[internal_data_type]
+
+                for key_extractor in self.key_extractors:
+                    found_value = key_extractor(attr, attr_desc, data)
+                    if found_value is not None:
+                        if raw_value is not None and raw_value != found_value:
+                            msg = (
+                                "Ignoring extracted value '%s' from %s for key '%s'"
+                                " (duplicate extraction, follow extractors order)"
+                            )
+                            _LOGGER.warning(msg, found_value, key_extractor, attr)
+                            continue
+                        raw_value = found_value
+
+                value = self.deserialize_data(raw_value, attr_desc["type"])
+                d_attrs[attr] = value
+        except (AttributeError, TypeError, KeyError) as err:
+            msg = "Unable to deserialize to object: " + class_name  # type: ignore
+            raise DeserializationError(msg) from err
+        additional_properties = self._build_additional_properties(attributes, data)
+        return self._instantiate_model(response, d_attrs, additional_properties)
+
+    def _build_additional_properties(self, attribute_map, data):
+        if not self.additional_properties_detection:
+            return None
+        if "additional_properties" in attribute_map and attribute_map.get("additional_properties", {}).get("key") != "":
+            # Check empty string. If it's not empty, someone has a real "additionalProperties"
+            return None
+        if isinstance(data, ET.Element):
+            data = {el.tag: el.text for el in data}
+
+        known_keys = {
+            _decode_attribute_map_key(_FLATTEN.split(desc["key"])[0])
+            for desc in attribute_map.values()
+            if desc["key"] != ""
+        }
+        present_keys = set(data.keys())
+        missing_keys = present_keys - known_keys
+        return {key: data[key] for key in missing_keys}
+
+    def _classify_target(self, target, data):
+        """Check to see whether the deserialization target object can
+        be classified into a subclass.
+        Once classification has been determined, initialize object.
+
+        :param str target: The target object type to deserialize to.
+        :param str/dict data: The response data to deserialize.
+        :return: The classified target object and its class name.
+        :rtype: tuple
+        """
+        if target is None:
+            return None, None
+
+        if isinstance(target, str):
+            try:
+                target = self.dependencies[target]
+            except KeyError:
+                return target, target
+
+        try:
+            target = target._classify(data, self.dependencies)  # type: ignore # pylint: disable=protected-access
+        except AttributeError:
+            pass  # Target is not a Model, no classify
+        return target, target.__class__.__name__  # type: ignore
+
+    def failsafe_deserialize(self, target_obj, data, content_type=None):
+        """Ignores any errors encountered in deserialization,
+        and falls back to not deserializing the object. Recommended
+        for use in error deserialization, as we want to return the
+        HttpResponseError to users, and not have them deal with
+        a deserialization error.
+
+        :param str target_obj: The target object type to deserialize to.
+        :param str/dict data: The response data to deserialize.
+        :param str content_type: Swagger "produces" if available.
+        :return: Deserialized object.
+        :rtype: object
+        """
+        try:
+            return self(target_obj, data, content_type=content_type)
+        except:  # pylint: disable=bare-except
+            _LOGGER.debug(
+                "Ran into a deserialization error. Ignoring since this is failsafe deserialization", exc_info=True
+            )
+            return None
+
+    @staticmethod
+    def _unpack_content(raw_data, content_type=None):
+        """Extract the correct structure for deserialization.
+
+        If raw_data is a PipelineResponse, try to extract the result of RawDeserializer.
+        If we can't, raise. Your pipeline should have a RawDeserializer.
+
+        If not a pipeline response and raw_data is bytes or string, use content-type
+        to decode it. If no content-type, try JSON.
+
+        If raw_data is something else, bypass all logic and return it directly.
+
+        :param obj raw_data: Data to be processed.
+        :param str content_type: How to parse if raw_data is a string/bytes.
+        :raises JSONDecodeError: If JSON is requested and parsing is impossible.
+        :raises UnicodeDecodeError: If bytes are not UTF-8
+        :rtype: object
+        :return: Unpacked content.
+        """
+        # Assume this is enough to detect a Pipeline Response without importing it
+        context = getattr(raw_data, "context", {})
+        if context:
+            if RawDeserializer.CONTEXT_NAME in context:
+                return context[RawDeserializer.CONTEXT_NAME]
+            raise ValueError("This pipeline didn't have the RawDeserializer policy; can't deserialize")
+
+        # Assume this is enough to recognize universal_http.ClientResponse without importing it
+        if hasattr(raw_data, "body"):
+            return RawDeserializer.deserialize_from_http_generics(raw_data.text(), raw_data.headers)
+
+        # Assume this is enough to recognize requests.Response without importing it.
+        if hasattr(raw_data, "_content_consumed"):
+            return RawDeserializer.deserialize_from_http_generics(raw_data.text, raw_data.headers)
+
+        if isinstance(raw_data, (str, bytes)) or hasattr(raw_data, "read"):
+            return RawDeserializer.deserialize_from_text(raw_data, content_type)  # type: ignore
+        return raw_data
+
+    def _instantiate_model(self, response, attrs, additional_properties=None):
+        """Instantiate a response model passing in deserialized args.
+
+        :param Response response: The response model class.
+        :param dict attrs: The deserialized response attributes.
+        :param dict additional_properties: Additional properties to be set.
+        :rtype: Response
+        :return: The instantiated response model.
+        """
+        if callable(response):
+            subtype = getattr(response, "_subtype_map", {})
+            try:
+                readonly = [
+                    k
+                    for k, v in response._validation.items()  # pylint: disable=protected-access  # type: ignore
+                    if v.get("readonly")
+                ]
+                const = [
+                    k
+                    for k, v in response._validation.items()  # pylint: disable=protected-access  # type: ignore
+                    if v.get("constant")
+                ]
+                kwargs = {k: v for k, v in attrs.items() if k not in subtype and k not in readonly + const}
+                response_obj = response(**kwargs)
+                for attr in readonly:
+                    setattr(response_obj, attr, attrs.get(attr))
+                if additional_properties:
+                    response_obj.additional_properties = additional_properties  # type: ignore
+                return response_obj
+            except TypeError as err:
+                msg = "Unable to deserialize {} into model {}. ".format(kwargs, response)  # type: ignore
+                raise DeserializationError(msg + str(err)) from err
+        else:
+            try:
+                for attr, value in attrs.items():
+                    setattr(response, attr, value)
+                return response
+            except Exception as exp:
+                msg = "Unable to populate response model. "
+                msg += "Type: {}, Error: {}".format(type(response), exp)
+                raise DeserializationError(msg) from exp
+
+    def deserialize_data(self, data, data_type):  # pylint: disable=too-many-return-statements
+        """Process data for deserialization according to data type.
+
+        :param str data: The response string to be deserialized.
+        :param str data_type: The type to deserialize to.
+        :raises DeserializationError: if deserialization fails.
+        :return: Deserialized object.
+        :rtype: object
+        """
+        if data is None:
+            return data
+
+        try:
+            if not data_type:
+                return data
+            if data_type in self.basic_types.values():
+                return self.deserialize_basic(data, data_type)
+            if data_type in self.deserialize_type:
+                if isinstance(data, self.deserialize_expected_types.get(data_type, tuple())):
+                    return data
+
+                is_a_text_parsing_type = lambda x: x not in [  # pylint: disable=unnecessary-lambda-assignment
+                    "object",
+                    "[]",
+                    r"{}",
+                ]
+                if isinstance(data, ET.Element) and is_a_text_parsing_type(data_type) and not data.text:
+                    return None
+                data_val = self.deserialize_type[data_type](data)
+                return data_val
+
+            iter_type = data_type[0] + data_type[-1]
+            if iter_type in self.deserialize_type:
+                return self.deserialize_type[iter_type](data, data_type[1:-1])
+
+            obj_type = self.dependencies[data_type]
+            if issubclass(obj_type, Enum):
+                if isinstance(data, ET.Element):
+                    data = data.text
+                return self.deserialize_enum(data, obj_type)
+
+        except (ValueError, TypeError, AttributeError) as err:
+            msg = "Unable to deserialize response data."
+            msg += " Data: {}, {}".format(data, data_type)
+            raise DeserializationError(msg) from err
+        return self._deserialize(obj_type, data)
+
+    def deserialize_iter(self, attr, iter_type):
+        """Deserialize an iterable.
+
+        :param list attr: Iterable to be deserialized.
+        :param str iter_type: The type of object in the iterable.
+        :return: Deserialized iterable.
+        :rtype: list
+        """
+        if attr is None:
+            return None
+        if isinstance(attr, ET.Element):  # If I receive an element here, get the children
+            attr = list(attr)
+        if not isinstance(attr, (list, set)):
+            raise DeserializationError("Cannot deserialize as [{}] an object of type {}".format(iter_type, type(attr)))
+        return [self.deserialize_data(a, iter_type) for a in attr]
+
+    def deserialize_dict(self, attr, dict_type):
+        """Deserialize a dictionary.
+
+        :param dict/list attr: Dictionary to be deserialized. Also accepts
+         a list of key, value pairs.
+        :param str dict_type: The object type of the items in the dictionary.
+        :return: Deserialized dictionary.
+        :rtype: dict
+        """
+        if isinstance(attr, list):
+            return {x["key"]: self.deserialize_data(x["value"], dict_type) for x in attr}
+
+        if isinstance(attr, ET.Element):
+            # Transform <Key>value</Key> into {"Key": "value"}
+            attr = {el.tag: el.text for el in attr}
+        return {k: self.deserialize_data(v, dict_type) for k, v in attr.items()}
+
+    def deserialize_object(self, attr, **kwargs):  # pylint: disable=too-many-return-statements
+        """Deserialize a generic object.
+        This will be handled as a dictionary.
+
+        :param dict attr: Dictionary to be deserialized.
+        :return: Deserialized object.
+        :rtype: dict
+        :raises TypeError: if non-builtin datatype encountered.
+        """
+        if attr is None:
+            return None
+        if isinstance(attr, ET.Element):
+            # Do not recurse on XML, just return the tree as-is
+            return attr
+        if isinstance(attr, str):
+            return self.deserialize_basic(attr, "str")
+        obj_type = type(attr)
+        if obj_type in self.basic_types:
+            return self.deserialize_basic(attr, self.basic_types[obj_type])
+        if obj_type is _long_type:
+            return self.deserialize_long(attr)
+
+        if obj_type == dict:
+            deserialized = {}
+            for key, value in attr.items():
+                try:
+                    deserialized[key] = self.deserialize_object(value, **kwargs)
+                except ValueError:
+                    deserialized[key] = None
+            return deserialized
+
+        if obj_type == list:
+            deserialized = []
+            for obj in attr:
+                try:
+                    deserialized.append(self.deserialize_object(obj, **kwargs))
+                except ValueError:
+                    pass
+            return deserialized
+
+        error = "Cannot deserialize generic object with type: "
+        raise TypeError(error + str(obj_type))
+
+    def deserialize_basic(self, attr, data_type):  # pylint: disable=too-many-return-statements
+        """Deserialize basic builtin data type from string.
+        Will attempt to convert to str, int, float and bool.
+        This function will also accept '1', '0', 'true' and 'false' as
+        valid bool values.
+
+        :param str attr: response string to be deserialized.
+        :param str data_type: deserialization data type.
+        :return: Deserialized basic type.
+        :rtype: str, int, float or bool
+        :raises TypeError: if string format is not valid.
+        """
+        # If we're here, data is supposed to be a basic type.
+        # If it's still an XML node, take the text
+        if isinstance(attr, ET.Element):
+            attr = attr.text
+            if not attr:
+                if data_type == "str":
+                    # None or '', node <a/> is empty string.
+                    return ""
+                # None or '', node <a/> with a strong type is None.
+                # Don't try to model "empty bool" or "empty int"
+                return None
+
+        if data_type == "bool":
+            if attr in [True, False, 1, 0]:
+                return bool(attr)
+            if isinstance(attr, str):
+                if attr.lower() in ["true", "1"]:
+                    return True
+                if attr.lower() in ["false", "0"]:
+                    return False
+            raise TypeError("Invalid boolean value: {}".format(attr))
+
+        if data_type == "str":
+            return self.deserialize_unicode(attr)
+        return eval(data_type)(attr)  # nosec # pylint: disable=eval-used
+
+    @staticmethod
+    def deserialize_unicode(data):
+        """Preserve unicode objects in Python 2, otherwise return data
+        as a string.
+
+        :param str data: response string to be deserialized.
+        :return: Deserialized string.
+        :rtype: str or unicode
+        """
+        # We might be here because we have an enum modeled as string,
+        # and we try to deserialize a partial dict with enum inside
+        if isinstance(data, Enum):
+            return data
+
+        # Consider this is real string
+        try:
+            if isinstance(data, unicode):  # type: ignore
+                return data
+        except NameError:
+            return str(data)
+        return str(data)
+
+    @staticmethod
+    def deserialize_enum(data, enum_obj):
+        """Deserialize string into enum object.
+
+        If the string is not a valid enum value it will be returned as-is
+        and a warning will be logged.
+
+        :param str data: Response string to be deserialized. If this value is
+         None or invalid it will be returned as-is.
+        :param Enum enum_obj: Enum object to deserialize to.
+        :return: Deserialized enum object.
+        :rtype: Enum
+        """
+        if isinstance(data, enum_obj) or data is None:
+            return data
+        if isinstance(data, Enum):
+            data = data.value
+        if isinstance(data, int):
+            # Workaround. We might consider removing it in the future.
+            try:
+                return list(enum_obj.__members__.values())[data]
+            except IndexError as exc:
+                error = "{!r} is not a valid index for enum {!r}"
+                raise DeserializationError(error.format(data, enum_obj)) from exc
+        try:
+            return enum_obj(str(data))
+        except ValueError:
+            for enum_value in enum_obj:
+                if enum_value.value.lower() == str(data).lower():
+                    return enum_value
+            # We don't fail anymore for unknown value, we deserialize as a string
+            _LOGGER.warning("Deserializer is not able to find %s as valid enum in %s", data, enum_obj)
+            return Deserializer.deserialize_unicode(data)
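+
+    # Illustrative usage (editor's sketch; ``Color`` is a hypothetical enum
+    # with member RED = "Red"): matching falls back to case-insensitive:
+    #   >>> Deserializer.deserialize_enum("red", Color)
+    #   <Color.RED: 'Red'>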
+
+    @staticmethod
+    def deserialize_bytearray(attr):
+        """Deserialize string into bytearray.
+
+        :param str attr: response string to be deserialized.
+        :return: Deserialized bytearray
+        :rtype: bytearray
+        :raises TypeError: if string format invalid.
+        """
+        if isinstance(attr, ET.Element):
+            attr = attr.text
+        return bytearray(b64decode(attr))  # type: ignore
+
+    @staticmethod
+    def deserialize_base64(attr):
+        """Deserialize base64 encoded string into string.
+
+        :param str attr: response string to be deserialized.
+        :return: Deserialized base64 string
+        :rtype: bytes
+        :raises TypeError: if string format invalid.
+        """
+        if isinstance(attr, ET.Element):
+            attr = attr.text
+        padding = "=" * (3 - (len(attr) + 3) % 4)  # type: ignore
+        attr = attr + padding  # type: ignore
+        encoded = attr.replace("-", "+").replace("_", "/")
+        return b64decode(encoded)
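+
+    # Editor's note: this reverses serialize_base64 by restoring padding and
+    # the standard alphabet, e.g.
+    #   >>> Deserializer.deserialize_base64("-_8")
+    #   b'\xfb\xff'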
+
+    @staticmethod
+    def deserialize_decimal(attr):
+        """Deserialize string into Decimal object.
+
+        :param str attr: response string to be deserialized.
+        :return: Deserialized decimal
+        :raises DeserializationError: if string format invalid.
+        :rtype: decimal
+        """
+        if isinstance(attr, ET.Element):
+            attr = attr.text
+        try:
+            return decimal.Decimal(str(attr))  # type: ignore
+        except decimal.DecimalException as err:
+            msg = "Invalid decimal {}".format(attr)
+            raise DeserializationError(msg) from err
+
+    @staticmethod
+    def deserialize_long(attr):
+        """Deserialize string into long (Py2) or int (Py3).
+
+        :param str attr: response string to be deserialized.
+        :return: Deserialized int
+        :rtype: long or int
+        :raises ValueError: if string format invalid.
+        """
+        if isinstance(attr, ET.Element):
+            attr = attr.text
+        return _long_type(attr)  # type: ignore
+
+    @staticmethod
+    def deserialize_duration(attr):
+        """Deserialize ISO-8601 formatted string into TimeDelta object.
+
+        :param str attr: response string to be deserialized.
+        :return: Deserialized duration
+        :rtype: TimeDelta
+        :raises DeserializationError: if string format invalid.
+        """
+        if isinstance(attr, ET.Element):
+            attr = attr.text
+        try:
+            duration = isodate.parse_duration(attr)
+        except (ValueError, OverflowError, AttributeError) as err:
+            msg = "Cannot deserialize duration object."
+            raise DeserializationError(msg) from err
+        return duration
+
+    @staticmethod
+    def deserialize_date(attr):
+        """Deserialize ISO-8601 formatted string into Date object.
+
+        :param str attr: response string to be deserialized.
+        :return: Deserialized date
+        :rtype: Date
+        :raises DeserializationError: if string format invalid.
+        """
+        if isinstance(attr, ET.Element):
+            attr = attr.text
+        if re.search(r"[^\W\d_]", attr, re.I + re.U):  # type: ignore
+            raise DeserializationError("Date must have only digits and -. Received: %s" % attr)
+        # This must NOT use defaultmonth/defaultday. Passing 0 ensures an incomplete date raises an exception.
+        return isodate.parse_date(attr, defaultmonth=0, defaultday=0)
+
+    @staticmethod
+    def deserialize_time(attr):
+        """Deserialize ISO-8601 formatted string into time object.
+
+        :param str attr: response string to be deserialized.
+        :return: Deserialized time
+        :rtype: datetime.time
+        :raises DeserializationError: if string format invalid.
+        """
+        if isinstance(attr, ET.Element):
+            attr = attr.text
+        if re.search(r"[^\W\d_]", attr, re.I + re.U):  # type: ignore
+            raise DeserializationError("Date must have only digits and -. Received: %s" % attr)
+        return isodate.parse_time(attr)
+
+    @staticmethod
+    def deserialize_rfc(attr):
+        """Deserialize RFC-1123 formatted string into Datetime object.
+
+        :param str attr: response string to be deserialized.
+        :return: Deserialized RFC datetime
+        :rtype: Datetime
+        :raises DeserializationError: if string format invalid.
+        """
+        if isinstance(attr, ET.Element):
+            attr = attr.text
+        try:
+            parsed_date = email.utils.parsedate_tz(attr)  # type: ignore
+            date_obj = datetime.datetime(
+                *parsed_date[:6], tzinfo=datetime.timezone(datetime.timedelta(minutes=(parsed_date[9] or 0) / 60))
+            )
+            if not date_obj.tzinfo:
+                date_obj = date_obj.astimezone(tz=TZ_UTC)
+        except ValueError as err:
+            msg = "Cannot deserialize to rfc datetime object."
+            raise DeserializationError(msg) from err
+        return date_obj
+
+    @staticmethod
+    def deserialize_iso(attr):
+        """Deserialize ISO-8601 formatted string into Datetime object.
+
+        :param str attr: response string to be deserialized.
+        :return: Deserialized ISO datetime
+        :rtype: Datetime
+        :raises DeserializationError: if string format invalid.
+        """
+        if isinstance(attr, ET.Element):
+            attr = attr.text
+        try:
+            attr = attr.upper()  # type: ignore
+            match = Deserializer.valid_date.match(attr)
+            if not match:
+                raise ValueError("Invalid datetime string: " + attr)
+
+            check_decimal = attr.split(".")
+            if len(check_decimal) > 1:
+                decimal_str = ""
+                for digit in check_decimal[1]:
+                    if digit.isdigit():
+                        decimal_str += digit
+                    else:
+                        break
+                if len(decimal_str) > 6:
+                    attr = attr.replace(decimal_str, decimal_str[0:6])
+
+            date_obj = isodate.parse_datetime(attr)
+            test_utc = date_obj.utctimetuple()
+            if test_utc.tm_year > 9999 or test_utc.tm_year < 1:
+                raise OverflowError("Hit max or min date")
+        except (ValueError, OverflowError, AttributeError) as err:
+            msg = "Cannot deserialize datetime object."
+            raise DeserializationError(msg) from err
+        return date_obj
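+
+    # Illustrative behaviour (editor's sketch): fractional seconds beyond the
+    # six digits Python supports are truncated before parsing:
+    #   >>> Deserializer.deserialize_iso("2024-01-01T00:00:00.1234567Z").microsecond
+    #   123456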
+
+    @staticmethod
+    def deserialize_unix(attr):
+        """Serialize Datetime object into IntTime format.
+        This is represented as seconds.
+
+        :param int attr: Object to be serialized.
+        :return: Deserialized datetime
+        :rtype: Datetime
+        :raises DeserializationError: if format invalid
+        """
+        if isinstance(attr, ET.Element):
+            attr = int(attr.text)  # type: ignore
+        try:
+            attr = int(attr)
+            date_obj = datetime.datetime.fromtimestamp(attr, TZ_UTC)
+        except ValueError as err:
+            msg = "Cannot deserialize to unix datetime object."
+            raise DeserializationError(msg) from err
+        return date_obj
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/aio/__init__.py b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/aio/__init__.py
new file mode 100644
index 00000000..8a9f7149
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/aio/__init__.py
@@ -0,0 +1,29 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+# pylint: disable=wrong-import-position
+
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from ._patch import *  # pylint: disable=unused-wildcard-import
+
+from ._azure_data_lake_storage_restapi import AzureDataLakeStorageRESTAPI  # type: ignore
+
+try:
+    from ._patch import __all__ as _patch_all
+    from ._patch import *
+except ImportError:
+    _patch_all = []
+from ._patch import patch_sdk as _patch_sdk
+
+__all__ = [
+    "AzureDataLakeStorageRESTAPI",
+]
+__all__.extend([p for p in _patch_all if p not in __all__])  # pyright: ignore
+
+_patch_sdk()
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/aio/_azure_data_lake_storage_restapi.py b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/aio/_azure_data_lake_storage_restapi.py
new file mode 100644
index 00000000..ecfcec9b
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/aio/_azure_data_lake_storage_restapi.py
@@ -0,0 +1,114 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+from copy import deepcopy
+from typing import Any, Awaitable, Optional
+from typing_extensions import Self
+
+from azure.core import AsyncPipelineClient
+from azure.core.pipeline import policies
+from azure.core.rest import AsyncHttpResponse, HttpRequest
+
+from .. import models as _models
+from .._serialization import Deserializer, Serializer
+from ._configuration import AzureDataLakeStorageRESTAPIConfiguration
+from .operations import FileSystemOperations, PathOperations, ServiceOperations
+
+
+class AzureDataLakeStorageRESTAPI:  # pylint: disable=client-accepts-api-version-keyword
+    """Azure Data Lake Storage provides storage for Hadoop and other big data workloads.
+
+    :ivar service: ServiceOperations operations
+    :vartype service: azure.storage.filedatalake.aio.operations.ServiceOperations
+    :ivar file_system: FileSystemOperations operations
+    :vartype file_system: azure.storage.filedatalake.aio.operations.FileSystemOperations
+    :ivar path: PathOperations operations
+    :vartype path: azure.storage.filedatalake.aio.operations.PathOperations
+    :param url: The URL of the service account, container, or blob that is the target of the
+     desired operation. Required.
+    :type url: str
+    :param base_url: Service URL. Default value is "".
+    :type base_url: str
+    :param x_ms_lease_duration: The lease duration is required to acquire a lease, and specifies
+     the duration of the lease in seconds.  The lease duration must be between 15 and 60 seconds or
+     -1 for infinite lease. Default value is None.
+    :type x_ms_lease_duration: int
+    :keyword resource: The value must be "filesystem" for all filesystem operations. Default value
+     is "filesystem". Note that overriding this default value may result in unsupported behavior.
+    :paramtype resource: str
+    :keyword version: Specifies the version of the operation to use for this request. Default value
+     is "2025-01-05". Note that overriding this default value may result in unsupported behavior.
+    :paramtype version: str
+    """
+
+    def __init__(  # pylint: disable=missing-client-constructor-parameter-credential
+        self, url: str, base_url: str = "", x_ms_lease_duration: Optional[int] = None, **kwargs: Any
+    ) -> None:
+        self._config = AzureDataLakeStorageRESTAPIConfiguration(
+            url=url, x_ms_lease_duration=x_ms_lease_duration, **kwargs
+        )
+        _policies = kwargs.pop("policies", None)
+        if _policies is None:
+            _policies = [
+                policies.RequestIdPolicy(**kwargs),
+                self._config.headers_policy,
+                self._config.user_agent_policy,
+                self._config.proxy_policy,
+                policies.ContentDecodePolicy(**kwargs),
+                self._config.redirect_policy,
+                self._config.retry_policy,
+                self._config.authentication_policy,
+                self._config.custom_hook_policy,
+                self._config.logging_policy,
+                policies.DistributedTracingPolicy(**kwargs),
+                policies.SensitiveHeaderCleanupPolicy(**kwargs) if self._config.redirect_policy else None,
+                self._config.http_logging_policy,
+            ]
+        self._client: AsyncPipelineClient = AsyncPipelineClient(base_url=base_url, policies=_policies, **kwargs)
+
+        client_models = {k: v for k, v in _models.__dict__.items() if isinstance(v, type)}
+        self._serialize = Serializer(client_models)
+        self._deserialize = Deserializer(client_models)
+        self._serialize.client_side_validation = False
+        self.service = ServiceOperations(self._client, self._config, self._serialize, self._deserialize)
+        self.file_system = FileSystemOperations(self._client, self._config, self._serialize, self._deserialize)
+        self.path = PathOperations(self._client, self._config, self._serialize, self._deserialize)
+
+    def _send_request(
+        self, request: HttpRequest, *, stream: bool = False, **kwargs: Any
+    ) -> Awaitable[AsyncHttpResponse]:
+        """Runs the network request through the client's chained policies.
+
+        >>> from azure.core.rest import HttpRequest
+        >>> request = HttpRequest("GET", "https://www.example.org/")
+        >>> response = await client._send_request(request)
+
+        For more information on this code flow, see https://aka.ms/azsdk/dpcodegen/python/send_request
+
+        :param request: The network request you want to make. Required.
+        :type request: ~azure.core.rest.HttpRequest
+        :keyword bool stream: Whether the response payload will be streamed. Defaults to False.
+        :return: The response of your network call. Does not do error handling on your response.
+        :rtype: ~azure.core.rest.AsyncHttpResponse
+        """
+
+        request_copy = deepcopy(request)
+        request_copy.url = self._client.format_url(request_copy.url)
+        return self._client.send_request(request_copy, stream=stream, **kwargs)  # type: ignore
+
+    async def close(self) -> None:
+        await self._client.close()
+
+    async def __aenter__(self) -> Self:
+        await self._client.__aenter__()
+        return self
+
+    async def __aexit__(self, *exc_details: Any) -> None:
+        await self._client.__aexit__(*exc_details)
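+
+
+# A minimal usage sketch (not part of the generated surface): the public
+# DataLakeServiceClient wrappers normally construct this client, but it can be
+# driven directly. The account URL below is a hypothetical placeholder and no
+# authentication policy is configured here:
+#
+#     import asyncio
+#
+#     async def main() -> None:
+#         url = "https://myaccount.dfs.core.windows.net/myfilesystem"
+#         async with AzureDataLakeStorageRESTAPI(url=url, base_url=url) as client:
+#             await client.file_system.get_properties()
+#
+#     asyncio.run(main())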
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/aio/_configuration.py b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/aio/_configuration.py
new file mode 100644
index 00000000..57b28d3b
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/aio/_configuration.py
@@ -0,0 +1,61 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+from typing import Any, Literal, Optional
+
+from azure.core.pipeline import policies
+
+VERSION = "unknown"
+
+
+class AzureDataLakeStorageRESTAPIConfiguration:  # pylint: disable=too-many-instance-attributes
+    """Configuration for AzureDataLakeStorageRESTAPI.
+
+    Note that all parameters used to create this instance are saved as instance
+    attributes.
+
+    :param url: The URL of the service account, container, or blob that is the target of the
+     desired operation. Required.
+    :type url: str
+    :param x_ms_lease_duration: The lease duration is required to acquire a lease, and specifies
+     the duration of the lease in seconds.  The lease duration must be between 15 and 60 seconds or
+     -1 for infinite lease. Default value is None.
+    :type x_ms_lease_duration: int
+    :keyword resource: The value must be "filesystem" for all filesystem operations. Default value
+     is "filesystem". Note that overriding this default value may result in unsupported behavior.
+    :paramtype resource: str
+    :keyword version: Specifies the version of the operation to use for this request. Default value
+     is "2025-01-05". Note that overriding this default value may result in unsupported behavior.
+    :paramtype version: str
+    """
+
+    def __init__(self, url: str, x_ms_lease_duration: Optional[int] = None, **kwargs: Any) -> None:
+        resource: Literal["filesystem"] = kwargs.pop("resource", "filesystem")
+        version: Literal["2025-01-05"] = kwargs.pop("version", "2025-01-05")
+
+        if url is None:
+            raise ValueError("Parameter 'url' must not be None.")
+
+        self.url = url
+        self.x_ms_lease_duration = x_ms_lease_duration
+        self.resource = resource
+        self.version = version
+        kwargs.setdefault("sdk_moniker", "azuredatalakestoragerestapi/{}".format(VERSION))
+        self.polling_interval = kwargs.get("polling_interval", 30)
+        self._configure(**kwargs)
+
+    def _configure(self, **kwargs: Any) -> None:
+        self.user_agent_policy = kwargs.get("user_agent_policy") or policies.UserAgentPolicy(**kwargs)
+        self.headers_policy = kwargs.get("headers_policy") or policies.HeadersPolicy(**kwargs)
+        self.proxy_policy = kwargs.get("proxy_policy") or policies.ProxyPolicy(**kwargs)
+        self.logging_policy = kwargs.get("logging_policy") or policies.NetworkTraceLoggingPolicy(**kwargs)
+        self.http_logging_policy = kwargs.get("http_logging_policy") or policies.HttpLoggingPolicy(**kwargs)
+        self.custom_hook_policy = kwargs.get("custom_hook_policy") or policies.CustomHookPolicy(**kwargs)
+        self.redirect_policy = kwargs.get("redirect_policy") or policies.AsyncRedirectPolicy(**kwargs)
+        self.retry_policy = kwargs.get("retry_policy") or policies.AsyncRetryPolicy(**kwargs)
+        self.authentication_policy = kwargs.get("authentication_policy")
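+
+
+# A configuration sketch: each policy wired up in _configure() can be replaced
+# by passing it through kwargs. The retry settings below are illustrative
+# values, not defaults:
+#
+#     from azure.core.pipeline import policies as core_policies
+#
+#     config = AzureDataLakeStorageRESTAPIConfiguration(
+#         url="https://myaccount.dfs.core.windows.net",
+#         retry_policy=core_policies.AsyncRetryPolicy(retry_total=3),
+#     )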
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/aio/_patch.py b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/aio/_patch.py
new file mode 100644
index 00000000..f7dd3251
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/aio/_patch.py
@@ -0,0 +1,20 @@
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------
+"""Customize generated code here.
+
+Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize
+"""
+from typing import List
+
+__all__: List[str] = []  # Add all objects you want publicly available to users at this package level
+
+
+def patch_sdk():
+    """Do not remove from this file.
+
+    `patch_sdk` is a last resort escape hatch that allows you to do customizations
+    you can't accomplish using the techniques described in
+    https://aka.ms/azsdk/python/dpcodegen/python/customize
+    """
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/aio/operations/__init__.py b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/aio/operations/__init__.py
new file mode 100644
index 00000000..56a7ece3
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/aio/operations/__init__.py
@@ -0,0 +1,29 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+# pylint: disable=wrong-import-position
+
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from ._patch import *  # pylint: disable=unused-wildcard-import
+
+from ._service_operations import ServiceOperations  # type: ignore
+from ._file_system_operations import FileSystemOperations  # type: ignore
+from ._path_operations import PathOperations  # type: ignore
+
+from ._patch import __all__ as _patch_all
+from ._patch import *
+from ._patch import patch_sdk as _patch_sdk
+
+__all__ = [
+    "ServiceOperations",
+    "FileSystemOperations",
+    "PathOperations",
+]
+__all__.extend([p for p in _patch_all if p not in __all__])  # pyright: ignore
+_patch_sdk()
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/aio/operations/_file_system_operations.py b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/aio/operations/_file_system_operations.py
new file mode 100644
index 00000000..ee562931
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/aio/operations/_file_system_operations.py
@@ -0,0 +1,628 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+import sys
+from typing import Any, Callable, Dict, List, Literal, Optional, TypeVar, Union
+
+from azure.core import AsyncPipelineClient
+from azure.core.exceptions import (
+    ClientAuthenticationError,
+    HttpResponseError,
+    ResourceExistsError,
+    ResourceNotFoundError,
+    ResourceNotModifiedError,
+    map_error,
+)
+from azure.core.pipeline import PipelineResponse
+from azure.core.rest import AsyncHttpResponse, HttpRequest
+from azure.core.tracing.decorator_async import distributed_trace_async
+from azure.core.utils import case_insensitive_dict
+
+from ... import models as _models
+from ..._serialization import Deserializer, Serializer
+from ...operations._file_system_operations import (
+    build_create_request,
+    build_delete_request,
+    build_get_properties_request,
+    build_list_blob_hierarchy_segment_request,
+    build_list_paths_request,
+    build_set_properties_request,
+)
+from .._configuration import AzureDataLakeStorageRESTAPIConfiguration
+
+if sys.version_info >= (3, 9):
+    from collections.abc import MutableMapping
+else:
+    from typing import MutableMapping  # type: ignore
+T = TypeVar("T")
+ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
+
+
+class FileSystemOperations:
+    """
+    .. warning::
+        **DO NOT** instantiate this class directly.
+
+        Instead, you should access the following operations through
+        :class:`~azure.storage.filedatalake.aio.AzureDataLakeStorageRESTAPI`'s
+        :attr:`file_system` attribute.
+    """
+
+    models = _models
+
+    def __init__(self, *args, **kwargs) -> None:
+        input_args = list(args)
+        self._client: AsyncPipelineClient = input_args.pop(0) if input_args else kwargs.pop("client")
+        self._config: AzureDataLakeStorageRESTAPIConfiguration = (
+            input_args.pop(0) if input_args else kwargs.pop("config")
+        )
+        self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer")
+        self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer")
+
+    @distributed_trace_async
+    async def create(
+        self,
+        request_id_parameter: Optional[str] = None,
+        timeout: Optional[int] = None,
+        properties: Optional[str] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """Create FileSystem.
+
+        Create a FileSystem rooted at the specified location. If the FileSystem already exists, the
+        operation fails.  This operation does not support conditional HTTP requests.
+
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param properties: Optional. User-defined properties to be stored with the filesystem, in the
+         format of a comma-separated list of name and value pairs "n1=v1, n2=v2, ...", where each value
+         is a base64 encoded string. Note that the string may only contain ASCII characters in the
+         ISO-8859-1 character set.  If the filesystem exists, any properties not included in the list
+         will be removed.  All properties are removed if the header is omitted.  To merge new and
+         existing properties, first get all existing properties and the current E-Tag, then make a
+         conditional request with the E-Tag and include values for all properties. Default value is
+         None.
+        :type properties: str
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = kwargs.pop("params", {}) or {}
+
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _request = build_create_request(
+            url=self._config.url,
+            request_id_parameter=request_id_parameter,
+            timeout=timeout,
+            properties=properties,
+            resource=self._config.resource,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [201]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["x-ms-namespace-enabled"] = self._deserialize(
+            "str", response.headers.get("x-ms-namespace-enabled")
+        )
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
+    @distributed_trace_async
+    async def set_properties(
+        self,
+        request_id_parameter: Optional[str] = None,
+        timeout: Optional[int] = None,
+        properties: Optional[str] = None,
+        modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """Set FileSystem Properties.
+
+        Set properties for the FileSystem.  This operation supports conditional HTTP requests.  For
+        more information, see `Specifying Conditional Headers for Blob Service Operations
+        <https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations>`_.
+
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param properties: Optional. User-defined properties to be stored with the filesystem, in the
+         format of a comma-separated list of name and value pairs "n1=v1, n2=v2, ...", where each value
+         is a base64 encoded string. Note that the string may only contain ASCII characters in the
+         ISO-8859-1 character set.  If the filesystem exists, any properties not included in the list
+         will be removed.  All properties are removed if the header is omitted.  To merge new and
+         existing properties, first get all existing properties and the current E-Tag, then make a
+         conditional request with the E-Tag and include values for all properties. Default value is
+         None.
+        :type properties: str
+        :param modified_access_conditions: Parameter group. Default value is None.
+        :type modified_access_conditions: ~azure.storage.filedatalake.models.ModifiedAccessConditions
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = kwargs.pop("params", {}) or {}
+
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _if_modified_since = None
+        _if_unmodified_since = None
+        if modified_access_conditions is not None:
+            _if_modified_since = modified_access_conditions.if_modified_since
+            _if_unmodified_since = modified_access_conditions.if_unmodified_since
+
+        _request = build_set_properties_request(
+            url=self._config.url,
+            request_id_parameter=request_id_parameter,
+            timeout=timeout,
+            properties=properties,
+            if_modified_since=_if_modified_since,
+            if_unmodified_since=_if_unmodified_since,
+            resource=self._config.resource,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
+    @distributed_trace_async
+    async def get_properties(
+        self, request_id_parameter: Optional[str] = None, timeout: Optional[int] = None, **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """Get FileSystem Properties.
+
+        All system and user-defined filesystem properties are specified in the response headers.
+
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = kwargs.pop("params", {}) or {}
+
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _request = build_get_properties_request(
+            url=self._config.url,
+            request_id_parameter=request_id_parameter,
+            timeout=timeout,
+            resource=self._config.resource,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["x-ms-properties"] = self._deserialize("str", response.headers.get("x-ms-properties"))
+        response_headers["x-ms-namespace-enabled"] = self._deserialize(
+            "str", response.headers.get("x-ms-namespace-enabled")
+        )
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
+    @distributed_trace_async
+    async def delete(
+        self,
+        request_id_parameter: Optional[str] = None,
+        timeout: Optional[int] = None,
+        modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """Delete FileSystem.
+
+        Marks the FileSystem for deletion.  When a FileSystem is deleted, a FileSystem with the same
+        identifier cannot be created for at least 30 seconds. While the filesystem is being deleted,
+        attempts to create a filesystem with the same identifier will fail with status code 409
+        (Conflict), with the service returning additional error information indicating that the
+        filesystem is being deleted. All other operations, including operations on any files or
+        directories within the filesystem, will fail with status code 404 (Not Found) while the
+        filesystem is being deleted. This operation supports conditional HTTP requests.  For more
+        information, see `Specifying Conditional Headers for Blob Service Operations
+        <https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations>`_.
+
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param modified_access_conditions: Parameter group. Default value is None.
+        :type modified_access_conditions: ~azure.storage.filedatalake.models.ModifiedAccessConditions
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = kwargs.pop("params", {}) or {}
+
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _if_modified_since = None
+        _if_unmodified_since = None
+        if modified_access_conditions is not None:
+            _if_modified_since = modified_access_conditions.if_modified_since
+            _if_unmodified_since = modified_access_conditions.if_unmodified_since
+
+        _request = build_delete_request(
+            url=self._config.url,
+            request_id_parameter=request_id_parameter,
+            timeout=timeout,
+            if_modified_since=_if_modified_since,
+            if_unmodified_since=_if_unmodified_since,
+            resource=self._config.resource,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [202]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
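+    # A hedged sketch of the error_map hook used by each operation in this
+    # class: callers may remap HTTP status codes to azure.core exception types
+    # via kwargs, overriding the defaults built above, e.g.:
+    #
+    #     await client.file_system.delete(
+    #         error_map={409: ResourceExistsError},
+    #     )
+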
+    @distributed_trace_async
+    async def list_paths(
+        self,
+        recursive: bool,
+        request_id_parameter: Optional[str] = None,
+        timeout: Optional[int] = None,
+        continuation: Optional[str] = None,
+        path: Optional[str] = None,
+        max_results: Optional[int] = None,
+        upn: Optional[bool] = None,
+        **kwargs: Any
+    ) -> _models.PathList:
+        # pylint: disable=line-too-long
+        """List Paths.
+
+        List FileSystem paths and their properties.
+
+        :param recursive: If "true", all paths are listed; otherwise, only paths at the root of the
+         filesystem are listed. Required.
+        :type recursive: bool
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param continuation: Optional.  The number of paths returned with each invocation is
+         limited.  If the number of paths to be returned exceeds this limit, a continuation token is
+         returned in the x-ms-continuation response header.  When a continuation token is returned
+         in the response, it must be specified in a subsequent invocation of the list operation to
+         continue listing the paths. Default value is None.
+        :type continuation: str
+        :param path: Optional.  Filters results to paths within the specified directory. An error
+         occurs if the directory does not exist. Default value is None.
+        :type path: str
+        :param max_results: An optional value that specifies the maximum number of items to return. If
+         omitted or greater than 5,000, the response will include up to 5,000 items. Default value is
+         None.
+        :type max_results: int
+        :param upn: Optional. Valid only when Hierarchical Namespace is enabled for the account. If
+         "true", the user identity values returned in the x-ms-owner, x-ms-group, and x-ms-acl response
+         headers will be transformed from Azure Active Directory Object IDs to User Principal Names.  If
+         "false", the values will be returned as Azure Active Directory Object IDs. The default value is
+         false. Note that group and application Object IDs are not translated because they do not have
+         unique friendly names. Default value is None.
+        :type upn: bool
+        :return: PathList or the result of cls(response)
+        :rtype: ~azure.storage.filedatalake.models.PathList
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = kwargs.pop("params", {}) or {}
+
+        cls: ClsType[_models.PathList] = kwargs.pop("cls", None)
+
+        _request = build_list_paths_request(
+            url=self._config.url,
+            recursive=recursive,
+            request_id_parameter=request_id_parameter,
+            timeout=timeout,
+            continuation=continuation,
+            path=path,
+            max_results=max_results,
+            upn=upn,
+            resource=self._config.resource,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["x-ms-continuation"] = self._deserialize("str", response.headers.get("x-ms-continuation"))
+
+        deserialized = self._deserialize("PathList", pipeline_response.http_response)
+
+        if cls:
+            return cls(pipeline_response, deserialized, response_headers)  # type: ignore
+
+        return deserialized  # type: ignore
+
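+    # A pagination sketch for list_paths: the continuation token travels in the
+    # x-ms-continuation response header, so a cls callback is needed to observe
+    # it alongside the deserialized PathList (names below are illustrative):
+    #
+    #     token = None
+    #     while True:
+    #         paths, headers = await client.file_system.list_paths(
+    #             recursive=True,
+    #             continuation=token,
+    #             cls=lambda pipeline_response, deserialized, headers: (deserialized, headers),
+    #         )
+    #         token = headers.get("x-ms-continuation")
+    #         if not token:
+    #             break
+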
+    @distributed_trace_async
+    async def list_blob_hierarchy_segment(
+        self,
+        prefix: Optional[str] = None,
+        delimiter: Optional[str] = None,
+        marker: Optional[str] = None,
+        max_results: Optional[int] = None,
+        include: Optional[List[Union[str, _models.ListBlobsIncludeItem]]] = None,
+        showonly: Literal["deleted"] = "deleted",
+        timeout: Optional[int] = None,
+        request_id_parameter: Optional[str] = None,
+        **kwargs: Any
+    ) -> _models.ListBlobsHierarchySegmentResponse:
+        # pylint: disable=line-too-long
+        """The List Blobs operation returns a list of the blobs under the specified container.
+
+        :param prefix: Filters results to blobs whose names begin with the specified prefix. Default
+         value is None.
+        :type prefix: str
+        :param delimiter: When the request includes this parameter, the operation returns a BlobPrefix
+         element in the response body that acts as a placeholder for all blobs whose names begin with
+         the same substring up to the appearance of the delimiter character. The delimiter may be a
+         single character or a string. Default value is None.
+        :type delimiter: str
+        :param marker: A string value that identifies the portion of the list of blobs to be
+         returned with the next listing operation. The operation returns the NextMarker value within
+         the response body if the listing operation did not return all blobs remaining to be listed
+         with the current page. The NextMarker value can be used as the value for the marker parameter
+         in a subsequent call to request the next page of list items. The marker value is opaque to
+         the client. Default value is None.
+        :type marker: str
+        :param max_results: An optional value that specifies the maximum number of items to return. If
+         omitted or greater than 5,000, the response will include up to 5,000 items. Default value is
+         None.
+        :type max_results: int
+        :param include: Include this parameter to specify one or more datasets to include in the
+         response. Default value is None.
+        :type include: list[str or ~azure.storage.filedatalake.models.ListBlobsIncludeItem]
+        :param showonly: Include this parameter to show only the deleted blobs in the response.
+         Known values are "deleted" and None. Default value is "deleted".
+        :type showonly: str
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :return: ListBlobsHierarchySegmentResponse or the result of cls(response)
+        :rtype: ~azure.storage.filedatalake.models.ListBlobsHierarchySegmentResponse
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        restype: Literal["container"] = kwargs.pop("restype", _params.pop("restype", "container"))
+        comp: Literal["list"] = kwargs.pop("comp", _params.pop("comp", "list"))
+        cls: ClsType[_models.ListBlobsHierarchySegmentResponse] = kwargs.pop("cls", None)
+
+        _request = build_list_blob_hierarchy_segment_request(
+            url=self._config.url,
+            prefix=prefix,
+            delimiter=delimiter,
+            marker=marker,
+            max_results=max_results,
+            include=include,
+            showonly=showonly,
+            timeout=timeout,
+            request_id_parameter=request_id_parameter,
+            restype=restype,
+            comp=comp,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["Content-Type"] = self._deserialize("str", response.headers.get("Content-Type"))
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+
+        deserialized = self._deserialize("ListBlobsHierarchySegmentResponse", pipeline_response.http_response)
+
+        if cls:
+            return cls(pipeline_response, deserialized, response_headers)  # type: ignore
+
+        return deserialized  # type: ignore
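+
+
+# A response-hook sketch that applies to every operation in this class: passing
+# a cls callback surfaces the raw PipelineResponse and the response headers
+# instead of (or alongside) the deserialized model:
+#
+#     headers = await client.file_system.get_properties(
+#         cls=lambda pipeline_response, deserialized, response_headers: response_headers,
+#     )
+#     print(headers.get("x-ms-properties"))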
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/aio/operations/_patch.py b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/aio/operations/_patch.py
new file mode 100644
index 00000000..f7dd3251
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/aio/operations/_patch.py
@@ -0,0 +1,20 @@
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------
+"""Customize generated code here.
+
+Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize
+"""
+from typing import List
+
+__all__: List[str] = []  # Add all objects you want publicly available to users at this package level
+
+
+def patch_sdk():
+    """Do not remove from this file.
+
+    `patch_sdk` is a last resort escape hatch that allows you to do customizations
+    you can't accomplish using the techniques described in
+    https://aka.ms/azsdk/python/dpcodegen/python/customize
+    """
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/aio/operations/_path_operations.py b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/aio/operations/_path_operations.py
new file mode 100644
index 00000000..d3ed5c3c
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/aio/operations/_path_operations.py
@@ -0,0 +1,1968 @@
+# pylint: disable=too-many-lines
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+import sys
+from typing import Any, AsyncIterator, Callable, Dict, IO, Literal, Optional, TypeVar, Union
+
+from azure.core import AsyncPipelineClient
+from azure.core.exceptions import (
+    ClientAuthenticationError,
+    HttpResponseError,
+    ResourceExistsError,
+    ResourceNotFoundError,
+    ResourceNotModifiedError,
+    StreamClosedError,
+    StreamConsumedError,
+    map_error,
+)
+from azure.core.pipeline import PipelineResponse
+from azure.core.rest import AsyncHttpResponse, HttpRequest
+from azure.core.tracing.decorator_async import distributed_trace_async
+from azure.core.utils import case_insensitive_dict
+
+from ... import models as _models
+from ..._serialization import Deserializer, Serializer
+from ...operations._path_operations import (
+    build_append_data_request,
+    build_create_request,
+    build_delete_request,
+    build_flush_data_request,
+    build_get_properties_request,
+    build_lease_request,
+    build_read_request,
+    build_set_access_control_recursive_request,
+    build_set_access_control_request,
+    build_set_expiry_request,
+    build_undelete_request,
+    build_update_request,
+)
+from .._configuration import AzureDataLakeStorageRESTAPIConfiguration
+
+if sys.version_info >= (3, 9):
+    from collections.abc import MutableMapping
+else:
+    from typing import MutableMapping  # type: ignore
+T = TypeVar("T")
+ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
+
+
+class PathOperations:
+    """
+    .. warning::
+        **DO NOT** instantiate this class directly.
+
+        Instead, you should access the following operations through
+        :class:`~azure.storage.filedatalake.aio.AzureDataLakeStorageRESTAPI`'s
+        :attr:`path` attribute.
+    """
+
+    models = _models
+
+    def __init__(self, *args, **kwargs) -> None:
+        input_args = list(args)
+        self._client: AsyncPipelineClient = input_args.pop(0) if input_args else kwargs.pop("client")
+        self._config: AzureDataLakeStorageRESTAPIConfiguration = (
+            input_args.pop(0) if input_args else kwargs.pop("config")
+        )
+        self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer")
+        self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer")
+
+    @distributed_trace_async
+    async def create(
+        self,
+        request_id_parameter: Optional[str] = None,
+        timeout: Optional[int] = None,
+        resource: Optional[Union[str, _models.PathResourceType]] = None,
+        continuation: Optional[str] = None,
+        mode: Optional[Union[str, _models.PathRenameMode]] = None,
+        rename_source: Optional[str] = None,
+        source_lease_id: Optional[str] = None,
+        properties: Optional[str] = None,
+        permissions: Optional[str] = None,
+        umask: Optional[str] = None,
+        owner: Optional[str] = None,
+        group: Optional[str] = None,
+        acl: Optional[str] = None,
+        proposed_lease_id: Optional[str] = None,
+        lease_duration: Optional[int] = None,
+        expiry_options: Optional[Union[str, _models.PathExpiryOptions]] = None,
+        expires_on: Optional[str] = None,
+        encryption_context: Optional[str] = None,
+        path_http_headers: Optional[_models.PathHTTPHeaders] = None,
+        lease_access_conditions: Optional[_models.LeaseAccessConditions] = None,
+        modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None,
+        source_modified_access_conditions: Optional[_models.SourceModifiedAccessConditions] = None,
+        cpk_info: Optional[_models.CpkInfo] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """Create File | Create Directory | Rename File | Rename Directory.
+
+        Create or rename a file or directory.    By default, the destination is overwritten and if the
+        destination already exists and has a lease the lease is broken.  This operation supports
+        conditional HTTP requests.  For more information, see `Specifying Conditional Headers for Blob
+        Service Operations
+        <https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations>`_.
+        To fail if the destination already exists, use a conditional request with If-None-Match: "*".
+
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param resource: Required only for Create File and Create Directory. The value must be "file"
+         or "directory". Known values are: "directory" and "file". Default value is None.
+        :type resource: str or ~azure.storage.filedatalake.models.PathResourceType
+        :param continuation: Optional.  When renaming a directory, the number of paths that are
+         renamed with each invocation is limited.  If the number of paths to be renamed exceeds this
+         limit, a continuation token is returned in the x-ms-continuation response header.  When a
+         continuation token is returned in the response, it must be specified in a subsequent
+         invocation of the rename operation to continue renaming the directory. Default value is
+         None.
+        :type continuation: str
+        :param mode: Optional. Valid only when namespace is enabled. This parameter determines the
+         behavior of the rename operation. The value must be "legacy" or "posix", and the default value
+         will be "posix". Known values are: "legacy" and "posix". Default value is None.
+        :type mode: str or ~azure.storage.filedatalake.models.PathRenameMode
+        :param rename_source: An optional file or directory to be renamed.  The value must have the
+         following format: "/{filesystem}/{path}".  If "x-ms-properties" is specified, the properties
+         will overwrite the existing properties; otherwise, the existing properties will be preserved.
+         This value must be a URL percent-encoded string. Note that the string may only contain ASCII
+         characters in the ISO-8859-1 character set. Default value is None.
+        :type rename_source: str
+        :param source_lease_id: A lease ID for the source path. If specified, the source path must have
+         an active lease and the lease ID must match. Default value is None.
+        :type source_lease_id: str
+        :param properties: Optional. User-defined properties to be stored with the filesystem, in the
+         format of a comma-separated list of name and value pairs "n1=v1, n2=v2, ...", where each value
+         is a base64 encoded string. Note that the string may only contain ASCII characters in the
+         ISO-8859-1 character set.  If the filesystem exists, any properties not included in the list
+         will be removed.  All properties are removed if the header is omitted.  To merge new and
+         existing properties, first get all existing properties and the current E-Tag, then make a
+         conditional request with the E-Tag and include values for all properties. Default value is
+         None.
+        :type properties: str
+        :param permissions: Optional and only valid if Hierarchical Namespace is enabled for the
+         account. Sets POSIX access permissions for the file owner, the file owning group, and others.
+         Each class may be granted read, write, or execute permission.  The sticky bit is also
+         supported.  Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are supported.
+         Default value is None.
+        :type permissions: str
+        :param umask: Optional and only valid if Hierarchical Namespace is enabled for the account.
+         When creating a file or directory and the parent folder does not have a default ACL, the umask
+         restricts the permissions of the file or directory to be created.  The resulting permission is
+         given by p AND NOT u (p & ~u), where p is the permission and u is the umask.  For example, if p
+         is 0777 and u is 0057, then the resulting permission is 0720.  The default permission is 0777
+         for a directory and 0666 for a file.  The default umask is 0027.  The umask must be specified
+         in 4-digit octal notation (e.g. 0766). Default value is None.
+        :type umask: str
+        :param owner: Optional. The owner of the blob or directory. Default value is None.
+        :type owner: str
+        :param group: Optional. The owning group of the blob or directory. Default value is None.
+        :type group: str
+        :param acl: Sets POSIX access control rights on files and directories. The value is a
+         comma-separated list of access control entries. Each access control entry (ACE) consists of a
+         scope, a type, a user or group identifier, and permissions in the format
+         "[scope:][type]:[id]:[permissions]". Default value is None.
+        :type acl: str
+        :param proposed_lease_id: Proposed lease ID, in a GUID string format. The Blob service returns
+         400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid
+         Constructor (String) for a list of valid GUID string formats. Default value is None.
+        :type proposed_lease_id: str
+        :param lease_duration: The lease duration is required to acquire a lease, and specifies the
+         duration of the lease in seconds.  The lease duration must be between 15 and 60 seconds or -1
+         for infinite lease. Default value is None.
+        :type lease_duration: int
+        :param expiry_options: Indicates mode of the expiry time. Known values are:
+         "NeverExpire", "RelativeToCreation", "RelativeToNow", and "Absolute". Default value is None.
+        :type expiry_options: str or ~azure.storage.filedatalake.models.PathExpiryOptions
+        :param expires_on: The time at which the blob expires. Default value is None.
+        :type expires_on: str
+        :param encryption_context: Specifies the encryption context to set on the file. Default value
+         is None.
+        :type encryption_context: str
+        :param path_http_headers: Parameter group. Default value is None.
+        :type path_http_headers: ~azure.storage.filedatalake.models.PathHTTPHeaders
+        :param lease_access_conditions: Parameter group. Default value is None.
+        :type lease_access_conditions: ~azure.storage.filedatalake.models.LeaseAccessConditions
+        :param modified_access_conditions: Parameter group. Default value is None.
+        :type modified_access_conditions: ~azure.storage.filedatalake.models.ModifiedAccessConditions
+        :param source_modified_access_conditions: Parameter group. Default value is None.
+        :type source_modified_access_conditions:
+         ~azure.storage.filedatalake.models.SourceModifiedAccessConditions
+        :param cpk_info: Parameter group. Default value is None.
+        :type cpk_info: ~azure.storage.filedatalake.models.CpkInfo
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = kwargs.pop("params", {}) or {}
+
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _cache_control = None
+        _content_encoding = None
+        _content_language = None
+        _content_disposition = None
+        _content_type_parameter = None
+        _lease_id = None
+        _if_match = None
+        _if_none_match = None
+        _if_modified_since = None
+        _if_unmodified_since = None
+        _source_if_match = None
+        _source_if_none_match = None
+        _source_if_modified_since = None
+        _source_if_unmodified_since = None
+        _encryption_key = None
+        _encryption_key_sha256 = None
+        _encryption_algorithm = None
+        if path_http_headers is not None:
+            _cache_control = path_http_headers.cache_control
+            _content_disposition = path_http_headers.content_disposition
+            _content_encoding = path_http_headers.content_encoding
+            _content_language = path_http_headers.content_language
+            _content_type_parameter = path_http_headers.content_type
+        if lease_access_conditions is not None:
+            _lease_id = lease_access_conditions.lease_id
+        if modified_access_conditions is not None:
+            _if_match = modified_access_conditions.if_match
+            _if_modified_since = modified_access_conditions.if_modified_since
+            _if_none_match = modified_access_conditions.if_none_match
+            _if_unmodified_since = modified_access_conditions.if_unmodified_since
+        if source_modified_access_conditions is not None:
+            _source_if_match = source_modified_access_conditions.source_if_match
+            _source_if_modified_since = source_modified_access_conditions.source_if_modified_since
+            _source_if_none_match = source_modified_access_conditions.source_if_none_match
+            _source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since
+        if cpk_info is not None:
+            _encryption_algorithm = cpk_info.encryption_algorithm
+            _encryption_key = cpk_info.encryption_key
+            _encryption_key_sha256 = cpk_info.encryption_key_sha256
+
+        _request = build_create_request(
+            url=self._config.url,
+            request_id_parameter=request_id_parameter,
+            timeout=timeout,
+            resource=resource,
+            continuation=continuation,
+            mode=mode,
+            cache_control=_cache_control,
+            content_encoding=_content_encoding,
+            content_language=_content_language,
+            content_disposition=_content_disposition,
+            content_type_parameter=_content_type_parameter,
+            rename_source=rename_source,
+            lease_id=_lease_id,
+            source_lease_id=source_lease_id,
+            properties=properties,
+            permissions=permissions,
+            umask=umask,
+            if_match=_if_match,
+            if_none_match=_if_none_match,
+            if_modified_since=_if_modified_since,
+            if_unmodified_since=_if_unmodified_since,
+            source_if_match=_source_if_match,
+            source_if_none_match=_source_if_none_match,
+            source_if_modified_since=_source_if_modified_since,
+            source_if_unmodified_since=_source_if_unmodified_since,
+            encryption_key=_encryption_key,
+            encryption_key_sha256=_encryption_key_sha256,
+            encryption_algorithm=_encryption_algorithm,  # type: ignore
+            owner=owner,
+            group=group,
+            acl=acl,
+            proposed_lease_id=proposed_lease_id,
+            lease_duration=lease_duration,
+            expiry_options=expiry_options,
+            expires_on=expires_on,
+            encryption_context=encryption_context,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [201]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["x-ms-continuation"] = self._deserialize("str", response.headers.get("x-ms-continuation"))
+        response_headers["Content-Length"] = self._deserialize("int", response.headers.get("Content-Length"))
+        response_headers["x-ms-request-server-encrypted"] = self._deserialize(
+            "bool", response.headers.get("x-ms-request-server-encrypted")
+        )
+        response_headers["x-ms-encryption-key-sha256"] = self._deserialize(
+            "str", response.headers.get("x-ms-encryption-key-sha256")
+        )
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
+    @distributed_trace_async
+    async def update(
+        self,
+        action: Union[str, _models.PathUpdateAction],
+        mode: Union[str, _models.PathSetAccessControlRecursiveMode],
+        body: IO[bytes],
+        request_id_parameter: Optional[str] = None,
+        timeout: Optional[int] = None,
+        max_records: Optional[int] = None,
+        continuation: Optional[str] = None,
+        force_flag: Optional[bool] = None,
+        position: Optional[int] = None,
+        retain_uncommitted_data: Optional[bool] = None,
+        close: Optional[bool] = None,
+        content_length: Optional[int] = None,
+        properties: Optional[str] = None,
+        owner: Optional[str] = None,
+        group: Optional[str] = None,
+        permissions: Optional[str] = None,
+        acl: Optional[str] = None,
+        structured_body_type: Optional[str] = None,
+        structured_content_length: Optional[int] = None,
+        path_http_headers: Optional[_models.PathHTTPHeaders] = None,
+        lease_access_conditions: Optional[_models.LeaseAccessConditions] = None,
+        modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None,
+        **kwargs: Any
+    ) -> Optional[_models.SetAccessControlRecursiveResponse]:
+        # pylint: disable=line-too-long
+        """Append Data | Flush Data | Set Properties | Set Access Control.
+
+        Uploads data to be appended to a file, flushes (writes) previously uploaded data to a file,
+        sets properties for a file or directory, or sets access control for a file or directory. Data
+        can only be appended to a file. Concurrent writes to the same file using multiple clients are
+        not supported. This operation supports conditional HTTP requests. For more information, see
+        `Specifying Conditional Headers for Blob Service Operations
+        <https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations>`_.
+
+        :param action: The action must be "append" to upload data to be appended to a file, "flush" to
+         flush previously uploaded data to a file, "setProperties" to set the properties of a file or
+         directory, "setAccessControl" to set the owner, group, permissions, or access control list for
+         a file or directory, or "setAccessControlRecursive" to set the access control list for a
+         directory recursively. Note that Hierarchical Namespace must be enabled for the account in
+         order to use access control.  Also note that the Access Control List (ACL) includes permissions
+         for the owner, owning group, and others, so the x-ms-permissions and x-ms-acl request headers
+         are mutually exclusive. Known values are: "append", "flush", "setProperties",
+         "setAccessControl", and "setAccessControlRecursive". Required.
+        :type action: str or ~azure.storage.filedatalake.models.PathUpdateAction
+        :param mode: Mode "set" sets POSIX access control rights on files and directories, "modify"
+         modifies one or more POSIX access control rights that pre-exist on files and directories,
+         "remove" removes one or more POSIX access control rights that were present earlier on files
+         and directories. Known values are: "set", "modify", and "remove". Required.
+        :type mode: str or ~azure.storage.filedatalake.models.PathSetAccessControlRecursiveMode
+        :param body: Initial data. Required.
+        :type body: IO[bytes]
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param max_records: Optional. Valid for "SetAccessControlRecursive" operation. It specifies the
+         maximum number of files or directories on which the acl change will be applied. If omitted or
+         greater than 2,000, the request will process up to 2,000 items. Default value is None.
+        :type max_records: int
+        :param continuation: Optional. The number of paths processed with each invocation is limited.
+         If the number of paths to be processed exceeds this limit, a continuation token is returned in
+         the x-ms-continuation response header. When a continuation token is returned in the response,
+         it must be percent-encoded and specified in a subsequent invocation of the
+         setAccessControlRecursive operation. Default value is None.
+        :type continuation: str
+        :param force_flag: Optional. Valid for "SetAccessControlRecursive" operation. If set to false,
+         the operation will terminate quickly on encountering user errors (4XX). If true, the operation
+         will ignore user errors and proceed with the operation on other sub-entities of the directory.
+         A continuation token is only returned when forceFlag is true and user errors occur. If not
+         set, the service default is false. Default value is None.
+        :type force_flag: bool
+        :param position: This parameter allows the caller to upload data in parallel and control the
+         order in which it is appended to the file.  It is required when uploading data to be appended
+         to the file and when flushing previously uploaded data to the file.  The value must be the
+         position where the data is to be appended.  Uploaded data is not immediately flushed, or
+         written, to the file.  To flush, the previously uploaded data must be contiguous, the position
+         parameter must be specified and equal to the length of the file after all data has been
+         written, and there must not be a request entity body included with the request. Default value
+         is None.
+        :type position: int
+        :param retain_uncommitted_data: Valid only for flush operations.  If "true", uncommitted data
+         is retained after the flush operation completes; otherwise, the uncommitted data is deleted
+         after the flush operation.  The default is false.  Data at offsets less than the specified
+         position are written to the file when flush succeeds, but this optional parameter allows data
+         after the flush position to be retained for a future flush operation. Default value is None.
+        :type retain_uncommitted_data: bool
+        :param close: Azure Storage Events allow applications to receive notifications when files
+         change. When Azure Storage Events are enabled, a file changed event is raised. This event has a
+         property indicating whether this is the final change to distinguish the difference between an
+         intermediate flush to a file stream and the final close of a file stream. The close query
+         parameter is valid only when the action is "flush" and change notifications are enabled. If the
+         value of close is "true" and the flush operation completes successfully, the service raises a
+         file change notification with a property indicating that this is the final update (the file
+         stream has been closed). If "false", a change notification is raised indicating the file has
+         changed. The default is false. This query parameter is set to true by the Hadoop ABFS driver to
+         indicate that the file stream has been closed. Default value is None.
+        :type close: bool
+        :param content_length: Required for "Append Data" and "Flush Data".  Must be 0 for "Flush
+         Data".  Must be the length of the request content in bytes for "Append Data". Default value is
+         None.
+        :type content_length: int
+        :param properties: Optional. User-defined properties to be stored with the file or directory, in the
+         format of a comma-separated list of name and value pairs "n1=v1, n2=v2, ...", where each value
+         is a base64 encoded string. Note that the string may only contain ASCII characters in the
+         ISO-8859-1 character set.  If the file or directory exists, any properties not included in the list
+         will be removed.  All properties are removed if the header is omitted.  To merge new and
+         existing properties, first get all existing properties and the current E-Tag, then make a
+         conditional request with the E-Tag and include values for all properties. Default value is
+         None.
+        :type properties: str
+        :param owner: Optional. The owner of the blob or directory. Default value is None.
+        :type owner: str
+        :param group: Optional. The owning group of the blob or directory. Default value is None.
+        :type group: str
+        :param permissions: Optional and only valid if Hierarchical Namespace is enabled for the
+         account. Sets POSIX access permissions for the file owner, the file owning group, and others.
+         Each class may be granted read, write, or execute permission.  The sticky bit is also
+         supported.  Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are supported.
+         Default value is None.
+        :type permissions: str
+        :param acl: Sets POSIX access control rights on files and directories. The value is a
+         comma-separated list of access control entries. Each access control entry (ACE) consists of a
+         scope, a type, a user or group identifier, and permissions in the format
+         "[scope:][type]:[id]:[permissions]". Default value is None.
+        :type acl: str
+        :param structured_body_type: Required if the request body is a structured message. Specifies
+         the message schema version and properties. Default value is None.
+        :type structured_body_type: str
+        :param structured_content_length: Required if the request body is a structured message.
+         Specifies the length of the blob/file content inside the message body. Will always be smaller
+         than Content-Length. Default value is None.
+        :type structured_content_length: int
+        :param path_http_headers: Parameter group. Default value is None.
+        :type path_http_headers: ~azure.storage.filedatalake.models.PathHTTPHeaders
+        :param lease_access_conditions: Parameter group. Default value is None.
+        :type lease_access_conditions: ~azure.storage.filedatalake.models.LeaseAccessConditions
+        :param modified_access_conditions: Parameter group. Default value is None.
+        :type modified_access_conditions: ~azure.storage.filedatalake.models.ModifiedAccessConditions
+        :return: SetAccessControlRecursiveResponse or None or the result of cls(response)
+        :rtype: ~azure.storage.filedatalake.models.SetAccessControlRecursiveResponse or None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+        _params = kwargs.pop("params", {}) or {}
+
+        content_type: str = kwargs.pop("content_type", _headers.pop("Content-Type", "application/octet-stream"))
+        cls: ClsType[Optional[_models.SetAccessControlRecursiveResponse]] = kwargs.pop("cls", None)
+
+        _content_md5 = None
+        _lease_id = None
+        _cache_control = None
+        _content_type_parameter = None
+        _content_disposition = None
+        _content_encoding = None
+        _content_language = None
+        _if_match = None
+        _if_none_match = None
+        _if_modified_since = None
+        _if_unmodified_since = None
+        if path_http_headers is not None:
+            _cache_control = path_http_headers.cache_control
+            _content_disposition = path_http_headers.content_disposition
+            _content_encoding = path_http_headers.content_encoding
+            _content_language = path_http_headers.content_language
+            _content_md5 = path_http_headers.content_md5
+            _content_type_parameter = path_http_headers.content_type
+        if lease_access_conditions is not None:
+            _lease_id = lease_access_conditions.lease_id
+        if modified_access_conditions is not None:
+            _if_match = modified_access_conditions.if_match
+            _if_modified_since = modified_access_conditions.if_modified_since
+            _if_none_match = modified_access_conditions.if_none_match
+            _if_unmodified_since = modified_access_conditions.if_unmodified_since
+        _content = body
+
+        _request = build_update_request(
+            url=self._config.url,
+            action=action,
+            mode=mode,
+            request_id_parameter=request_id_parameter,
+            timeout=timeout,
+            max_records=max_records,
+            continuation=continuation,
+            force_flag=force_flag,
+            position=position,
+            retain_uncommitted_data=retain_uncommitted_data,
+            close=close,
+            content_length=content_length,
+            content_md5=_content_md5,
+            lease_id=_lease_id,
+            cache_control=_cache_control,
+            content_type_parameter=_content_type_parameter,
+            content_disposition=_content_disposition,
+            content_encoding=_content_encoding,
+            content_language=_content_language,
+            properties=properties,
+            owner=owner,
+            group=group,
+            permissions=permissions,
+            acl=acl,
+            if_match=_if_match,
+            if_none_match=_if_none_match,
+            if_modified_since=_if_modified_since,
+            if_unmodified_since=_if_unmodified_since,
+            structured_body_type=structured_body_type,
+            structured_content_length=structured_content_length,
+            content_type=content_type,
+            version=self._config.version,
+            content=_content,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200, 202]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        deserialized = None
+        response_headers = {}
+        if response.status_code == 200:
+            response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+            response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+            response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+            response_headers["Accept-Ranges"] = self._deserialize("str", response.headers.get("Accept-Ranges"))
+            response_headers["Cache-Control"] = self._deserialize("str", response.headers.get("Cache-Control"))
+            response_headers["Content-Disposition"] = self._deserialize(
+                "str", response.headers.get("Content-Disposition")
+            )
+            response_headers["Content-Encoding"] = self._deserialize("str", response.headers.get("Content-Encoding"))
+            response_headers["Content-Language"] = self._deserialize("str", response.headers.get("Content-Language"))
+            response_headers["Content-Length"] = self._deserialize("int", response.headers.get("Content-Length"))
+            response_headers["Content-Range"] = self._deserialize("str", response.headers.get("Content-Range"))
+            response_headers["Content-Type"] = self._deserialize("str", response.headers.get("Content-Type"))
+            response_headers["Content-MD5"] = self._deserialize("str", response.headers.get("Content-MD5"))
+            response_headers["x-ms-properties"] = self._deserialize("str", response.headers.get("x-ms-properties"))
+            response_headers["x-ms-continuation"] = self._deserialize("str", response.headers.get("x-ms-continuation"))
+            response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+            response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+
+            deserialized = self._deserialize("SetAccessControlRecursiveResponse", pipeline_response.http_response)
+
+        if response.status_code == 202:
+            response_headers["Content-MD5"] = self._deserialize("str", response.headers.get("Content-MD5"))
+            response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+            response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+            response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+            response_headers["x-ms-structured-body"] = self._deserialize(
+                "str", response.headers.get("x-ms-structured-body")
+            )
+
+        if cls:
+            return cls(pipeline_response, deserialized, response_headers)  # type: ignore
+
+        return deserialized  # type: ignore
+
+    @distributed_trace_async
+    async def lease(
+        self,
+        x_ms_lease_action: Union[str, _models.PathLeaseAction],
+        request_id_parameter: Optional[str] = None,
+        timeout: Optional[int] = None,
+        x_ms_lease_break_period: Optional[int] = None,
+        proposed_lease_id: Optional[str] = None,
+        lease_access_conditions: Optional[_models.LeaseAccessConditions] = None,
+        modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """Lease Path.
+
+        Create and manage a lease to restrict write and delete access to the path. This operation
+        supports conditional HTTP requests.  For more information, see `Specifying Conditional Headers
+        for Blob Service Operations
+        <https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations>`_.
+
+        :param x_ms_lease_action: There are five lease actions: "acquire", "break", "change", "renew",
+         and "release". Use "acquire" and specify the "x-ms-proposed-lease-id" and "x-ms-lease-duration"
+         to acquire a new lease. Use "break" to break an existing lease. When a lease is broken, the
+         lease break period is allowed to elapse, during which time no lease operation except break and
+         release can be performed on the file. When a lease is successfully broken, the response
+         indicates the interval in seconds until a new lease can be acquired. Use "change" and specify
+         the current lease ID in "x-ms-lease-id" and the new lease ID in "x-ms-proposed-lease-id" to
+         change the lease ID of an active lease. Use "renew" and specify the "x-ms-lease-id" to renew an
+         existing lease. Use "release" and specify the "x-ms-lease-id" to release a lease. Known values
+         are: "acquire", "break", "change", "renew", "release", and "break". Required.
+        :type x_ms_lease_action: str or ~azure.storage.filedatalake.models.PathLeaseAction
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param x_ms_lease_break_period: The lease break period is optional for a break operation and
+         specifies the break period of the lease in seconds.  The lease break duration must be
+         between 0 and 60 seconds. Default value is None.
+        :type x_ms_lease_break_period: int
+        :param proposed_lease_id: Proposed lease ID, in a GUID string format. The Blob service returns
+         400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid
+         Constructor (String) for a list of valid GUID string formats. Default value is None.
+        :type proposed_lease_id: str
+        :param lease_access_conditions: Parameter group. Default value is None.
+        :type lease_access_conditions: ~azure.storage.filedatalake.models.LeaseAccessConditions
+        :param modified_access_conditions: Parameter group. Default value is None.
+        :type modified_access_conditions: ~azure.storage.filedatalake.models.ModifiedAccessConditions
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = kwargs.pop("params", {}) or {}
+
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _lease_id = None
+        _if_match = None
+        _if_none_match = None
+        _if_modified_since = None
+        _if_unmodified_since = None
+        if lease_access_conditions is not None:
+            _lease_id = lease_access_conditions.lease_id
+        if modified_access_conditions is not None:
+            _if_match = modified_access_conditions.if_match
+            _if_modified_since = modified_access_conditions.if_modified_since
+            _if_none_match = modified_access_conditions.if_none_match
+            _if_unmodified_since = modified_access_conditions.if_unmodified_since
+
+        _request = build_lease_request(
+            url=self._config.url,
+            x_ms_lease_action=x_ms_lease_action,
+            request_id_parameter=request_id_parameter,
+            timeout=timeout,
+            x_ms_lease_break_period=x_ms_lease_break_period,
+            lease_id=_lease_id,
+            proposed_lease_id=proposed_lease_id,
+            if_match=_if_match,
+            if_none_match=_if_none_match,
+            if_modified_since=_if_modified_since,
+            if_unmodified_since=_if_unmodified_since,
+            x_ms_lease_duration=self._config.x_ms_lease_duration,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200, 201, 202]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        if response.status_code == 200:
+            response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+            response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+            response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+            response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+            response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+            response_headers["x-ms-lease-id"] = self._deserialize("str", response.headers.get("x-ms-lease-id"))
+
+        if response.status_code == 201:
+            response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+            response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+            response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+            response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+            response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+            response_headers["x-ms-lease-id"] = self._deserialize("str", response.headers.get("x-ms-lease-id"))
+
+        if response.status_code == 202:
+            response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+            response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+            response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+            response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+            response_headers["x-ms-lease-time"] = self._deserialize("str", response.headers.get("x-ms-lease-time"))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
+    @distributed_trace_async
+    async def read(
+        self,
+        request_id_parameter: Optional[str] = None,
+        timeout: Optional[int] = None,
+        range: Optional[str] = None,
+        x_ms_range_get_content_md5: Optional[bool] = None,
+        lease_access_conditions: Optional[_models.LeaseAccessConditions] = None,
+        modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None,
+        cpk_info: Optional[_models.CpkInfo] = None,
+        **kwargs: Any
+    ) -> AsyncIterator[bytes]:
+        # pylint: disable=line-too-long
+        """Read File.
+
+        Read the contents of a file.  For read operations, range requests are supported. This operation
+        supports conditional HTTP requests.  For more information, see `Specifying Conditional Headers
+        for Blob Service Operations
+        <https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations>`_.
+
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param range: The HTTP Range request header specifies one or more byte ranges of the resource
+         to be retrieved. Default value is None.
+        :type range: str
+        :param x_ms_range_get_content_md5: Optional. When this header is set to "true" and specified
+         together with the Range header, the service returns the MD5 hash for the range, as long as the
+         range is less than or equal to 4 MB in size. If this header is specified without the Range
+         header, the service returns status code 400 (Bad Request). If this header is set to true when
+         the range exceeds 4 MB in size, the service returns status code 400 (Bad Request). Default
+         value is None.
+        :type x_ms_range_get_content_md5: bool
+        :param lease_access_conditions: Parameter group. Default value is None.
+        :type lease_access_conditions: ~azure.storage.filedatalake.models.LeaseAccessConditions
+        :param modified_access_conditions: Parameter group. Default value is None.
+        :type modified_access_conditions: ~azure.storage.filedatalake.models.ModifiedAccessConditions
+        :param cpk_info: Parameter group. Default value is None.
+        :type cpk_info: ~azure.storage.filedatalake.models.CpkInfo
+        :return: AsyncIterator[bytes] or the result of cls(response)
+        :rtype: AsyncIterator[bytes]
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = kwargs.pop("params", {}) or {}
+
+        cls: ClsType[AsyncIterator[bytes]] = kwargs.pop("cls", None)
+
+        _lease_id = None
+        _if_match = None
+        _if_none_match = None
+        _if_modified_since = None
+        _if_unmodified_since = None
+        _encryption_key = None
+        _encryption_key_sha256 = None
+        _encryption_algorithm = None
+        if lease_access_conditions is not None:
+            _lease_id = lease_access_conditions.lease_id
+        if modified_access_conditions is not None:
+            _if_match = modified_access_conditions.if_match
+            _if_modified_since = modified_access_conditions.if_modified_since
+            _if_none_match = modified_access_conditions.if_none_match
+            _if_unmodified_since = modified_access_conditions.if_unmodified_since
+        if cpk_info is not None:
+            _encryption_algorithm = cpk_info.encryption_algorithm
+            _encryption_key = cpk_info.encryption_key
+            _encryption_key_sha256 = cpk_info.encryption_key_sha256
+
+        _request = build_read_request(
+            url=self._config.url,
+            request_id_parameter=request_id_parameter,
+            timeout=timeout,
+            range=range,
+            lease_id=_lease_id,
+            x_ms_range_get_content_md5=x_ms_range_get_content_md5,
+            if_match=_if_match,
+            if_none_match=_if_none_match,
+            if_modified_since=_if_modified_since,
+            if_unmodified_since=_if_unmodified_since,
+            encryption_key=_encryption_key,
+            encryption_key_sha256=_encryption_key_sha256,
+            encryption_algorithm=_encryption_algorithm,  # type: ignore
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _decompress = kwargs.pop("decompress", True)
+        _stream = True
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200, 206]:
+            try:
+                await response.read()  # Load the body in memory and close the socket
+            except (StreamConsumedError, StreamClosedError):
+                pass
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        if response.status_code == 200:
+            response_headers["Accept-Ranges"] = self._deserialize("str", response.headers.get("Accept-Ranges"))
+            response_headers["Cache-Control"] = self._deserialize("str", response.headers.get("Cache-Control"))
+            response_headers["Content-Disposition"] = self._deserialize(
+                "str", response.headers.get("Content-Disposition")
+            )
+            response_headers["Content-Encoding"] = self._deserialize("str", response.headers.get("Content-Encoding"))
+            response_headers["Content-Language"] = self._deserialize("str", response.headers.get("Content-Language"))
+            response_headers["Content-Length"] = self._deserialize("int", response.headers.get("Content-Length"))
+            response_headers["Content-Range"] = self._deserialize("str", response.headers.get("Content-Range"))
+            response_headers["Content-Type"] = self._deserialize("str", response.headers.get("Content-Type"))
+            response_headers["Content-MD5"] = self._deserialize("str", response.headers.get("Content-MD5"))
+            response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+            response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+            response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+            response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+            response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+            response_headers["x-ms-resource-type"] = self._deserialize(
+                "str", response.headers.get("x-ms-resource-type")
+            )
+            response_headers["x-ms-properties"] = self._deserialize("str", response.headers.get("x-ms-properties"))
+            response_headers["x-ms-lease-duration"] = self._deserialize(
+                "str", response.headers.get("x-ms-lease-duration")
+            )
+            response_headers["x-ms-lease-state"] = self._deserialize("str", response.headers.get("x-ms-lease-state"))
+            response_headers["x-ms-lease-status"] = self._deserialize("str", response.headers.get("x-ms-lease-status"))
+            response_headers["x-ms-request-server-encrypted"] = self._deserialize(
+                "bool", response.headers.get("x-ms-request-server-encrypted")
+            )
+            response_headers["x-ms-encryption-key-sha256"] = self._deserialize(
+                "str", response.headers.get("x-ms-encryption-key-sha256")
+            )
+
+        if response.status_code == 206:
+            response_headers["Accept-Ranges"] = self._deserialize("str", response.headers.get("Accept-Ranges"))
+            response_headers["Cache-Control"] = self._deserialize("str", response.headers.get("Cache-Control"))
+            response_headers["Content-Disposition"] = self._deserialize(
+                "str", response.headers.get("Content-Disposition")
+            )
+            response_headers["Content-Encoding"] = self._deserialize("str", response.headers.get("Content-Encoding"))
+            response_headers["Content-Language"] = self._deserialize("str", response.headers.get("Content-Language"))
+            response_headers["Content-Length"] = self._deserialize("int", response.headers.get("Content-Length"))
+            response_headers["Content-Range"] = self._deserialize("str", response.headers.get("Content-Range"))
+            response_headers["Content-Type"] = self._deserialize("str", response.headers.get("Content-Type"))
+            response_headers["Content-MD5"] = self._deserialize("str", response.headers.get("Content-MD5"))
+            response_headers["x-ms-content-md5"] = self._deserialize("str", response.headers.get("x-ms-content-md5"))
+            response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+            response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+            response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+            response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+            response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+            response_headers["x-ms-resource-type"] = self._deserialize(
+                "str", response.headers.get("x-ms-resource-type")
+            )
+            response_headers["x-ms-properties"] = self._deserialize("str", response.headers.get("x-ms-properties"))
+            response_headers["x-ms-lease-duration"] = self._deserialize(
+                "str", response.headers.get("x-ms-lease-duration")
+            )
+            response_headers["x-ms-lease-state"] = self._deserialize("str", response.headers.get("x-ms-lease-state"))
+            response_headers["x-ms-lease-status"] = self._deserialize("str", response.headers.get("x-ms-lease-status"))
+            response_headers["x-ms-request-server-encrypted"] = self._deserialize(
+                "bool", response.headers.get("x-ms-request-server-encrypted")
+            )
+            response_headers["x-ms-encryption-key-sha256"] = self._deserialize(
+                "str", response.headers.get("x-ms-encryption-key-sha256")
+            )
+
+        deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
+
+        if cls:
+            return cls(pipeline_response, deserialized, response_headers)  # type: ignore
+
+        return deserialized  # type: ignore
+
+    @distributed_trace_async
+    async def get_properties(
+        self,
+        request_id_parameter: Optional[str] = None,
+        timeout: Optional[int] = None,
+        action: Optional[Union[str, _models.PathGetPropertiesAction]] = None,
+        upn: Optional[bool] = None,
+        lease_access_conditions: Optional[_models.LeaseAccessConditions] = None,
+        modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """Get Properties | Get Status | Get Access Control List.
+
+        Get Properties returns all system and user defined properties for a path. Get Status returns
+        all system defined properties for a path. Get Access Control List returns the access control
+        list for a path. This operation supports conditional HTTP requests.  For more information, see
+        `Specifying Conditional Headers for Blob Service Operations
+        <https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations>`_.
+
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param action: Optional. If the value is "getStatus" only the system defined properties for the
+         path are returned. If the value is "getAccessControl" the access control list is returned in
+         the response headers (Hierarchical Namespace must be enabled for the account), otherwise the
+         properties are returned. Known values are: "getAccessControl" and "getStatus". Default value is
+         None.
+        :type action: str or ~azure.storage.filedatalake.models.PathGetPropertiesAction
+        :param upn: Optional. Valid only when Hierarchical Namespace is enabled for the account. If
+         "true", the user identity values returned in the x-ms-owner, x-ms-group, and x-ms-acl response
+         headers will be transformed from Azure Active Directory Object IDs to User Principal Names.  If
+         "false", the values will be returned as Azure Active Directory Object IDs. The default value is
+         false. Note that group and application Object IDs are not translated because they do not have
+         unique friendly names. Default value is None.
+        :type upn: bool
+        :param lease_access_conditions: Parameter group. Default value is None.
+        :type lease_access_conditions: ~azure.storage.filedatalake.models.LeaseAccessConditions
+        :param modified_access_conditions: Parameter group. Default value is None.
+        :type modified_access_conditions: ~azure.storage.filedatalake.models.ModifiedAccessConditions
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = kwargs.pop("params", {}) or {}
+
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _lease_id = None
+        _if_match = None
+        _if_none_match = None
+        _if_modified_since = None
+        _if_unmodified_since = None
+        if lease_access_conditions is not None:
+            _lease_id = lease_access_conditions.lease_id
+        if modified_access_conditions is not None:
+            _if_match = modified_access_conditions.if_match
+            _if_modified_since = modified_access_conditions.if_modified_since
+            _if_none_match = modified_access_conditions.if_none_match
+            _if_unmodified_since = modified_access_conditions.if_unmodified_since
+
+        _request = build_get_properties_request(
+            url=self._config.url,
+            request_id_parameter=request_id_parameter,
+            timeout=timeout,
+            action=action,
+            upn=upn,
+            lease_id=_lease_id,
+            if_match=_if_match,
+            if_none_match=_if_none_match,
+            if_modified_since=_if_modified_since,
+            if_unmodified_since=_if_unmodified_since,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["Accept-Ranges"] = self._deserialize("str", response.headers.get("Accept-Ranges"))
+        response_headers["Cache-Control"] = self._deserialize("str", response.headers.get("Cache-Control"))
+        response_headers["Content-Disposition"] = self._deserialize("str", response.headers.get("Content-Disposition"))
+        response_headers["Content-Encoding"] = self._deserialize("str", response.headers.get("Content-Encoding"))
+        response_headers["Content-Language"] = self._deserialize("str", response.headers.get("Content-Language"))
+        response_headers["Content-Length"] = self._deserialize("int", response.headers.get("Content-Length"))
+        response_headers["Content-Range"] = self._deserialize("str", response.headers.get("Content-Range"))
+        response_headers["Content-Type"] = self._deserialize("str", response.headers.get("Content-Type"))
+        response_headers["Content-MD5"] = self._deserialize("str", response.headers.get("Content-MD5"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["x-ms-resource-type"] = self._deserialize("str", response.headers.get("x-ms-resource-type"))
+        response_headers["x-ms-properties"] = self._deserialize("str", response.headers.get("x-ms-properties"))
+        response_headers["x-ms-owner"] = self._deserialize("str", response.headers.get("x-ms-owner"))
+        response_headers["x-ms-group"] = self._deserialize("str", response.headers.get("x-ms-group"))
+        response_headers["x-ms-permissions"] = self._deserialize("str", response.headers.get("x-ms-permissions"))
+        response_headers["x-ms-acl"] = self._deserialize("str", response.headers.get("x-ms-acl"))
+        response_headers["x-ms-lease-duration"] = self._deserialize("str", response.headers.get("x-ms-lease-duration"))
+        response_headers["x-ms-lease-state"] = self._deserialize("str", response.headers.get("x-ms-lease-state"))
+        response_headers["x-ms-lease-status"] = self._deserialize("str", response.headers.get("x-ms-lease-status"))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
+    @distributed_trace_async
+    async def delete(
+        self,
+        request_id_parameter: Optional[str] = None,
+        timeout: Optional[int] = None,
+        recursive: Optional[bool] = None,
+        continuation: Optional[str] = None,
+        paginated: Optional[bool] = None,
+        lease_access_conditions: Optional[_models.LeaseAccessConditions] = None,
+        modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """Delete File | Delete Directory.
+
+        Delete the file or directory. This operation supports conditional HTTP requests.  For more
+        information, see `Specifying Conditional Headers for Blob Service Operations
+        <https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations>`_.
+
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param recursive: Required and valid only when the resource is a directory. If "true", all
+         paths beneath the directory will be deleted. Default value is None.
+        :type recursive: bool
+        :param continuation: Optional.  When deleting a directory, the number of paths that are deleted
+         with each invocation is limited.  If the number of paths to be deleted exceeds this limit, a
+         continuation token is returned in the x-ms-continuation response header.  When a continuation token is returned
+         in the response, it must be specified in a subsequent invocation of the delete operation to
+         continue deleting the directory. Default value is None.
+        :type continuation: str
+        :param paginated: If true, the delete is paginated: the recursive ACL checks required by POSIX
+         semantics are performed in pages on the server, and the delete itself runs as an atomic
+         operation once the ACL checks are complete. If false or omitted, the default behavior applies,
+         which may time out on very large directories because of the recursive ACL checks. This
+         parameter was introduced for backward compatibility. Default value is None.
+        :type paginated: bool
+        :param lease_access_conditions: Parameter group. Default value is None.
+        :type lease_access_conditions: ~azure.storage.filedatalake.models.LeaseAccessConditions
+        :param modified_access_conditions: Parameter group. Default value is None.
+        :type modified_access_conditions: ~azure.storage.filedatalake.models.ModifiedAccessConditions
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = kwargs.pop("params", {}) or {}
+
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _lease_id = None
+        _if_match = None
+        _if_none_match = None
+        _if_modified_since = None
+        _if_unmodified_since = None
+        if lease_access_conditions is not None:
+            _lease_id = lease_access_conditions.lease_id
+        if modified_access_conditions is not None:
+            _if_match = modified_access_conditions.if_match
+            _if_modified_since = modified_access_conditions.if_modified_since
+            _if_none_match = modified_access_conditions.if_none_match
+            _if_unmodified_since = modified_access_conditions.if_unmodified_since
+
+        _request = build_delete_request(
+            url=self._config.url,
+            request_id_parameter=request_id_parameter,
+            timeout=timeout,
+            recursive=recursive,
+            continuation=continuation,
+            lease_id=_lease_id,
+            if_match=_if_match,
+            if_none_match=_if_none_match,
+            if_modified_since=_if_modified_since,
+            if_unmodified_since=_if_unmodified_since,
+            paginated=paginated,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200, 202]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        if response.status_code == 200:
+            response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+            response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+            response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+            response_headers["x-ms-continuation"] = self._deserialize("str", response.headers.get("x-ms-continuation"))
+            response_headers["x-ms-deletion-id"] = self._deserialize("str", response.headers.get("x-ms-deletion-id"))
+
+        if response.status_code == 202:
+            response_headers["Date"] = self._deserialize("str", response.headers.get("Date"))
+            response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+            response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+            response_headers["x-ms-continuation"] = self._deserialize("str", response.headers.get("x-ms-continuation"))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
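+    # --- Usage sketch (editor's note, not part of the generated client) -----
+    # A hedged example of deleting a large directory by draining the
+    # x-ms-continuation header; `path_ops` is an assumed PathOperations
+    # instance bound to a directory URL, and the `cls` callback is one way to
+    # capture response headers from this generated client.
+    #
+    #     continuation = None
+    #     while True:
+    #         hdrs: dict = {}
+    #         await path_ops.delete(
+    #             recursive=True,
+    #             continuation=continuation,
+    #             cls=lambda resp, result, headers: hdrs.update(headers),
+    #         )
+    #         continuation = hdrs.get("x-ms-continuation")
+    #         if not continuation:
+    #             break
+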
+    @distributed_trace_async
+    async def set_access_control(
+        self,
+        timeout: Optional[int] = None,
+        owner: Optional[str] = None,
+        group: Optional[str] = None,
+        permissions: Optional[str] = None,
+        acl: Optional[str] = None,
+        request_id_parameter: Optional[str] = None,
+        lease_access_conditions: Optional[_models.LeaseAccessConditions] = None,
+        modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """Set the owner, group, permissions, or access control list for a path.
+
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param owner: Optional. The owner of the blob or directory. Default value is None.
+        :type owner: str
+        :param group: Optional. The owning group of the blob or directory. Default value is None.
+        :type group: str
+        :param permissions: Optional and only valid if Hierarchical Namespace is enabled for the
+         account. Sets POSIX access permissions for the file owner, the file owning group, and others.
+         Each class may be granted read, write, or execute permission.  The sticky bit is also
+         supported.  Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are supported.
+         Default value is None.
+        :type permissions: str
+        :param acl: Sets POSIX access control rights on files and directories. The value is a
+         comma-separated list of access control entries. Each access control entry (ACE) consists of a
+         scope, a type, a user or group identifier, and permissions in the format
+         "[scope:][type]:[id]:[permissions]". Default value is None.
+        :type acl: str
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param lease_access_conditions: Parameter group. Default value is None.
+        :type lease_access_conditions: ~azure.storage.filedatalake.models.LeaseAccessConditions
+        :param modified_access_conditions: Parameter group. Default value is None.
+        :type modified_access_conditions: ~azure.storage.filedatalake.models.ModifiedAccessConditions
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        action: Literal["setAccessControl"] = kwargs.pop("action", _params.pop("action", "setAccessControl"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _lease_id = None
+        _if_match = None
+        _if_none_match = None
+        _if_modified_since = None
+        _if_unmodified_since = None
+        if lease_access_conditions is not None:
+            _lease_id = lease_access_conditions.lease_id
+        if modified_access_conditions is not None:
+            _if_match = modified_access_conditions.if_match
+            _if_modified_since = modified_access_conditions.if_modified_since
+            _if_none_match = modified_access_conditions.if_none_match
+            _if_unmodified_since = modified_access_conditions.if_unmodified_since
+
+        _request = build_set_access_control_request(
+            url=self._config.url,
+            timeout=timeout,
+            lease_id=_lease_id,
+            owner=owner,
+            group=group,
+            permissions=permissions,
+            acl=acl,
+            if_match=_if_match,
+            if_none_match=_if_none_match,
+            if_modified_since=_if_modified_since,
+            if_unmodified_since=_if_unmodified_since,
+            request_id_parameter=request_id_parameter,
+            action=action,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
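+    # --- Usage sketch (editor's note, not part of the generated client) -----
+    # Two hedged ways to call set_access_control: octal permissions, or an
+    # explicit ACL in "[scope:][type]:[id]:[permissions]" form. The object id
+    # below is a made-up placeholder; `path_ops` is an assumed PathOperations
+    # instance.
+    #
+    #     await path_ops.set_access_control(permissions="0750")
+    #     await path_ops.set_access_control(
+    #         acl="user::rwx,group::r-x,other::---,"
+    #             "user:11111111-2222-3333-4444-555555555555:r--"
+    #     )
+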
+    @distributed_trace_async
+    async def set_access_control_recursive(
+        self,
+        mode: Union[str, _models.PathSetAccessControlRecursiveMode],
+        timeout: Optional[int] = None,
+        continuation: Optional[str] = None,
+        force_flag: Optional[bool] = None,
+        max_records: Optional[int] = None,
+        acl: Optional[str] = None,
+        request_id_parameter: Optional[str] = None,
+        **kwargs: Any
+    ) -> _models.SetAccessControlRecursiveResponse:
+        # pylint: disable=line-too-long
+        """Set the access control list for a path and sub-paths.
+
+        :param mode: Mode "set" sets POSIX access control rights on files and directories, "modify"
+         modifies one or more POSIX access control rights  that pre-exist on files and directories,
+         "remove" removes one or more POSIX access control rights  that were present earlier on files
+         and directories. Known values are: "set", "modify", and "remove". Required.
+        :type mode: str or ~azure.storage.filedatalake.models.PathSetAccessControlRecursiveMode
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param continuation: Optional.  When performing setAccessControlRecursive on a directory,
+         the number of paths that are processed with each invocation is limited.  If the number of
+         paths to be processed exceeds this limit, a continuation token is returned in the
+         x-ms-continuation response header.  When a continuation token is returned in the response,
+         it must be specified in a subsequent invocation of the operation to continue processing the
+         directory. Default value is None.
+        :type continuation: str
+        :param force_flag: Optional. Valid for "SetAccessControlRecursive" operation. If set to false,
+         the operation will terminate quickly on encountering user errors (4XX). If true, the operation
+         will ignore user errors and proceed with the operation on other sub-entities of the directory.
+         Continuation token will only be returned when forceFlag is true in case of user errors. If not
+         set, the default value is false. Default value is None.
+        :type force_flag: bool
+        :param max_records: Optional. It specifies the maximum number of files or directories on which
+         the acl change will be applied. If omitted or greater than 2,000, the request will process up
+         to 2,000 items. Default value is None.
+        :type max_records: int
+        :param acl: Sets POSIX access control rights on files and directories. The value is a
+         comma-separated list of access control entries. Each access control entry (ACE) consists of a
+         scope, a type, a user or group identifier, and permissions in the format
+         "[scope:][type]:[id]:[permissions]". Default value is None.
+        :type acl: str
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :return: SetAccessControlRecursiveResponse or the result of cls(response)
+        :rtype: ~azure.storage.filedatalake.models.SetAccessControlRecursiveResponse
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        action: Literal["setAccessControlRecursive"] = kwargs.pop(
+            "action", _params.pop("action", "setAccessControlRecursive")
+        )
+        cls: ClsType[_models.SetAccessControlRecursiveResponse] = kwargs.pop("cls", None)
+
+        _request = build_set_access_control_recursive_request(
+            url=self._config.url,
+            mode=mode,
+            timeout=timeout,
+            continuation=continuation,
+            force_flag=force_flag,
+            max_records=max_records,
+            acl=acl,
+            request_id_parameter=request_id_parameter,
+            action=action,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-continuation"] = self._deserialize("str", response.headers.get("x-ms-continuation"))
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+
+        deserialized = self._deserialize("SetAccessControlRecursiveResponse", pipeline_response.http_response)
+
+        if cls:
+            return cls(pipeline_response, deserialized, response_headers)  # type: ignore
+
+        return deserialized  # type: ignore
+
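+    # --- Usage sketch (editor's note, not part of the generated client) -----
+    # A hedged loop applying an ACL change to a tree in pages. The
+    # continuation token arrives in the x-ms-continuation header, so the `cls`
+    # callback captures headers while still returning the deserialized
+    # SetAccessControlRecursiveResponse. `path_ops` is assumed.
+    #
+    #     continuation = None
+    #     while True:
+    #         hdrs: dict = {}
+    #         result = await path_ops.set_access_control_recursive(
+    #             mode="modify",
+    #             acl="user::rwx,default:user::rwx",
+    #             force_flag=True,  # continue past per-entry 4XX failures
+    #             continuation=continuation,
+    #             cls=lambda resp, deserialized, headers: (
+    #                 hdrs.update(headers) or deserialized),
+    #         )
+    #         continuation = hdrs.get("x-ms-continuation")
+    #         if not continuation:
+    #             break
+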
+    @distributed_trace_async
+    async def flush_data(
+        self,
+        timeout: Optional[int] = None,
+        position: Optional[int] = None,
+        retain_uncommitted_data: Optional[bool] = None,
+        close: Optional[bool] = None,
+        content_length: Optional[int] = None,
+        lease_action: Optional[Union[str, _models.LeaseAction]] = None,
+        lease_duration: Optional[int] = None,
+        proposed_lease_id: Optional[str] = None,
+        request_id_parameter: Optional[str] = None,
+        path_http_headers: Optional[_models.PathHTTPHeaders] = None,
+        lease_access_conditions: Optional[_models.LeaseAccessConditions] = None,
+        modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None,
+        cpk_info: Optional[_models.CpkInfo] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """Set the owner, group, permissions, or access control list for a path.
+
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param position: This parameter allows the caller to upload data in parallel and control the
+         order in which it is appended to the file.  It is required when uploading data to be appended
+         to the file and when flushing previously uploaded data to the file.  The value must be the
+         position where the data is to be appended.  Uploaded data is not immediately flushed, or
+         written, to the file.  To flush, the previously uploaded data must be contiguous, the position
+         parameter must be specified and equal to the length of the file after all data has been
+         written, and there must not be a request entity body included with the request. Default value
+         is None.
+        :type position: int
+        :param retain_uncommitted_data: Valid only for flush operations.  If "true", uncommitted data
+         is retained after the flush operation completes; otherwise, the uncommitted data is deleted
+         after the flush operation.  The default is false.  Data at offsets less than the specified
+         position are written to the file when flush succeeds, but this optional parameter allows data
+         after the flush position to be retained for a future flush operation. Default value is None.
+        :type retain_uncommitted_data: bool
+        :param close: Azure Storage Events allow applications to receive notifications when files
+         change. When Azure Storage Events are enabled, a file changed event is raised. This event has a
+         property indicating whether this is the final change to distinguish the difference between an
+         intermediate flush to a file stream and the final close of a file stream. The close query
+         parameter is valid only when the action is "flush" and change notifications are enabled. If the
+         value of close is "true" and the flush operation completes successfully, the service raises a
+         file change notification with a property indicating that this is the final update (the file
+         stream has been closed). If "false" a change notification is raised indicating the file has
+         changed. The default is false. This query parameter is set to true by the Hadoop ABFS driver to
+         indicate that the file stream has been closed. Default value is None.
+        :type close: bool
+        :param content_length: Required for "Append Data" and "Flush Data".  Must be 0 for "Flush
+         Data".  Must be the length of the request content in bytes for "Append Data". Default value is
+         None.
+        :type content_length: int
+        :param lease_action: Optional. If "acquire", it will acquire the lease. If "auto-renew", it
+         will renew the lease. If "release", it will release the lease only on flush. If
+         "acquire-release", it will acquire the lease, complete the operation, and release the lease
+         once the operation is done. Known values are: "acquire", "auto-renew", "release", and
+         "acquire-release". Default value is None.
+        :type lease_action: str or ~azure.storage.filedatalake.models.LeaseAction
+        :param lease_duration: The lease duration is required to acquire a lease, and specifies the
+         duration of the lease in seconds.  The lease duration must be between 15 and 60 seconds or -1
+         for infinite lease. Default value is None.
+        :type lease_duration: int
+        :param proposed_lease_id: Proposed lease ID, in a GUID string format. The Blob service returns
+         400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid
+         Constructor (String) for a list of valid GUID string formats. Default value is None.
+        :type proposed_lease_id: str
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param path_http_headers: Parameter group. Default value is None.
+        :type path_http_headers: ~azure.storage.filedatalake.models.PathHTTPHeaders
+        :param lease_access_conditions: Parameter group. Default value is None.
+        :type lease_access_conditions: ~azure.storage.filedatalake.models.LeaseAccessConditions
+        :param modified_access_conditions: Parameter group. Default value is None.
+        :type modified_access_conditions: ~azure.storage.filedatalake.models.ModifiedAccessConditions
+        :param cpk_info: Parameter group. Default value is None.
+        :type cpk_info: ~azure.storage.filedatalake.models.CpkInfo
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        action: Literal["flush"] = kwargs.pop("action", _params.pop("action", "flush"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _content_md5 = None
+        _lease_id = None
+        _cache_control = None
+        _content_type_parameter = None
+        _content_disposition = None
+        _content_encoding = None
+        _content_language = None
+        _if_match = None
+        _if_none_match = None
+        _if_modified_since = None
+        _if_unmodified_since = None
+        _encryption_key = None
+        _encryption_key_sha256 = None
+        _encryption_algorithm = None
+        if path_http_headers is not None:
+            _cache_control = path_http_headers.cache_control
+            _content_disposition = path_http_headers.content_disposition
+            _content_encoding = path_http_headers.content_encoding
+            _content_language = path_http_headers.content_language
+            _content_md5 = path_http_headers.content_md5
+            _content_type_parameter = path_http_headers.content_type
+        if lease_access_conditions is not None:
+            _lease_id = lease_access_conditions.lease_id
+        if modified_access_conditions is not None:
+            _if_match = modified_access_conditions.if_match
+            _if_modified_since = modified_access_conditions.if_modified_since
+            _if_none_match = modified_access_conditions.if_none_match
+            _if_unmodified_since = modified_access_conditions.if_unmodified_since
+        if cpk_info is not None:
+            _encryption_algorithm = cpk_info.encryption_algorithm
+            _encryption_key = cpk_info.encryption_key
+            _encryption_key_sha256 = cpk_info.encryption_key_sha256
+
+        _request = build_flush_data_request(
+            url=self._config.url,
+            timeout=timeout,
+            position=position,
+            retain_uncommitted_data=retain_uncommitted_data,
+            close=close,
+            content_length=content_length,
+            content_md5=_content_md5,
+            lease_id=_lease_id,
+            lease_action=lease_action,
+            lease_duration=lease_duration,
+            proposed_lease_id=proposed_lease_id,
+            cache_control=_cache_control,
+            content_type_parameter=_content_type_parameter,
+            content_disposition=_content_disposition,
+            content_encoding=_content_encoding,
+            content_language=_content_language,
+            if_match=_if_match,
+            if_none_match=_if_none_match,
+            if_modified_since=_if_modified_since,
+            if_unmodified_since=_if_unmodified_since,
+            request_id_parameter=request_id_parameter,
+            encryption_key=_encryption_key,
+            encryption_key_sha256=_encryption_key_sha256,
+            encryption_algorithm=_encryption_algorithm,  # type: ignore
+            action=action,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["Content-Length"] = self._deserialize("int", response.headers.get("Content-Length"))
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["x-ms-request-server-encrypted"] = self._deserialize(
+            "bool", response.headers.get("x-ms-request-server-encrypted")
+        )
+        response_headers["x-ms-encryption-key-sha256"] = self._deserialize(
+            "str", response.headers.get("x-ms-encryption-key-sha256")
+        )
+        response_headers["x-ms-lease-renewed"] = self._deserialize("bool", response.headers.get("x-ms-lease-renewed"))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
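+    # --- Usage sketch (editor's note, not part of the generated client) -----
+    # A hedged append-then-flush sequence. Flush commits all contiguous data
+    # up to `position`; per the docstrings, content_length must be 0 and no
+    # request body is sent. `path_ops` is an assumed PathOperations instance
+    # bound to a file URL.
+    #
+    #     import io
+    #     data = b"hello, data lake"
+    #     await path_ops.append_data(io.BytesIO(data), position=0,
+    #                                content_length=len(data))
+    #     await path_ops.flush_data(position=len(data), content_length=0,
+    #                               close=True)
+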
+    @distributed_trace_async
+    async def append_data(
+        self,
+        body: IO[bytes],
+        position: Optional[int] = None,
+        timeout: Optional[int] = None,
+        content_length: Optional[int] = None,
+        transactional_content_crc64: Optional[bytes] = None,
+        lease_action: Optional[Union[str, _models.LeaseAction]] = None,
+        lease_duration: Optional[int] = None,
+        proposed_lease_id: Optional[str] = None,
+        request_id_parameter: Optional[str] = None,
+        flush: Optional[bool] = None,
+        structured_body_type: Optional[str] = None,
+        structured_content_length: Optional[int] = None,
+        path_http_headers: Optional[_models.PathHTTPHeaders] = None,
+        lease_access_conditions: Optional[_models.LeaseAccessConditions] = None,
+        cpk_info: Optional[_models.CpkInfo] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """Append data to the file.
+
+        :param body: Initial data. Required.
+        :type body: IO[bytes]
+        :param position: This parameter allows the caller to upload data in parallel and control the
+         order in which it is appended to the file.  It is required when uploading data to be appended
+         to the file and when flushing previously uploaded data to the file.  The value must be the
+         position where the data is to be appended.  Uploaded data is not immediately flushed, or
+         written, to the file.  To flush, the previously uploaded data must be contiguous, the position
+         parameter must be specified and equal to the length of the file after all data has been
+         written, and there must not be a request entity body included with the request. Default value
+         is None.
+        :type position: int
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param content_length: Required for "Append Data" and "Flush Data".  Must be 0 for "Flush
+         Data".  Must be the length of the request content in bytes for "Append Data". Default value is
+         None.
+        :type content_length: int
+        :param transactional_content_crc64: Specify the transactional crc64 for the body, to be
+         validated by the service. Default value is None.
+        :type transactional_content_crc64: bytes
+        :param lease_action: Optional. If "acquire", it will acquire the lease. If "auto-renew", it
+         will renew the lease. If "release", it will release the lease only on flush. If
+         "acquire-release", it will acquire the lease, complete the operation, and release the lease
+         once the operation is done. Known values are: "acquire", "auto-renew", "release", and
+         "acquire-release". Default value is None.
+        :type lease_action: str or ~azure.storage.filedatalake.models.LeaseAction
+        :param lease_duration: The lease duration is required to acquire a lease, and specifies the
+         duration of the lease in seconds.  The lease duration must be between 15 and 60 seconds or -1
+         for infinite lease. Default value is None.
+        :type lease_duration: int
+        :param proposed_lease_id: Proposed lease ID, in a GUID string format. The Blob service returns
+         400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid
+         Constructor (String) for a list of valid GUID string formats. Default value is None.
+        :type proposed_lease_id: str
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param flush: If file should be flushed after the append. Default value is None.
+        :type flush: bool
+        :param structured_body_type: Required if the request body is a structured message. Specifies
+         the message schema version and properties. Default value is None.
+        :type structured_body_type: str
+        :param structured_content_length: Required if the request body is a structured message.
+         Specifies the length of the blob/file content inside the message body. Will always be smaller
+         than Content-Length. Default value is None.
+        :type structured_content_length: int
+        :param path_http_headers: Parameter group. Default value is None.
+        :type path_http_headers: ~azure.storage.filedatalake.models.PathHTTPHeaders
+        :param lease_access_conditions: Parameter group. Default value is None.
+        :type lease_access_conditions: ~azure.storage.filedatalake.models.LeaseAccessConditions
+        :param cpk_info: Parameter group. Default value is None.
+        :type cpk_info: ~azure.storage.filedatalake.models.CpkInfo
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        action: Literal["append"] = kwargs.pop("action", _params.pop("action", "append"))
+        content_type: str = kwargs.pop("content_type", _headers.pop("Content-Type", "application/json"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _transactional_content_hash = None
+        _lease_id = None
+        _encryption_key = None
+        _encryption_key_sha256 = None
+        _encryption_algorithm = None
+        if path_http_headers is not None:
+            _transactional_content_hash = path_http_headers.transactional_content_hash
+        if lease_access_conditions is not None:
+            _lease_id = lease_access_conditions.lease_id
+        if cpk_info is not None:
+            _encryption_algorithm = cpk_info.encryption_algorithm
+            _encryption_key = cpk_info.encryption_key
+            _encryption_key_sha256 = cpk_info.encryption_key_sha256
+        _content = body
+
+        _request = build_append_data_request(
+            url=self._config.url,
+            position=position,
+            timeout=timeout,
+            content_length=content_length,
+            transactional_content_hash=_transactional_content_hash,
+            transactional_content_crc64=transactional_content_crc64,
+            lease_id=_lease_id,
+            lease_action=lease_action,
+            lease_duration=lease_duration,
+            proposed_lease_id=proposed_lease_id,
+            request_id_parameter=request_id_parameter,
+            encryption_key=_encryption_key,
+            encryption_key_sha256=_encryption_key_sha256,
+            encryption_algorithm=_encryption_algorithm,  # type: ignore
+            flush=flush,
+            structured_body_type=structured_body_type,
+            structured_content_length=structured_content_length,
+            action=action,
+            content_type=content_type,
+            version=self._config.version,
+            content=_content,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [202]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Content-MD5"] = self._deserialize("bytearray", response.headers.get("Content-MD5"))
+        response_headers["x-ms-content-crc64"] = self._deserialize(
+            "bytearray", response.headers.get("x-ms-content-crc64")
+        )
+        response_headers["x-ms-request-server-encrypted"] = self._deserialize(
+            "bool", response.headers.get("x-ms-request-server-encrypted")
+        )
+        response_headers["x-ms-encryption-key-sha256"] = self._deserialize(
+            "str", response.headers.get("x-ms-encryption-key-sha256")
+        )
+        response_headers["x-ms-lease-renewed"] = self._deserialize("bool", response.headers.get("x-ms-lease-renewed"))
+        response_headers["x-ms-structured-body"] = self._deserialize(
+            "str", response.headers.get("x-ms-structured-body")
+        )
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
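+    # --- Usage sketch (editor's note, not part of the generated client) -----
+    # A hedged two-block append: each block's `position` is the byte offset
+    # where it lands, i.e. the running total of bytes appended so far, and
+    # `flush=True` on the final block asks the service to flush after the
+    # append. `path_ops` is assumed; `io` must be imported.
+    #
+    #     p1, p2 = b"part-one,", b"part-two"
+    #     await path_ops.append_data(io.BytesIO(p1), position=0,
+    #                                content_length=len(p1))
+    #     await path_ops.append_data(io.BytesIO(p2), position=len(p1),
+    #                                content_length=len(p2), flush=True)
+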
+    @distributed_trace_async
+    async def set_expiry(
+        self,
+        expiry_options: Union[str, _models.PathExpiryOptions],
+        timeout: Optional[int] = None,
+        request_id_parameter: Optional[str] = None,
+        expires_on: Optional[str] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """Sets the time a blob will expire and be deleted.
+
+        :param expiry_options: Indicates mode of the expiry time. Known values are: "NeverExpire",
+         "RelativeToCreation", "RelativeToNow", and "Absolute". Required.
+        :type expiry_options: str or ~azure.storage.filedatalake.models.PathExpiryOptions
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param expires_on: The time to set the blob to expire. Default value is None.
+        :type expires_on: str
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        comp: Literal["expiry"] = kwargs.pop("comp", _params.pop("comp", "expiry"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _request = build_set_expiry_request(
+            url=self._config.url,
+            expiry_options=expiry_options,
+            timeout=timeout,
+            request_id_parameter=request_id_parameter,
+            expires_on=expires_on,
+            comp=comp,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
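+    # --- Usage sketch (editor's note, not part of the generated client) -----
+    # Hedged set_expiry calls. The wire format of expires_on is an assumption
+    # here: an RFC-1123 timestamp for "Absolute" and a millisecond count for
+    # the relative modes, mirroring the Blob set-expiry API.
+    #
+    #     await path_ops.set_expiry("Absolute",
+    #                               expires_on="Fri, 01 Jan 2027 00:00:00 GMT")
+    #     await path_ops.set_expiry("RelativeToNow", expires_on="3600000")
+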
+    @distributed_trace_async
+    async def undelete(
+        self,
+        timeout: Optional[int] = None,
+        undelete_source: Optional[str] = None,
+        request_id_parameter: Optional[str] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """Undelete a path that was previously soft deleted.
+
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param undelete_source: Only for hierarchical namespace enabled accounts. Optional. The path of
+         the soft deleted blob to undelete. Default value is None.
+        :type undelete_source: str
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        comp: Literal["undelete"] = kwargs.pop("comp", _params.pop("comp", "undelete"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _request = build_undelete_request(
+            url=self._config.url,
+            timeout=timeout,
+            undelete_source=undelete_source,
+            request_id_parameter=request_id_parameter,
+            comp=comp,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-resource-type"] = self._deserialize("str", response.headers.get("x-ms-resource-type"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
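+
+    # --- Usage sketch (editor's note, not part of the generated client) -----
+    # A hedged restore of a soft-deleted path. The undelete_source format
+    # shown (deleted path plus a deletionid query) is an assumption based on
+    # how the hand-written client layer builds it; the id itself is made up.
+    #
+    #     await path_ops.undelete(
+    #         undelete_source="dir/file.txt?deletionid=132620460906780000",
+    #     )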
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/aio/operations/_service_operations.py b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/aio/operations/_service_operations.py
new file mode 100644
index 00000000..0e0243e9
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/aio/operations/_service_operations.py
@@ -0,0 +1,161 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+import sys
+from typing import Any, AsyncIterable, Callable, Dict, Literal, Optional, TypeVar
+
+from azure.core import AsyncPipelineClient
+from azure.core.async_paging import AsyncItemPaged, AsyncList
+from azure.core.exceptions import (
+    ClientAuthenticationError,
+    HttpResponseError,
+    ResourceExistsError,
+    ResourceNotFoundError,
+    ResourceNotModifiedError,
+    map_error,
+)
+from azure.core.pipeline import PipelineResponse
+from azure.core.rest import AsyncHttpResponse, HttpRequest
+from azure.core.tracing.decorator import distributed_trace
+from azure.core.utils import case_insensitive_dict
+
+from ... import models as _models
+from ..._serialization import Deserializer, Serializer
+from ...operations._service_operations import build_list_file_systems_request
+from .._configuration import AzureDataLakeStorageRESTAPIConfiguration
+
+if sys.version_info >= (3, 9):
+    from collections.abc import MutableMapping
+else:
+    from typing import MutableMapping  # type: ignore
+T = TypeVar("T")
+ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
+
+
+class ServiceOperations:
+    """
+    .. warning::
+        **DO NOT** instantiate this class directly.
+
+        Instead, you should access the following operations through
+        :class:`~azure.storage.filedatalake.aio.AzureDataLakeStorageRESTAPI`'s
+        :attr:`service` attribute.
+    """
+
+    models = _models
+
+    def __init__(self, *args, **kwargs) -> None:
+        input_args = list(args)
+        self._client: AsyncPipelineClient = input_args.pop(0) if input_args else kwargs.pop("client")
+        self._config: AzureDataLakeStorageRESTAPIConfiguration = (
+            input_args.pop(0) if input_args else kwargs.pop("config")
+        )
+        self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer")
+        self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer")
+
+    @distributed_trace
+    def list_file_systems(
+        self,
+        prefix: Optional[str] = None,
+        continuation: Optional[str] = None,
+        max_results: Optional[int] = None,
+        request_id_parameter: Optional[str] = None,
+        timeout: Optional[int] = None,
+        **kwargs: Any
+    ) -> AsyncIterable["_models.FileSystem"]:
+        # pylint: disable=line-too-long
+        """List FileSystems.
+
+        List filesystems and their properties in given account.
+
+        :param prefix: Filters results to filesystems within the specified prefix. Default value is
+         None.
+        :type prefix: str
+        :param continuation: Optional.  When listing filesystems, the number of filesystems returned
+         with each invocation is limited.  If the number of filesystems to be returned exceeds this
+         limit, a continuation token is returned in the x-ms-continuation response header.  When a
+         continuation token is returned in the response, it must be specified in a subsequent
+         invocation of the list operation to continue listing. Default value is None.
+        :type continuation: str
+        :param max_results: An optional value that specifies the maximum number of items to return. If
+         omitted or greater than 5,000, the response will include up to 5,000 items. Default value is
+         None.
+        :type max_results: int
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :return: An iterator like instance of either FileSystem or the result of cls(response)
+        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.storage.filedatalake.models.FileSystem]
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        resource: Literal["account"] = kwargs.pop("resource", _params.pop("resource", "account"))
+        cls: ClsType[_models.FileSystemList] = kwargs.pop("cls", None)
+
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        def prepare_request(next_link=None):
+            if not next_link:
+
+                _request = build_list_file_systems_request(
+                    url=self._config.url,
+                    prefix=prefix,
+                    continuation=continuation,
+                    max_results=max_results,
+                    request_id_parameter=request_id_parameter,
+                    timeout=timeout,
+                    resource=resource,
+                    version=self._config.version,
+                    headers=_headers,
+                    params=_params,
+                )
+                _request.url = self._client.format_url(_request.url)
+
+            else:
+                _request = HttpRequest("GET", next_link)
+                _request.url = self._client.format_url(_request.url)
+                _request.method = "GET"
+            return _request
+
+        async def extract_data(pipeline_response):
+            deserialized = self._deserialize("FileSystemList", pipeline_response)
+            list_of_elem = deserialized.filesystems
+            if cls:
+                list_of_elem = cls(list_of_elem)  # type: ignore
+            return None, AsyncList(list_of_elem)
+
+        async def get_next(next_link=None):
+            _request = prepare_request(next_link)
+
+            _stream = False
+            pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+                _request, stream=_stream, **kwargs
+            )
+            response = pipeline_response.http_response
+
+            if response.status_code not in [200]:
+                map_error(status_code=response.status_code, response=response, error_map=error_map)
+                error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+                raise HttpResponseError(response=response, model=error)
+
+            return pipeline_response
+
+        return AsyncItemPaged(get_next, extract_data)
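+
+# --- Usage sketch (editor's note, not part of the generated client) ----------
+# list_file_systems returns an AsyncItemPaged, so it is consumed with
+# `async for`; `service_ops` is an assumed ServiceOperations instance, and the
+# attributes read below come from the FileSystem model.
+#
+#     async for fs in service_ops.list_file_systems(prefix="logs-"):
+#         print(fs.name, fs.last_modified)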
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/models/__init__.py b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/models/__init__.py
new file mode 100644
index 00000000..ca1ce1ca
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/models/__init__.py
@@ -0,0 +1,82 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+# pylint: disable=wrong-import-position
+
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from ._patch import *  # pylint: disable=unused-wildcard-import
+
+
+from ._models_py3 import (  # type: ignore
+    AclFailedEntry,
+    BlobHierarchyListSegment,
+    BlobItemInternal,
+    BlobPrefix,
+    BlobPropertiesInternal,
+    CpkInfo,
+    FileSystem,
+    FileSystemList,
+    LeaseAccessConditions,
+    ListBlobsHierarchySegmentResponse,
+    ModifiedAccessConditions,
+    Path,
+    PathHTTPHeaders,
+    PathList,
+    SetAccessControlRecursiveResponse,
+    SourceModifiedAccessConditions,
+    StorageError,
+    StorageErrorError,
+)
+
+from ._azure_data_lake_storage_restapi_enums import (  # type: ignore
+    LeaseAction,
+    ListBlobsIncludeItem,
+    PathExpiryOptions,
+    PathGetPropertiesAction,
+    PathLeaseAction,
+    PathRenameMode,
+    PathResourceType,
+    PathSetAccessControlRecursiveMode,
+    PathUpdateAction,
+)
+from ._patch import __all__ as _patch_all
+from ._patch import *
+from ._patch import patch_sdk as _patch_sdk
+
+__all__ = [
+    "AclFailedEntry",
+    "BlobHierarchyListSegment",
+    "BlobItemInternal",
+    "BlobPrefix",
+    "BlobPropertiesInternal",
+    "CpkInfo",
+    "FileSystem",
+    "FileSystemList",
+    "LeaseAccessConditions",
+    "ListBlobsHierarchySegmentResponse",
+    "ModifiedAccessConditions",
+    "Path",
+    "PathHTTPHeaders",
+    "PathList",
+    "SetAccessControlRecursiveResponse",
+    "SourceModifiedAccessConditions",
+    "StorageError",
+    "StorageErrorError",
+    "LeaseAction",
+    "ListBlobsIncludeItem",
+    "PathExpiryOptions",
+    "PathGetPropertiesAction",
+    "PathLeaseAction",
+    "PathRenameMode",
+    "PathResourceType",
+    "PathSetAccessControlRecursiveMode",
+    "PathUpdateAction",
+]
+__all__.extend([p for p in _patch_all if p not in __all__])  # pyright: ignore
+_patch_sdk()
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/models/_azure_data_lake_storage_restapi_enums.py b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/models/_azure_data_lake_storage_restapi_enums.py
new file mode 100644
index 00000000..c9bb43b5
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/models/_azure_data_lake_storage_restapi_enums.py
@@ -0,0 +1,90 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+from enum import Enum
+from azure.core import CaseInsensitiveEnumMeta
+
+
+class LeaseAction(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+    """LeaseAction."""
+
+    ACQUIRE = "acquire"
+    AUTO_RENEW = "auto-renew"
+    RELEASE = "release"
+    ACQUIRE_RELEASE = "acquire-release"
+
+
+class ListBlobsIncludeItem(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+    """ListBlobsIncludeItem."""
+
+    COPY = "copy"
+    DELETED = "deleted"
+    METADATA = "metadata"
+    SNAPSHOTS = "snapshots"
+    UNCOMMITTEDBLOBS = "uncommittedblobs"
+    VERSIONS = "versions"
+    TAGS = "tags"
+
+
+class PathExpiryOptions(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+    """PathExpiryOptions."""
+
+    NEVER_EXPIRE = "NeverExpire"
+    RELATIVE_TO_CREATION = "RelativeToCreation"
+    RELATIVE_TO_NOW = "RelativeToNow"
+    ABSOLUTE = "Absolute"
+
+
+class PathGetPropertiesAction(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+    """PathGetPropertiesAction."""
+
+    GET_ACCESS_CONTROL = "getAccessControl"
+    GET_STATUS = "getStatus"
+
+
+class PathLeaseAction(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+    """PathLeaseAction."""
+
+    ACQUIRE = "acquire"
+    BREAK = "break"
+    CHANGE = "change"
+    RENEW = "renew"
+    RELEASE = "release"
+    BREAK_ENUM = "break"  # Duplicate of BREAK's value; Python's Enum resolves this member to an alias of BREAK.
+
+
+class PathRenameMode(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+    """PathRenameMode."""
+
+    LEGACY = "legacy"
+    POSIX = "posix"
+
+
+class PathResourceType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+    """PathResourceType."""
+
+    DIRECTORY = "directory"
+    FILE = "file"
+
+
+class PathSetAccessControlRecursiveMode(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+    """PathSetAccessControlRecursiveMode."""
+
+    SET = "set"
+    MODIFY = "modify"
+    REMOVE = "remove"
+
+
+class PathUpdateAction(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+    """PathUpdateAction."""
+
+    APPEND = "append"
+    FLUSH = "flush"
+    SET_PROPERTIES = "setProperties"
+    SET_ACCESS_CONTROL = "setAccessControl"
+    SET_ACCESS_CONTROL_RECURSIVE = "setAccessControlRecursive"
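
Each enum above is a `str` subclass using `azure.core`'s `CaseInsensitiveEnumMeta`, so member access by name tolerates casing while values stay plain strings. A self-contained sketch of that behavior (the enum is redefined locally so the snippet runs on its own):

    from enum import Enum
    from azure.core import CaseInsensitiveEnumMeta

    class PathResourceType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
        DIRECTORY = "directory"
        FILE = "file"

    # Name lookups are case-insensitive, which is the point of the metaclass:
    assert PathResourceType["file"] is PathResourceType.FILE
    assert PathResourceType.directory is PathResourceType.DIRECTORY
    # Members are also plain strings, so they drop straight into query and header values:
    assert PathResourceType.FILE == "file"
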
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/models/_models_py3.py b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/models/_models_py3.py
new file mode 100644
index 00000000..6289f29b
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/models/_models_py3.py
@@ -0,0 +1,1041 @@
+# pylint: disable=too-many-lines
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+import datetime
+from typing import Any, List, Literal, Optional, TYPE_CHECKING
+
+from .. import _serialization
+
+if TYPE_CHECKING:
+    from .. import models as _models
+
+
+class AclFailedEntry(_serialization.Model):
+    """AclFailedEntry.
+
+    :ivar name:
+    :vartype name: str
+    :ivar type:
+    :vartype type: str
+    :ivar error_message:
+    :vartype error_message: str
+    """
+
+    _attribute_map = {
+        "name": {"key": "name", "type": "str"},
+        "type": {"key": "type", "type": "str"},
+        "error_message": {"key": "errorMessage", "type": "str"},
+    }
+
+    def __init__(
+        self,
+        *,
+        name: Optional[str] = None,
+        type: Optional[str] = None,
+        error_message: Optional[str] = None,
+        **kwargs: Any
+    ) -> None:
+        """
+        :keyword name:
+        :paramtype name: str
+        :keyword type:
+        :paramtype type: str
+        :keyword error_message:
+        :paramtype error_message: str
+        """
+        super().__init__(**kwargs)
+        self.name = name
+        self.type = type
+        self.error_message = error_message
+
+
+class BlobHierarchyListSegment(_serialization.Model):
+    """BlobHierarchyListSegment.
+
+    All required parameters must be populated in order to send to the server.
+
+    :ivar blob_prefixes:
+    :vartype blob_prefixes: list[~azure.storage.filedatalake.models.BlobPrefix]
+    :ivar blob_items: Required.
+    :vartype blob_items: list[~azure.storage.filedatalake.models.BlobItemInternal]
+    """
+
+    _validation = {
+        "blob_items": {"required": True},
+    }
+
+    _attribute_map = {
+        "blob_prefixes": {"key": "BlobPrefixes", "type": "[BlobPrefix]"},
+        "blob_items": {"key": "BlobItems", "type": "[BlobItemInternal]", "xml": {"itemsName": "Blob"}},
+    }
+    _xml_map = {"name": "Blobs"}
+
+    def __init__(
+        self,
+        *,
+        blob_items: List["_models.BlobItemInternal"],
+        blob_prefixes: Optional[List["_models.BlobPrefix"]] = None,
+        **kwargs: Any
+    ) -> None:
+        """
+        :keyword blob_prefixes:
+        :paramtype blob_prefixes: list[~azure.storage.filedatalake.models.BlobPrefix]
+        :keyword blob_items: Required.
+        :paramtype blob_items: list[~azure.storage.filedatalake.models.BlobItemInternal]
+        """
+        super().__init__(**kwargs)
+        self.blob_prefixes = blob_prefixes
+        self.blob_items = blob_items
+
+
+class BlobItemInternal(_serialization.Model):
+    """An Azure Storage blob.
+
+    All required parameters must be populated in order to send to the server.
+
+    :ivar name: Required.
+    :vartype name: str
+    :ivar deleted: Required.
+    :vartype deleted: bool
+    :ivar snapshot: Required.
+    :vartype snapshot: str
+    :ivar version_id:
+    :vartype version_id: str
+    :ivar is_current_version:
+    :vartype is_current_version: bool
+    :ivar properties: Properties of a blob. Required.
+    :vartype properties: ~azure.storage.filedatalake.models.BlobPropertiesInternal
+    :ivar deletion_id:
+    :vartype deletion_id: str
+    """
+
+    _validation = {
+        "name": {"required": True},
+        "deleted": {"required": True},
+        "snapshot": {"required": True},
+        "properties": {"required": True},
+    }
+
+    _attribute_map = {
+        "name": {"key": "Name", "type": "str"},
+        "deleted": {"key": "Deleted", "type": "bool"},
+        "snapshot": {"key": "Snapshot", "type": "str"},
+        "version_id": {"key": "VersionId", "type": "str"},
+        "is_current_version": {"key": "IsCurrentVersion", "type": "bool"},
+        "properties": {"key": "Properties", "type": "BlobPropertiesInternal"},
+        "deletion_id": {"key": "DeletionId", "type": "str"},
+    }
+    _xml_map = {"name": "Blob"}
+
+    def __init__(
+        self,
+        *,
+        name: str,
+        deleted: bool,
+        snapshot: str,
+        properties: "_models.BlobPropertiesInternal",
+        version_id: Optional[str] = None,
+        is_current_version: Optional[bool] = None,
+        deletion_id: Optional[str] = None,
+        **kwargs: Any
+    ) -> None:
+        """
+        :keyword name: Required.
+        :paramtype name: str
+        :keyword deleted: Required.
+        :paramtype deleted: bool
+        :keyword snapshot: Required.
+        :paramtype snapshot: str
+        :keyword version_id:
+        :paramtype version_id: str
+        :keyword is_current_version:
+        :paramtype is_current_version: bool
+        :keyword properties: Properties of a blob. Required.
+        :paramtype properties: ~azure.storage.filedatalake.models.BlobPropertiesInternal
+        :keyword deletion_id:
+        :paramtype deletion_id: str
+        """
+        super().__init__(**kwargs)
+        self.name = name
+        self.deleted = deleted
+        self.snapshot = snapshot
+        self.version_id = version_id
+        self.is_current_version = is_current_version
+        self.properties = properties
+        self.deletion_id = deletion_id
+
+
+class BlobPrefix(_serialization.Model):
+    """BlobPrefix.
+
+    All required parameters must be populated in order to send to the server.
+
+    :ivar name: Required.
+    :vartype name: str
+    """
+
+    _validation = {
+        "name": {"required": True},
+    }
+
+    _attribute_map = {
+        "name": {"key": "Name", "type": "str"},
+    }
+
+    def __init__(self, *, name: str, **kwargs: Any) -> None:
+        """
+        :keyword name: Required.
+        :paramtype name: str
+        """
+        super().__init__(**kwargs)
+        self.name = name
+
+
+class BlobPropertiesInternal(_serialization.Model):
+    """Properties of a blob.
+
+    All required parameters must be populated in order to send to the server.
+
+    :ivar creation_time:
+    :vartype creation_time: ~datetime.datetime
+    :ivar last_modified: Required.
+    :vartype last_modified: ~datetime.datetime
+    :ivar etag: Required.
+    :vartype etag: str
+    :ivar content_length: Size in bytes.
+    :vartype content_length: int
+    :ivar content_type:
+    :vartype content_type: str
+    :ivar content_encoding:
+    :vartype content_encoding: str
+    :ivar content_language:
+    :vartype content_language: str
+    :ivar content_md5:
+    :vartype content_md5: bytes
+    :ivar content_disposition:
+    :vartype content_disposition: str
+    :ivar cache_control:
+    :vartype cache_control: str
+    :ivar blob_sequence_number:
+    :vartype blob_sequence_number: int
+    :ivar copy_id:
+    :vartype copy_id: str
+    :ivar copy_source:
+    :vartype copy_source: str
+    :ivar copy_progress:
+    :vartype copy_progress: str
+    :ivar copy_completion_time:
+    :vartype copy_completion_time: ~datetime.datetime
+    :ivar copy_status_description:
+    :vartype copy_status_description: str
+    :ivar server_encrypted:
+    :vartype server_encrypted: bool
+    :ivar incremental_copy:
+    :vartype incremental_copy: bool
+    :ivar destination_snapshot:
+    :vartype destination_snapshot: str
+    :ivar deleted_time:
+    :vartype deleted_time: ~datetime.datetime
+    :ivar remaining_retention_days:
+    :vartype remaining_retention_days: int
+    :ivar access_tier_inferred:
+    :vartype access_tier_inferred: bool
+    :ivar customer_provided_key_sha256:
+    :vartype customer_provided_key_sha256: str
+    :ivar encryption_scope: The name of the encryption scope under which the blob is encrypted.
+    :vartype encryption_scope: str
+    :ivar access_tier_change_time:
+    :vartype access_tier_change_time: ~datetime.datetime
+    :ivar tag_count:
+    :vartype tag_count: int
+    :ivar expires_on:
+    :vartype expires_on: ~datetime.datetime
+    :ivar is_sealed:
+    :vartype is_sealed: bool
+    :ivar last_accessed_on:
+    :vartype last_accessed_on: ~datetime.datetime
+    :ivar delete_time:
+    :vartype delete_time: ~datetime.datetime
+    """
+
+    _validation = {
+        "last_modified": {"required": True},
+        "etag": {"required": True},
+    }
+
+    _attribute_map = {
+        "creation_time": {"key": "Creation-Time", "type": "rfc-1123"},
+        "last_modified": {"key": "Last-Modified", "type": "rfc-1123"},
+        "etag": {"key": "Etag", "type": "str"},
+        "content_length": {"key": "Content-Length", "type": "int"},
+        "content_type": {"key": "Content-Type", "type": "str"},
+        "content_encoding": {"key": "Content-Encoding", "type": "str"},
+        "content_language": {"key": "Content-Language", "type": "str"},
+        "content_md5": {"key": "Content-MD5", "type": "bytearray"},
+        "content_disposition": {"key": "Content-Disposition", "type": "str"},
+        "cache_control": {"key": "Cache-Control", "type": "str"},
+        "blob_sequence_number": {"key": "x-ms-blob-sequence-number", "type": "int"},
+        "copy_id": {"key": "CopyId", "type": "str"},
+        "copy_source": {"key": "CopySource", "type": "str"},
+        "copy_progress": {"key": "CopyProgress", "type": "str"},
+        "copy_completion_time": {"key": "CopyCompletionTime", "type": "rfc-1123"},
+        "copy_status_description": {"key": "CopyStatusDescription", "type": "str"},
+        "server_encrypted": {"key": "ServerEncrypted", "type": "bool"},
+        "incremental_copy": {"key": "IncrementalCopy", "type": "bool"},
+        "destination_snapshot": {"key": "DestinationSnapshot", "type": "str"},
+        "deleted_time": {"key": "DeletedTime", "type": "rfc-1123"},
+        "remaining_retention_days": {"key": "RemainingRetentionDays", "type": "int"},
+        "access_tier_inferred": {"key": "AccessTierInferred", "type": "bool"},
+        "customer_provided_key_sha256": {"key": "CustomerProvidedKeySha256", "type": "str"},
+        "encryption_scope": {"key": "EncryptionScope", "type": "str"},
+        "access_tier_change_time": {"key": "AccessTierChangeTime", "type": "rfc-1123"},
+        "tag_count": {"key": "TagCount", "type": "int"},
+        "expires_on": {"key": "Expiry-Time", "type": "rfc-1123"},
+        "is_sealed": {"key": "Sealed", "type": "bool"},
+        "last_accessed_on": {"key": "LastAccessTime", "type": "rfc-1123"},
+        "delete_time": {"key": "DeleteTime", "type": "rfc-1123"},
+    }
+    _xml_map = {"name": "Properties"}
+
+    def __init__(  # pylint: disable=too-many-locals
+        self,
+        *,
+        last_modified: datetime.datetime,
+        etag: str,
+        creation_time: Optional[datetime.datetime] = None,
+        content_length: Optional[int] = None,
+        content_type: Optional[str] = None,
+        content_encoding: Optional[str] = None,
+        content_language: Optional[str] = None,
+        content_md5: Optional[bytes] = None,
+        content_disposition: Optional[str] = None,
+        cache_control: Optional[str] = None,
+        blob_sequence_number: Optional[int] = None,
+        copy_id: Optional[str] = None,
+        copy_source: Optional[str] = None,
+        copy_progress: Optional[str] = None,
+        copy_completion_time: Optional[datetime.datetime] = None,
+        copy_status_description: Optional[str] = None,
+        server_encrypted: Optional[bool] = None,
+        incremental_copy: Optional[bool] = None,
+        destination_snapshot: Optional[str] = None,
+        deleted_time: Optional[datetime.datetime] = None,
+        remaining_retention_days: Optional[int] = None,
+        access_tier_inferred: Optional[bool] = None,
+        customer_provided_key_sha256: Optional[str] = None,
+        encryption_scope: Optional[str] = None,
+        access_tier_change_time: Optional[datetime.datetime] = None,
+        tag_count: Optional[int] = None,
+        expires_on: Optional[datetime.datetime] = None,
+        is_sealed: Optional[bool] = None,
+        last_accessed_on: Optional[datetime.datetime] = None,
+        delete_time: Optional[datetime.datetime] = None,
+        **kwargs: Any
+    ) -> None:
+        """
+        :keyword creation_time:
+        :paramtype creation_time: ~datetime.datetime
+        :keyword last_modified: Required.
+        :paramtype last_modified: ~datetime.datetime
+        :keyword etag: Required.
+        :paramtype etag: str
+        :keyword content_length: Size in bytes.
+        :paramtype content_length: int
+        :keyword content_type:
+        :paramtype content_type: str
+        :keyword content_encoding:
+        :paramtype content_encoding: str
+        :keyword content_language:
+        :paramtype content_language: str
+        :keyword content_md5:
+        :paramtype content_md5: bytes
+        :keyword content_disposition:
+        :paramtype content_disposition: str
+        :keyword cache_control:
+        :paramtype cache_control: str
+        :keyword blob_sequence_number:
+        :paramtype blob_sequence_number: int
+        :keyword copy_id:
+        :paramtype copy_id: str
+        :keyword copy_source:
+        :paramtype copy_source: str
+        :keyword copy_progress:
+        :paramtype copy_progress: str
+        :keyword copy_completion_time:
+        :paramtype copy_completion_time: ~datetime.datetime
+        :keyword copy_status_description:
+        :paramtype copy_status_description: str
+        :keyword server_encrypted:
+        :paramtype server_encrypted: bool
+        :keyword incremental_copy:
+        :paramtype incremental_copy: bool
+        :keyword destination_snapshot:
+        :paramtype destination_snapshot: str
+        :keyword deleted_time:
+        :paramtype deleted_time: ~datetime.datetime
+        :keyword remaining_retention_days:
+        :paramtype remaining_retention_days: int
+        :keyword access_tier_inferred:
+        :paramtype access_tier_inferred: bool
+        :keyword customer_provided_key_sha256:
+        :paramtype customer_provided_key_sha256: str
+        :keyword encryption_scope: The name of the encryption scope under which the blob is encrypted.
+        :paramtype encryption_scope: str
+        :keyword access_tier_change_time:
+        :paramtype access_tier_change_time: ~datetime.datetime
+        :keyword tag_count:
+        :paramtype tag_count: int
+        :keyword expires_on:
+        :paramtype expires_on: ~datetime.datetime
+        :keyword is_sealed:
+        :paramtype is_sealed: bool
+        :keyword last_accessed_on:
+        :paramtype last_accessed_on: ~datetime.datetime
+        :keyword delete_time:
+        :paramtype delete_time: ~datetime.datetime
+        """
+        super().__init__(**kwargs)
+        self.creation_time = creation_time
+        self.last_modified = last_modified
+        self.etag = etag
+        self.content_length = content_length
+        self.content_type = content_type
+        self.content_encoding = content_encoding
+        self.content_language = content_language
+        self.content_md5 = content_md5
+        self.content_disposition = content_disposition
+        self.cache_control = cache_control
+        self.blob_sequence_number = blob_sequence_number
+        self.copy_id = copy_id
+        self.copy_source = copy_source
+        self.copy_progress = copy_progress
+        self.copy_completion_time = copy_completion_time
+        self.copy_status_description = copy_status_description
+        self.server_encrypted = server_encrypted
+        self.incremental_copy = incremental_copy
+        self.destination_snapshot = destination_snapshot
+        self.deleted_time = deleted_time
+        self.remaining_retention_days = remaining_retention_days
+        self.access_tier_inferred = access_tier_inferred
+        self.customer_provided_key_sha256 = customer_provided_key_sha256
+        self.encryption_scope = encryption_scope
+        self.access_tier_change_time = access_tier_change_time
+        self.tag_count = tag_count
+        self.expires_on = expires_on
+        self.is_sealed = is_sealed
+        self.last_accessed_on = last_accessed_on
+        self.delete_time = delete_time
+
+
+class CpkInfo(_serialization.Model):
+    """Parameter group.
+
+    :ivar encryption_key: Optional. Specifies the encryption key to use to encrypt the data
+     provided in the request. If not specified, encryption is performed with the root account
+     encryption key.  For more information, see Encryption at Rest for Azure Storage Services.
+    :vartype encryption_key: str
+    :ivar encryption_key_sha256: The SHA-256 hash of the provided encryption key. Must be provided
+     if the x-ms-encryption-key header is provided.
+    :vartype encryption_key_sha256: str
+    :ivar encryption_algorithm: The algorithm used to produce the encryption key hash. Currently,
+     the only accepted value is "AES256". Must be provided if the x-ms-encryption-key header is
+     provided. Default value is "AES256".
+    :vartype encryption_algorithm: str
+    """
+
+    _attribute_map = {
+        "encryption_key": {"key": "encryptionKey", "type": "str"},
+        "encryption_key_sha256": {"key": "encryptionKeySha256", "type": "str"},
+        "encryption_algorithm": {"key": "encryptionAlgorithm", "type": "str"},
+    }
+
+    def __init__(
+        self,
+        *,
+        encryption_key: Optional[str] = None,
+        encryption_key_sha256: Optional[str] = None,
+        encryption_algorithm: Optional[Literal["AES256"]] = None,
+        **kwargs: Any
+    ) -> None:
+        """
+        :keyword encryption_key: Optional. Specifies the encryption key to use to encrypt the data
+         provided in the request. If not specified, encryption is performed with the root account
+         encryption key.  For more information, see Encryption at Rest for Azure Storage Services.
+        :paramtype encryption_key: str
+        :keyword encryption_key_sha256: The SHA-256 hash of the provided encryption key. Must be
+         provided if the x-ms-encryption-key header is provided.
+        :paramtype encryption_key_sha256: str
+        :keyword encryption_algorithm: The algorithm used to produce the encryption key hash.
+         Currently, the only accepted value is "AES256". Must be provided if the x-ms-encryption-key
+         header is provided. Default value is "AES256".
+        :paramtype encryption_algorithm: str
+        """
+        super().__init__(**kwargs)
+        self.encryption_key = encryption_key
+        self.encryption_key_sha256 = encryption_key_sha256
+        self.encryption_algorithm = encryption_algorithm
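
A sketch of filling in this parameter group; the key below is a made-up 256-bit value, and the SHA-256 header is assumed to be the base64-encoded digest of the raw key, mirroring how the key itself is base64-encoded:

    import base64
    import hashlib

    raw_key = bytes(32)  # hypothetical all-zero 256-bit key; use real random key material

    cpk = CpkInfo(
        encryption_key=base64.b64encode(raw_key).decode("ascii"),
        encryption_key_sha256=base64.b64encode(hashlib.sha256(raw_key).digest()).decode("ascii"),
        encryption_algorithm="AES256",  # the only accepted value, per the docstring above
    )
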
+
+
+class FileSystem(_serialization.Model):
+    """FileSystem.
+
+    :ivar name:
+    :vartype name: str
+    :ivar last_modified:
+    :vartype last_modified: str
+    :ivar e_tag:
+    :vartype e_tag: str
+    """
+
+    _attribute_map = {
+        "name": {"key": "name", "type": "str"},
+        "last_modified": {"key": "lastModified", "type": "str"},
+        "e_tag": {"key": "eTag", "type": "str"},
+    }
+
+    def __init__(
+        self,
+        *,
+        name: Optional[str] = None,
+        last_modified: Optional[str] = None,
+        e_tag: Optional[str] = None,
+        **kwargs: Any
+    ) -> None:
+        """
+        :keyword name:
+        :paramtype name: str
+        :keyword last_modified:
+        :paramtype last_modified: str
+        :keyword e_tag:
+        :paramtype e_tag: str
+        """
+        super().__init__(**kwargs)
+        self.name = name
+        self.last_modified = last_modified
+        self.e_tag = e_tag
+
+
+class FileSystemList(_serialization.Model):
+    """FileSystemList.
+
+    :ivar filesystems:
+    :vartype filesystems: list[~azure.storage.filedatalake.models.FileSystem]
+    """
+
+    _attribute_map = {
+        "filesystems": {"key": "filesystems", "type": "[FileSystem]"},
+    }
+
+    def __init__(self, *, filesystems: Optional[List["_models.FileSystem"]] = None, **kwargs: Any) -> None:
+        """
+        :keyword filesystems:
+        :paramtype filesystems: list[~azure.storage.filedatalake.models.FileSystem]
+        """
+        super().__init__(**kwargs)
+        self.filesystems = filesystems
+
+
+class LeaseAccessConditions(_serialization.Model):
+    """Parameter group.
+
+    :ivar lease_id: If specified, the operation only succeeds if the resource's lease is active and
+     matches this ID.
+    :vartype lease_id: str
+    """
+
+    _attribute_map = {
+        "lease_id": {"key": "leaseId", "type": "str"},
+    }
+
+    def __init__(self, *, lease_id: Optional[str] = None, **kwargs: Any) -> None:
+        """
+        :keyword lease_id: If specified, the operation only succeeds if the resource's lease is active
+         and matches this ID.
+        :paramtype lease_id: str
+        """
+        super().__init__(**kwargs)
+        self.lease_id = lease_id
+
+
+class ListBlobsHierarchySegmentResponse(_serialization.Model):
+    """An enumeration of blobs.
+
+    All required parameters must be populated in order to send to the server.
+
+    :ivar service_endpoint: Required.
+    :vartype service_endpoint: str
+    :ivar container_name: Required.
+    :vartype container_name: str
+    :ivar prefix:
+    :vartype prefix: str
+    :ivar marker:
+    :vartype marker: str
+    :ivar max_results:
+    :vartype max_results: int
+    :ivar delimiter:
+    :vartype delimiter: str
+    :ivar segment: Required.
+    :vartype segment: ~azure.storage.filedatalake.models.BlobHierarchyListSegment
+    :ivar next_marker:
+    :vartype next_marker: str
+    """
+
+    _validation = {
+        "service_endpoint": {"required": True},
+        "container_name": {"required": True},
+        "segment": {"required": True},
+    }
+
+    _attribute_map = {
+        "service_endpoint": {"key": "ServiceEndpoint", "type": "str", "xml": {"attr": True}},
+        "container_name": {"key": "ContainerName", "type": "str", "xml": {"attr": True}},
+        "prefix": {"key": "Prefix", "type": "str"},
+        "marker": {"key": "Marker", "type": "str"},
+        "max_results": {"key": "MaxResults", "type": "int"},
+        "delimiter": {"key": "Delimiter", "type": "str"},
+        "segment": {"key": "Segment", "type": "BlobHierarchyListSegment"},
+        "next_marker": {"key": "NextMarker", "type": "str"},
+    }
+    _xml_map = {"name": "EnumerationResults"}
+
+    def __init__(
+        self,
+        *,
+        service_endpoint: str,
+        container_name: str,
+        segment: "_models.BlobHierarchyListSegment",
+        prefix: Optional[str] = None,
+        marker: Optional[str] = None,
+        max_results: Optional[int] = None,
+        delimiter: Optional[str] = None,
+        next_marker: Optional[str] = None,
+        **kwargs: Any
+    ) -> None:
+        """
+        :keyword service_endpoint: Required.
+        :paramtype service_endpoint: str
+        :keyword container_name: Required.
+        :paramtype container_name: str
+        :keyword prefix:
+        :paramtype prefix: str
+        :keyword marker:
+        :paramtype marker: str
+        :keyword max_results:
+        :paramtype max_results: int
+        :keyword delimiter:
+        :paramtype delimiter: str
+        :keyword segment: Required.
+        :paramtype segment: ~azure.storage.filedatalake.models.BlobHierarchyListSegment
+        :keyword next_marker:
+        :paramtype next_marker: str
+        """
+        super().__init__(**kwargs)
+        self.service_endpoint = service_endpoint
+        self.container_name = container_name
+        self.prefix = prefix
+        self.marker = marker
+        self.max_results = max_results
+        self.delimiter = delimiter
+        self.segment = segment
+        self.next_marker = next_marker
+
+
+class ModifiedAccessConditions(_serialization.Model):
+    """Parameter group.
+
+    :ivar if_modified_since: Specify this header value to operate only on a blob if it has been
+     modified since the specified date/time.
+    :vartype if_modified_since: ~datetime.datetime
+    :ivar if_unmodified_since: Specify this header value to operate only on a blob if it has not
+     been modified since the specified date/time.
+    :vartype if_unmodified_since: ~datetime.datetime
+    :ivar if_match: Specify an ETag value to operate only on blobs with a matching value.
+    :vartype if_match: str
+    :ivar if_none_match: Specify an ETag value to operate only on blobs without a matching value.
+    :vartype if_none_match: str
+    """
+
+    _attribute_map = {
+        "if_modified_since": {"key": "ifModifiedSince", "type": "rfc-1123"},
+        "if_unmodified_since": {"key": "ifUnmodifiedSince", "type": "rfc-1123"},
+        "if_match": {"key": "ifMatch", "type": "str"},
+        "if_none_match": {"key": "ifNoneMatch", "type": "str"},
+    }
+
+    def __init__(
+        self,
+        *,
+        if_modified_since: Optional[datetime.datetime] = None,
+        if_unmodified_since: Optional[datetime.datetime] = None,
+        if_match: Optional[str] = None,
+        if_none_match: Optional[str] = None,
+        **kwargs: Any
+    ) -> None:
+        """
+        :keyword if_modified_since: Specify this header value to operate only on a blob if it has been
+         modified since the specified date/time.
+        :paramtype if_modified_since: ~datetime.datetime
+        :keyword if_unmodified_since: Specify this header value to operate only on a blob if it has not
+         been modified since the specified date/time.
+        :paramtype if_unmodified_since: ~datetime.datetime
+        :keyword if_match: Specify an ETag value to operate only on blobs with a matching value.
+        :paramtype if_match: str
+        :keyword if_none_match: Specify an ETag value to operate only on blobs without a matching
+         value.
+        :paramtype if_none_match: str
+        """
+        super().__init__(**kwargs)
+        self.if_modified_since = if_modified_since
+        self.if_unmodified_since = if_unmodified_since
+        self.if_match = if_match
+        self.if_none_match = if_none_match
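
These four fields map one-to-one onto the standard HTTP conditional headers (If-Modified-Since, If-Unmodified-Since, If-Match, If-None-Match); when a condition is not met the service typically responds with 412 Precondition Failed. A minimal sketch with a hypothetical ETag:

    import datetime

    conditions = ModifiedAccessConditions(
        if_match='"0x8D0000000000000"',  # hypothetical ETag captured from a prior response
        if_unmodified_since=datetime.datetime(2025, 1, 5, tzinfo=datetime.timezone.utc),
    )
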
+
+
+class Path(_serialization.Model):
+    """Path.
+
+    :ivar name:
+    :vartype name: str
+    :ivar is_directory:
+    :vartype is_directory: bool
+    :ivar last_modified:
+    :vartype last_modified: str
+    :ivar e_tag:
+    :vartype e_tag: str
+    :ivar content_length:
+    :vartype content_length: int
+    :ivar owner:
+    :vartype owner: str
+    :ivar group:
+    :vartype group: str
+    :ivar permissions:
+    :vartype permissions: str
+    :ivar encryption_scope: The name of the encryption scope under which the blob is encrypted.
+    :vartype encryption_scope: str
+    :ivar creation_time:
+    :vartype creation_time: str
+    :ivar expiry_time:
+    :vartype expiry_time: str
+    :ivar encryption_context:
+    :vartype encryption_context: str
+    """
+
+    _attribute_map = {
+        "name": {"key": "name", "type": "str"},
+        "is_directory": {"key": "isDirectory", "type": "bool"},
+        "last_modified": {"key": "lastModified", "type": "str"},
+        "e_tag": {"key": "eTag", "type": "str"},
+        "content_length": {"key": "contentLength", "type": "int"},
+        "owner": {"key": "owner", "type": "str"},
+        "group": {"key": "group", "type": "str"},
+        "permissions": {"key": "permissions", "type": "str"},
+        "encryption_scope": {"key": "EncryptionScope", "type": "str"},
+        "creation_time": {"key": "creationTime", "type": "str"},
+        "expiry_time": {"key": "expiryTime", "type": "str"},
+        "encryption_context": {"key": "EncryptionContext", "type": "str"},
+    }
+
+    def __init__(
+        self,
+        *,
+        name: Optional[str] = None,
+        is_directory: bool = False,
+        last_modified: Optional[str] = None,
+        e_tag: Optional[str] = None,
+        content_length: Optional[int] = None,
+        owner: Optional[str] = None,
+        group: Optional[str] = None,
+        permissions: Optional[str] = None,
+        encryption_scope: Optional[str] = None,
+        creation_time: Optional[str] = None,
+        expiry_time: Optional[str] = None,
+        encryption_context: Optional[str] = None,
+        **kwargs: Any
+    ) -> None:
+        """
+        :keyword name:
+        :paramtype name: str
+        :keyword is_directory:
+        :paramtype is_directory: bool
+        :keyword last_modified:
+        :paramtype last_modified: str
+        :keyword e_tag:
+        :paramtype e_tag: str
+        :keyword content_length:
+        :paramtype content_length: int
+        :keyword owner:
+        :paramtype owner: str
+        :keyword group:
+        :paramtype group: str
+        :keyword permissions:
+        :paramtype permissions: str
+        :keyword encryption_scope: The name of the encryption scope under which the blob is encrypted.
+        :paramtype encryption_scope: str
+        :keyword creation_time:
+        :paramtype creation_time: str
+        :keyword expiry_time:
+        :paramtype expiry_time: str
+        :keyword encryption_context:
+        :paramtype encryption_context: str
+        """
+        super().__init__(**kwargs)
+        self.name = name
+        self.is_directory = is_directory
+        self.last_modified = last_modified
+        self.e_tag = e_tag
+        self.content_length = content_length
+        self.owner = owner
+        self.group = group
+        self.permissions = permissions
+        self.encryption_scope = encryption_scope
+        self.creation_time = creation_time
+        self.expiry_time = expiry_time
+        self.encryption_context = encryption_context
+
+
+class PathHTTPHeaders(_serialization.Model):
+    """Parameter group.
+
+    :ivar cache_control: Optional. Sets the blob's cache control. If specified, this property is
+     stored with the blob and returned with a read request.
+    :vartype cache_control: str
+    :ivar content_encoding: Optional. Sets the blob's content encoding. If specified, this property
+     is stored with the blob and returned with a read request.
+    :vartype content_encoding: str
+    :ivar content_language: Optional. Sets the blob's content language. If specified, this property
+     is stored with the blob and returned with a read request.
+    :vartype content_language: str
+    :ivar content_disposition: Optional. Sets the blob's Content-Disposition header.
+    :vartype content_disposition: str
+    :ivar content_type: Optional. Sets the blob's content type. If specified, this property is
+     stored with the blob and returned with a read request.
+    :vartype content_type: str
+    :ivar content_md5: Specify the transactional md5 for the body, to be validated by the service.
+    :vartype content_md5: bytes
+    :ivar transactional_content_hash: Specify the transactional md5 for the body, to be validated
+     by the service.
+    :vartype transactional_content_hash: bytes
+    """
+
+    _attribute_map = {
+        "cache_control": {"key": "cacheControl", "type": "str"},
+        "content_encoding": {"key": "contentEncoding", "type": "str"},
+        "content_language": {"key": "contentLanguage", "type": "str"},
+        "content_disposition": {"key": "contentDisposition", "type": "str"},
+        "content_type": {"key": "contentType", "type": "str"},
+        "content_md5": {"key": "contentMD5", "type": "bytearray"},
+        "transactional_content_hash": {"key": "transactionalContentHash", "type": "bytearray"},
+    }
+
+    def __init__(
+        self,
+        *,
+        cache_control: Optional[str] = None,
+        content_encoding: Optional[str] = None,
+        content_language: Optional[str] = None,
+        content_disposition: Optional[str] = None,
+        content_type: Optional[str] = None,
+        content_md5: Optional[bytes] = None,
+        transactional_content_hash: Optional[bytes] = None,
+        **kwargs: Any
+    ) -> None:
+        """
+        :keyword cache_control: Optional. Sets the blob's cache control. If specified, this property is
+         stored with the blob and returned with a read request.
+        :paramtype cache_control: str
+        :keyword content_encoding: Optional. Sets the blob's content encoding. If specified, this
+         property is stored with the blob and returned with a read request.
+        :paramtype content_encoding: str
+        :keyword content_language: Optional. Sets the blob's content language. If specified, this
+         property is stored with the blob and returned with a read request.
+        :paramtype content_language: str
+        :keyword content_disposition: Optional. Sets the blob's Content-Disposition header.
+        :paramtype content_disposition: str
+        :keyword content_type: Optional. Sets the blob's content type. If specified, this property is
+         stored with the blob and returned with a read request.
+        :paramtype content_type: str
+        :keyword content_md5: Specify the transactional md5 for the body, to be validated by the
+         service.
+        :paramtype content_md5: bytes
+        :keyword transactional_content_hash: Specify the transactional md5 for the body, to be
+         validated by the service.
+        :paramtype transactional_content_hash: bytes
+        """
+        super().__init__(**kwargs)
+        self.cache_control = cache_control
+        self.content_encoding = content_encoding
+        self.content_language = content_language
+        self.content_disposition = content_disposition
+        self.content_type = content_type
+        self.content_md5 = content_md5
+        self.transactional_content_hash = transactional_content_hash
+
+
+class PathList(_serialization.Model):
+    """PathList.
+
+    :ivar paths:
+    :vartype paths: list[~azure.storage.filedatalake.models.Path]
+    """
+
+    _attribute_map = {
+        "paths": {"key": "paths", "type": "[Path]"},
+    }
+
+    def __init__(self, *, paths: Optional[List["_models.Path"]] = None, **kwargs: Any) -> None:
+        """
+        :keyword paths:
+        :paramtype paths: list[~azure.storage.filedatalake.models.Path]
+        """
+        super().__init__(**kwargs)
+        self.paths = paths
+
+
+class SetAccessControlRecursiveResponse(_serialization.Model):
+    """SetAccessControlRecursiveResponse.
+
+    :ivar directories_successful:
+    :vartype directories_successful: int
+    :ivar files_successful:
+    :vartype files_successful: int
+    :ivar failure_count:
+    :vartype failure_count: int
+    :ivar failed_entries:
+    :vartype failed_entries: list[~azure.storage.filedatalake.models.AclFailedEntry]
+    """
+
+    _attribute_map = {
+        "directories_successful": {"key": "directoriesSuccessful", "type": "int"},
+        "files_successful": {"key": "filesSuccessful", "type": "int"},
+        "failure_count": {"key": "failureCount", "type": "int"},
+        "failed_entries": {"key": "failedEntries", "type": "[AclFailedEntry]"},
+    }
+
+    def __init__(
+        self,
+        *,
+        directories_successful: Optional[int] = None,
+        files_successful: Optional[int] = None,
+        failure_count: Optional[int] = None,
+        failed_entries: Optional[List["_models.AclFailedEntry"]] = None,
+        **kwargs: Any
+    ) -> None:
+        """
+        :keyword directories_successful:
+        :paramtype directories_successful: int
+        :keyword files_successful:
+        :paramtype files_successful: int
+        :keyword failure_count:
+        :paramtype failure_count: int
+        :keyword failed_entries:
+        :paramtype failed_entries: list[~azure.storage.filedatalake.models.AclFailedEntry]
+        """
+        super().__init__(**kwargs)
+        self.directories_successful = directories_successful
+        self.files_successful = files_successful
+        self.failure_count = failure_count
+        self.failed_entries = failed_entries
+
+
+class SourceModifiedAccessConditions(_serialization.Model):
+    """Parameter group.
+
+    :ivar source_if_match: Specify an ETag value to operate only on blobs with a matching value.
+    :vartype source_if_match: str
+    :ivar source_if_none_match: Specify an ETag value to operate only on blobs without a matching
+     value.
+    :vartype source_if_none_match: str
+    :ivar source_if_modified_since: Specify this header value to operate only on a blob if it has
+     been modified since the specified date/time.
+    :vartype source_if_modified_since: ~datetime.datetime
+    :ivar source_if_unmodified_since: Specify this header value to operate only on a blob if it has
+     not been modified since the specified date/time.
+    :vartype source_if_unmodified_since: ~datetime.datetime
+    """
+
+    _attribute_map = {
+        "source_if_match": {"key": "sourceIfMatch", "type": "str"},
+        "source_if_none_match": {"key": "sourceIfNoneMatch", "type": "str"},
+        "source_if_modified_since": {"key": "sourceIfModifiedSince", "type": "rfc-1123"},
+        "source_if_unmodified_since": {"key": "sourceIfUnmodifiedSince", "type": "rfc-1123"},
+    }
+
+    def __init__(
+        self,
+        *,
+        source_if_match: Optional[str] = None,
+        source_if_none_match: Optional[str] = None,
+        source_if_modified_since: Optional[datetime.datetime] = None,
+        source_if_unmodified_since: Optional[datetime.datetime] = None,
+        **kwargs: Any
+    ) -> None:
+        """
+        :keyword source_if_match: Specify an ETag value to operate only on blobs with a matching value.
+        :paramtype source_if_match: str
+        :keyword source_if_none_match: Specify an ETag value to operate only on blobs without a
+         matching value.
+        :paramtype source_if_none_match: str
+        :keyword source_if_modified_since: Specify this header value to operate only on a blob if it
+         has been modified since the specified date/time.
+        :paramtype source_if_modified_since: ~datetime.datetime
+        :keyword source_if_unmodified_since: Specify this header value to operate only on a blob if it
+         has not been modified since the specified date/time.
+        :paramtype source_if_unmodified_since: ~datetime.datetime
+        """
+        super().__init__(**kwargs)
+        self.source_if_match = source_if_match
+        self.source_if_none_match = source_if_none_match
+        self.source_if_modified_since = source_if_modified_since
+        self.source_if_unmodified_since = source_if_unmodified_since
+
+
+class StorageError(_serialization.Model):
+    """StorageError.
+
+    :ivar error: The service error response object.
+    :vartype error: ~azure.storage.filedatalake.models.StorageErrorError
+    """
+
+    _attribute_map = {
+        "error": {"key": "error", "type": "StorageErrorError"},
+    }
+
+    def __init__(self, *, error: Optional["_models.StorageErrorError"] = None, **kwargs: Any) -> None:
+        """
+        :keyword error: The service error response object.
+        :paramtype error: ~azure.storage.filedatalake.models.StorageErrorError
+        """
+        super().__init__(**kwargs)
+        self.error = error
+
+
+class StorageErrorError(_serialization.Model):
+    """The service error response object.
+
+    :ivar code: The service error code.
+    :vartype code: str
+    :ivar message: The service error message.
+    :vartype message: str
+    """
+
+    _attribute_map = {
+        "code": {"key": "Code", "type": "str"},
+        "message": {"key": "Message", "type": "str"},
+    }
+
+    def __init__(self, *, code: Optional[str] = None, message: Optional[str] = None, **kwargs: Any) -> None:
+        """
+        :keyword code: The service error code.
+        :paramtype code: str
+        :keyword message: The service error message.
+        :paramtype message: str
+        """
+        super().__init__(**kwargs)
+        self.code = code
+        self.message = message
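
All of the classes above inherit from `_serialization.Model` (the vendored AutoRest serializer), which derives wire-format round-tripping from each class's `_attribute_map`. A rough sketch, assuming the internal import path and the serializer's `serialize`/`deserialize` helpers:

    from azure.storage.filedatalake._generated.models import Path, PathList

    listing = PathList(paths=[Path(name="raw/events.json", content_length=1024)])

    wire = listing.serialize()             # keys follow _attribute_map, e.g. "contentLength"
    restored = PathList.deserialize(wire)  # classmethod inherited from _serialization.Model
    assert restored.paths[0].name == "raw/events.json"
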
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/models/_patch.py b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/models/_patch.py
new file mode 100644
index 00000000..f7dd3251
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/models/_patch.py
@@ -0,0 +1,20 @@
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------
+"""Customize generated code here.
+
+Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize
+"""
+from typing import List
+
+__all__: List[str] = []  # Add all objects you want publicly available to users at this package level
+
+
+def patch_sdk():
+    """Do not remove from this file.
+
+    `patch_sdk` is a last resort escape hatch that allows you to do customizations
+    you can't accomplish using the techniques described in
+    https://aka.ms/azsdk/python/dpcodegen/python/customize
+    """
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/operations/__init__.py b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/operations/__init__.py
new file mode 100644
index 00000000..56a7ece3
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/operations/__init__.py
@@ -0,0 +1,29 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+# pylint: disable=wrong-import-position
+
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from ._patch import *  # pylint: disable=unused-wildcard-import
+
+from ._service_operations import ServiceOperations  # type: ignore
+from ._file_system_operations import FileSystemOperations  # type: ignore
+from ._path_operations import PathOperations  # type: ignore
+
+from ._patch import __all__ as _patch_all
+from ._patch import *
+from ._patch import patch_sdk as _patch_sdk
+
+__all__ = [
+    "ServiceOperations",
+    "FileSystemOperations",
+    "PathOperations",
+]
+__all__.extend([p for p in _patch_all if p not in __all__])  # pyright: ignore
+_patch_sdk()
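
As the class-level warning further down notes, these operation groups are not meant to be constructed directly; they are attached to the generated client. A sketch, assuming the client class exported from `_generated/__init__.py` and a constructor that takes the resource `url` (both inferred from this patch, not verified here):

    from azure.storage.filedatalake._generated import AzureDataLakeStorageRESTAPI

    client = AzureDataLakeStorageRESTAPI(url="https://myaccount.dfs.core.windows.net/myfs")
    client.service      # ServiceOperations
    client.file_system  # FileSystemOperations
    client.path         # PathOperations
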
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/operations/_file_system_operations.py b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/operations/_file_system_operations.py
new file mode 100644
index 00000000..235402a1
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/operations/_file_system_operations.py
@@ -0,0 +1,888 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+import datetime
+import sys
+from typing import Any, Callable, Dict, List, Literal, Optional, TypeVar, Union
+
+from azure.core import PipelineClient
+from azure.core.exceptions import (
+    ClientAuthenticationError,
+    HttpResponseError,
+    ResourceExistsError,
+    ResourceNotFoundError,
+    ResourceNotModifiedError,
+    map_error,
+)
+from azure.core.pipeline import PipelineResponse
+from azure.core.rest import HttpRequest, HttpResponse
+from azure.core.tracing.decorator import distributed_trace
+from azure.core.utils import case_insensitive_dict
+
+from .. import models as _models
+from .._configuration import AzureDataLakeStorageRESTAPIConfiguration
+from .._serialization import Deserializer, Serializer
+
+if sys.version_info >= (3, 9):
+    from collections.abc import MutableMapping
+else:
+    from typing import MutableMapping  # type: ignore
+T = TypeVar("T")
+ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
+
+_SERIALIZER = Serializer()
+_SERIALIZER.client_side_validation = False
+
+
+def build_create_request(
+    url: str,
+    *,
+    request_id_parameter: Optional[str] = None,
+    timeout: Optional[int] = None,
+    properties: Optional[str] = None,
+    **kwargs: Any
+) -> HttpRequest:
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    resource: Literal["filesystem"] = kwargs.pop("resource", _params.pop("resource", "filesystem"))
+    version: Literal["2025-01-05"] = kwargs.pop("version", _headers.pop("x-ms-version", "2025-01-05"))
+    accept = _headers.pop("Accept", "application/json")
+
+    # Construct URL
+    _url = kwargs.pop("template_url", "{url}")
+    path_format_arguments = {
+        "url": _SERIALIZER.url("url", url, "str", skip_quote=True),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    _params["resource"] = _SERIALIZER.query("resource", resource, "str")
+    if timeout is not None:
+        _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0)
+
+    # Construct headers
+    if request_id_parameter is not None:
+        _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str")
+    _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str")
+    if properties is not None:
+        _headers["x-ms-properties"] = _SERIALIZER.header("properties", properties, "str")
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_set_properties_request(
+    url: str,
+    *,
+    request_id_parameter: Optional[str] = None,
+    timeout: Optional[int] = None,
+    properties: Optional[str] = None,
+    if_modified_since: Optional[datetime.datetime] = None,
+    if_unmodified_since: Optional[datetime.datetime] = None,
+    **kwargs: Any
+) -> HttpRequest:
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    resource: Literal["filesystem"] = kwargs.pop("resource", _params.pop("resource", "filesystem"))
+    version: Literal["2025-01-05"] = kwargs.pop("version", _headers.pop("x-ms-version", "2025-01-05"))
+    accept = _headers.pop("Accept", "application/json")
+
+    # Construct URL
+    _url = kwargs.pop("template_url", "{url}")
+    path_format_arguments = {
+        "url": _SERIALIZER.url("url", url, "str", skip_quote=True),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    _params["resource"] = _SERIALIZER.query("resource", resource, "str")
+    if timeout is not None:
+        _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0)
+
+    # Construct headers
+    if request_id_parameter is not None:
+        _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str")
+    _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str")
+    if properties is not None:
+        _headers["x-ms-properties"] = _SERIALIZER.header("properties", properties, "str")
+    if if_modified_since is not None:
+        _headers["If-Modified-Since"] = _SERIALIZER.header("if_modified_since", if_modified_since, "rfc-1123")
+    if if_unmodified_since is not None:
+        _headers["If-Unmodified-Since"] = _SERIALIZER.header("if_unmodified_since", if_unmodified_since, "rfc-1123")
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="PATCH", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_get_properties_request(
+    url: str, *, request_id_parameter: Optional[str] = None, timeout: Optional[int] = None, **kwargs: Any
+) -> HttpRequest:
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    resource: Literal["filesystem"] = kwargs.pop("resource", _params.pop("resource", "filesystem"))
+    version: Literal["2025-01-05"] = kwargs.pop("version", _headers.pop("x-ms-version", "2025-01-05"))
+    accept = _headers.pop("Accept", "application/json")
+
+    # Construct URL
+    _url = kwargs.pop("template_url", "{url}")
+    path_format_arguments = {
+        "url": _SERIALIZER.url("url", url, "str", skip_quote=True),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    _params["resource"] = _SERIALIZER.query("resource", resource, "str")
+    if timeout is not None:
+        _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0)
+
+    # Construct headers
+    if request_id_parameter is not None:
+        _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str")
+    _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str")
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="HEAD", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_delete_request(
+    url: str,
+    *,
+    request_id_parameter: Optional[str] = None,
+    timeout: Optional[int] = None,
+    if_modified_since: Optional[datetime.datetime] = None,
+    if_unmodified_since: Optional[datetime.datetime] = None,
+    **kwargs: Any
+) -> HttpRequest:
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    resource: Literal["filesystem"] = kwargs.pop("resource", _params.pop("resource", "filesystem"))
+    version: Literal["2025-01-05"] = kwargs.pop("version", _headers.pop("x-ms-version", "2025-01-05"))
+    accept = _headers.pop("Accept", "application/json")
+
+    # Construct URL
+    _url = kwargs.pop("template_url", "{url}")
+    path_format_arguments = {
+        "url": _SERIALIZER.url("url", url, "str", skip_quote=True),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    _params["resource"] = _SERIALIZER.query("resource", resource, "str")
+    if timeout is not None:
+        _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0)
+
+    # Construct headers
+    if request_id_parameter is not None:
+        _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str")
+    _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str")
+    if if_modified_since is not None:
+        _headers["If-Modified-Since"] = _SERIALIZER.header("if_modified_since", if_modified_since, "rfc-1123")
+    if if_unmodified_since is not None:
+        _headers["If-Unmodified-Since"] = _SERIALIZER.header("if_unmodified_since", if_unmodified_since, "rfc-1123")
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="DELETE", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_list_paths_request(
+    url: str,
+    *,
+    recursive: bool,
+    request_id_parameter: Optional[str] = None,
+    timeout: Optional[int] = None,
+    continuation: Optional[str] = None,
+    path: Optional[str] = None,
+    max_results: Optional[int] = None,
+    upn: Optional[bool] = None,
+    **kwargs: Any
+) -> HttpRequest:
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    resource: Literal["filesystem"] = kwargs.pop("resource", _params.pop("resource", "filesystem"))
+    version: Literal["2025-01-05"] = kwargs.pop("version", _headers.pop("x-ms-version", "2025-01-05"))
+    accept = _headers.pop("Accept", "application/json")
+
+    # Construct URL
+    _url = kwargs.pop("template_url", "{url}")
+    path_format_arguments = {
+        "url": _SERIALIZER.url("url", url, "str", skip_quote=True),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    _params["resource"] = _SERIALIZER.query("resource", resource, "str")
+    if timeout is not None:
+        _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0)
+    if continuation is not None:
+        _params["continuation"] = _SERIALIZER.query("continuation", continuation, "str")
+    if path is not None:
+        _params["directory"] = _SERIALIZER.query("path", path, "str")
+    _params["recursive"] = _SERIALIZER.query("recursive", recursive, "bool")
+    if max_results is not None:
+        _params["maxResults"] = _SERIALIZER.query("max_results", max_results, "int", minimum=1)
+    if upn is not None:
+        _params["upn"] = _SERIALIZER.query("upn", upn, "bool")
+
+    # Construct headers
+    if request_id_parameter is not None:
+        _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str")
+    _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str")
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_list_blob_hierarchy_segment_request(  # pylint: disable=name-too-long
+    url: str,
+    *,
+    prefix: Optional[str] = None,
+    delimiter: Optional[str] = None,
+    marker: Optional[str] = None,
+    max_results: Optional[int] = None,
+    include: Optional[List[Union[str, _models.ListBlobsIncludeItem]]] = None,
+    showonly: Literal["deleted"] = "deleted",
+    timeout: Optional[int] = None,
+    request_id_parameter: Optional[str] = None,
+    **kwargs: Any
+) -> HttpRequest:
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    restype: Literal["container"] = kwargs.pop("restype", _params.pop("restype", "container"))
+    comp: Literal["list"] = kwargs.pop("comp", _params.pop("comp", "list"))
+    version: Literal["2025-01-05"] = kwargs.pop("version", _headers.pop("x-ms-version", "2025-01-05"))
+    accept = _headers.pop("Accept", "application/xml")
+
+    # Construct URL
+    _url = kwargs.pop("template_url", "{url}")
+    path_format_arguments = {
+        "url": _SERIALIZER.url("url", url, "str", skip_quote=True),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    _params["restype"] = _SERIALIZER.query("restype", restype, "str")
+    _params["comp"] = _SERIALIZER.query("comp", comp, "str")
+    if prefix is not None:
+        _params["prefix"] = _SERIALIZER.query("prefix", prefix, "str")
+    if delimiter is not None:
+        _params["delimiter"] = _SERIALIZER.query("delimiter", delimiter, "str")
+    if marker is not None:
+        _params["marker"] = _SERIALIZER.query("marker", marker, "str")
+    if max_results is not None:
+        _params["maxResults"] = _SERIALIZER.query("max_results", max_results, "int", minimum=1)
+    if include is not None:
+        _params["include"] = _SERIALIZER.query("include", include, "[str]", div=",")
+    if showonly is not None:
+        _params["showonly"] = _SERIALIZER.query("showonly", showonly, "str")
+    if timeout is not None:
+        _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0)
+
+    # Construct headers
+    _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str")
+    if request_id_parameter is not None:
+        _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str")
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+class FileSystemOperations:
+    """
+    .. warning::
+        **DO NOT** instantiate this class directly.
+
+        Instead, you should access the following operations through
+        :class:`~azure.storage.filedatalake.AzureDataLakeStorageRESTAPI`'s
+        :attr:`file_system` attribute.
+    """
+
+    models = _models
+
+    def __init__(self, *args, **kwargs):
+        input_args = list(args)
+        self._client: PipelineClient = input_args.pop(0) if input_args else kwargs.pop("client")
+        self._config: AzureDataLakeStorageRESTAPIConfiguration = (
+            input_args.pop(0) if input_args else kwargs.pop("config")
+        )
+        self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer")
+        self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer")
+
+    @distributed_trace
+    def create(  # pylint: disable=inconsistent-return-statements
+        self,
+        request_id_parameter: Optional[str] = None,
+        timeout: Optional[int] = None,
+        properties: Optional[str] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """Create FileSystem.
+
+        Create a FileSystem rooted at the specified location. If the FileSystem already exists, the
+        operation fails.  This operation does not support conditional HTTP requests.
+
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param properties: Optional. User-defined properties to be stored with the filesystem, in the
+         format of a comma-separated list of name and value pairs "n1=v1, n2=v2, ...", where each value
+         is a base64 encoded string. Note that the string may only contain ASCII characters in the
+         ISO-8859-1 character set.  If the filesystem exists, any properties not included in the list
+         will be removed.  All properties are removed if the header is omitted.  To merge new and
+         existing properties, first get all existing properties and the current E-Tag, then make a
+         conditional request with the E-Tag and include values for all properties. Default value is
+         None.
+        :type properties: str
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = kwargs.pop("params", {}) or {}
+
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _request = build_create_request(
+            url=self._config.url,
+            request_id_parameter=request_id_parameter,
+            timeout=timeout,
+            properties=properties,
+            resource=self._config.resource,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [201]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["x-ms-namespace-enabled"] = self._deserialize(
+            "str", response.headers.get("x-ms-namespace-enabled")
+        )
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
+    @distributed_trace
+    def set_properties(  # pylint: disable=inconsistent-return-statements
+        self,
+        request_id_parameter: Optional[str] = None,
+        timeout: Optional[int] = None,
+        properties: Optional[str] = None,
+        modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """Set FileSystem Properties.
+
+        Set properties for the FileSystem.  This operation supports conditional HTTP requests.  For
+        more information, see `Specifying Conditional Headers for Blob Service Operations
+        <https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations>`_.
+
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param properties: Optional. User-defined properties to be stored with the filesystem, in the
+         format of a comma-separated list of name and value pairs "n1=v1, n2=v2, ...", where each value
+         is a base64 encoded string. Note that the string may only contain ASCII characters in the
+         ISO-8859-1 character set.  If the filesystem exists, any properties not included in the list
+         will be removed.  All properties are removed if the header is omitted.  To merge new and
+         existing properties, first get all existing properties and the current E-Tag, then make a
+         conditional request with the E-Tag and include values for all properties. Default value is
+         None.
+        :type properties: str
+        :param modified_access_conditions: Parameter group. Default value is None.
+        :type modified_access_conditions: ~azure.storage.filedatalake.models.ModifiedAccessConditions
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = kwargs.pop("params", {}) or {}
+
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _if_modified_since = None
+        _if_unmodified_since = None
+        if modified_access_conditions is not None:
+            _if_modified_since = modified_access_conditions.if_modified_since
+            _if_unmodified_since = modified_access_conditions.if_unmodified_since
+
+        _request = build_set_properties_request(
+            url=self._config.url,
+            request_id_parameter=request_id_parameter,
+            timeout=timeout,
+            properties=properties,
+            if_modified_since=_if_modified_since,
+            if_unmodified_since=_if_unmodified_since,
+            resource=self._config.resource,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
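+
+    def _example_merge_properties(self, extra: str) -> None:
+        """Hedged sketch, not generated code: the merge workflow described in the
+        ``properties`` docstring above -- read the current properties and
+        Last-Modified via ``get_properties``, then update conditionally. The
+        helper name and ``extra`` parameter are illustrative assumptions.
+        """
+        # The generated operations expose response headers through the cls hook.
+        _, headers = self.get_properties(
+            cls=lambda resp, deserialized, hdrs: (deserialized, hdrs)
+        )
+        existing = headers.get("x-ms-properties") or ""
+        merged = ",".join(p for p in (existing, extra) if p)
+        # Conditional update: only apply if the filesystem is unchanged since the read.
+        self.set_properties(
+            properties=merged,
+            modified_access_conditions=_models.ModifiedAccessConditions(
+                if_unmodified_since=headers.get("Last-Modified")
+            ),
+        )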
+
+    @distributed_trace
+    def get_properties(  # pylint: disable=inconsistent-return-statements
+        self, request_id_parameter: Optional[str] = None, timeout: Optional[int] = None, **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """Get FileSystem Properties.
+
+        All system and user-defined filesystem properties are specified in the response headers.
+
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = kwargs.pop("params", {}) or {}
+
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _request = build_get_properties_request(
+            url=self._config.url,
+            request_id_parameter=request_id_parameter,
+            timeout=timeout,
+            resource=self._config.resource,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["x-ms-properties"] = self._deserialize("str", response.headers.get("x-ms-properties"))
+        response_headers["x-ms-namespace-enabled"] = self._deserialize(
+            "str", response.headers.get("x-ms-namespace-enabled")
+        )
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
+    @distributed_trace
+    def delete(  # pylint: disable=inconsistent-return-statements
+        self,
+        request_id_parameter: Optional[str] = None,
+        timeout: Optional[int] = None,
+        modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """Delete FileSystem.
+
+        Marks the FileSystem for deletion.  When a FileSystem is deleted, a FileSystem with the same
+        identifier cannot be created for at least 30 seconds. While the filesystem is being deleted,
+        attempts to create a filesystem with the same identifier will fail with status code 409
+        (Conflict), with the service returning additional error information indicating that the
+        filesystem is being deleted. All other operations, including operations on any files or
+        directories within the filesystem, will fail with status code 404 (Not Found) while the
+        filesystem is being deleted. This operation supports conditional HTTP requests.  For more
+        information, see `Specifying Conditional Headers for Blob Service Operations
+        <https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations>`_.
+
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param modified_access_conditions: Parameter group. Default value is None.
+        :type modified_access_conditions: ~azure.storage.filedatalake.models.ModifiedAccessConditions
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = kwargs.pop("params", {}) or {}
+
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _if_modified_since = None
+        _if_unmodified_since = None
+        if modified_access_conditions is not None:
+            _if_modified_since = modified_access_conditions.if_modified_since
+            _if_unmodified_since = modified_access_conditions.if_unmodified_since
+
+        _request = build_delete_request(
+            url=self._config.url,
+            request_id_parameter=request_id_parameter,
+            timeout=timeout,
+            if_modified_since=_if_modified_since,
+            if_unmodified_since=_if_unmodified_since,
+            resource=self._config.resource,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [202]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
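+
+    def _example_recreate(self) -> None:
+        """Hedged sketch, not generated code: delete and immediately re-create a
+        filesystem, retrying through the 409 window described above. The retry
+        count and sleep interval are illustrative choices.
+        """
+        import time  # local import; this helper is illustrative only
+
+        self.delete()
+        for _ in range(12):
+            try:
+                self.create()
+                return
+            except ResourceExistsError:
+                # 409 Conflict: the old filesystem is still being deleted.
+                time.sleep(5)
+        raise HttpResponseError(message="filesystem was still deleting after retries")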
+
+    @distributed_trace
+    def list_paths(
+        self,
+        recursive: bool,
+        request_id_parameter: Optional[str] = None,
+        timeout: Optional[int] = None,
+        continuation: Optional[str] = None,
+        path: Optional[str] = None,
+        max_results: Optional[int] = None,
+        upn: Optional[bool] = None,
+        **kwargs: Any
+    ) -> _models.PathList:
+        # pylint: disable=line-too-long
+        """List Paths.
+
+        List FileSystem paths and their properties.
+
+        :param recursive: If "true", all paths are listed; otherwise, only the paths at the root of
+         the filesystem are listed. Required.
+        :type recursive: bool
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param continuation: Optional. The number of paths returned with each invocation is
+         limited. If the number of paths to be returned exceeds this limit, a continuation token is
+         returned in the x-ms-continuation response header. When a continuation token is returned
+         in the response, it must be specified in a subsequent invocation of the list operation to
+         continue listing the paths. Default value is None.
+        :type continuation: str
+        :param path: Optional.  Filters results to paths within the specified directory. An error
+         occurs if the directory does not exist. Default value is None.
+        :type path: str
+        :param max_results: An optional value that specifies the maximum number of items to return. If
+         omitted or greater than 5,000, the response will include up to 5,000 items. Default value is
+         None.
+        :type max_results: int
+        :param upn: Optional. Valid only when Hierarchical Namespace is enabled for the account. If
+         "true", the user identity values returned in the x-ms-owner, x-ms-group, and x-ms-acl response
+         headers will be transformed from Azure Active Directory Object IDs to User Principal Names.  If
+         "false", the values will be returned as Azure Active Directory Object IDs. The default value is
+         false. Note that group and application Object IDs are not translated because they do not have
+         unique friendly names. Default value is None.
+        :type upn: bool
+        :return: PathList or the result of cls(response)
+        :rtype: ~azure.storage.filedatalake.models.PathList
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = kwargs.pop("params", {}) or {}
+
+        cls: ClsType[_models.PathList] = kwargs.pop("cls", None)
+
+        _request = build_list_paths_request(
+            url=self._config.url,
+            recursive=recursive,
+            request_id_parameter=request_id_parameter,
+            timeout=timeout,
+            continuation=continuation,
+            path=path,
+            max_results=max_results,
+            upn=upn,
+            resource=self._config.resource,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["x-ms-continuation"] = self._deserialize("str", response.headers.get("x-ms-continuation"))
+
+        deserialized = self._deserialize("PathList", pipeline_response.http_response)
+
+        if cls:
+            return cls(pipeline_response, deserialized, response_headers)  # type: ignore
+
+        return deserialized  # type: ignore
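+
+    def _example_list_all_paths(self, path: Optional[str] = None) -> list:
+        """Hedged sketch, not generated code: drain ``list_paths`` by feeding the
+        ``x-ms-continuation`` response header back into ``continuation`` until
+        the service stops returning one. Helper name is illustrative.
+        """
+        items: list = []
+        continuation: Optional[str] = None
+        while True:
+            # The cls hook returns both the deserialized PathList and the headers.
+            page, headers = self.list_paths(
+                recursive=True,
+                path=path,
+                continuation=continuation,
+                cls=lambda resp, deserialized, hdrs: (deserialized, hdrs),
+            )
+            items.extend(page.paths or [])
+            continuation = headers.get("x-ms-continuation")
+            if not continuation:
+                return items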
+
+    @distributed_trace
+    def list_blob_hierarchy_segment(
+        self,
+        prefix: Optional[str] = None,
+        delimiter: Optional[str] = None,
+        marker: Optional[str] = None,
+        max_results: Optional[int] = None,
+        include: Optional[List[Union[str, _models.ListBlobsIncludeItem]]] = None,
+        showonly: Literal["deleted"] = "deleted",
+        timeout: Optional[int] = None,
+        request_id_parameter: Optional[str] = None,
+        **kwargs: Any
+    ) -> _models.ListBlobsHierarchySegmentResponse:
+        # pylint: disable=line-too-long
+        """The List Blobs operation returns a list of the blobs under the specified container.
+
+        :param prefix: Filters results to blobs whose names begin with the specified prefix. Default
+         value is None.
+        :type prefix: str
+        :param delimiter: When the request includes this parameter, the operation returns a BlobPrefix
+         element in the response body that acts as a placeholder for all blobs whose names begin with
+         the same substring up to the appearance of the delimiter character. The delimiter may be a
+         single character or a string. Default value is None.
+        :type delimiter: str
+        :param marker: A string value that identifies the portion of the list of blobs to be
+         returned with the next listing operation. The operation returns the NextMarker value within the
+         response body if the listing operation did not return all blobs remaining to be listed
+         with the current page. The NextMarker value can be used as the value for the marker parameter
+         in a subsequent call to request the next page of list items. The marker value is opaque to the
+         client. Default value is None.
+        :type marker: str
+        :param max_results: An optional value that specifies the maximum number of items to return. If
+         omitted or greater than 5,000, the response will include up to 5,000 items. Default value is
+         None.
+        :type max_results: int
+        :param include: Include this parameter to specify one or more datasets to include in the
+         response. Default value is None.
+        :type include: list[str or ~azure.storage.filedatalake.models.ListBlobsIncludeItem]
+        :param showonly: Include this parameter to restrict the listing to the indicated subset of
+         blobs; "deleted" returns only soft-deleted blobs. Known values are "deleted" and None. Default
+         value is "deleted".
+        :type showonly: str
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :return: ListBlobsHierarchySegmentResponse or the result of cls(response)
+        :rtype: ~azure.storage.filedatalake.models.ListBlobsHierarchySegmentResponse
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        restype: Literal["container"] = kwargs.pop("restype", _params.pop("restype", "container"))
+        comp: Literal["list"] = kwargs.pop("comp", _params.pop("comp", "list"))
+        cls: ClsType[_models.ListBlobsHierarchySegmentResponse] = kwargs.pop("cls", None)
+
+        _request = build_list_blob_hierarchy_segment_request(
+            url=self._config.url,
+            prefix=prefix,
+            delimiter=delimiter,
+            marker=marker,
+            max_results=max_results,
+            include=include,
+            showonly=showonly,
+            timeout=timeout,
+            request_id_parameter=request_id_parameter,
+            restype=restype,
+            comp=comp,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["Content-Type"] = self._deserialize("str", response.headers.get("Content-Type"))
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+
+        deserialized = self._deserialize("ListBlobsHierarchySegmentResponse", pipeline_response.http_response)
+
+        if cls:
+            return cls(pipeline_response, deserialized, response_headers)  # type: ignore
+
+        return deserialized  # type: ignore
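+
+    def _example_list_hierarchy_pages(self) -> list:
+        """Hedged sketch, not generated code: page through hierarchy segments via
+        ``next_marker`` until the listing is exhausted. Attribute names are
+        assumed to mirror the generated ``ListBlobsHierarchySegmentResponse`` model.
+        """
+        blobs: list = []
+        marker: Optional[str] = None
+        while True:
+            segment = self.list_blob_hierarchy_segment(
+                delimiter="/",
+                marker=marker,
+                max_results=1000,
+                showonly=None,  # the generated default ("deleted") lists only soft-deleted blobs
+            )
+            blobs.extend(segment.segment.blob_items or [])
+            marker = segment.next_marker
+            if not marker:
+                return blobs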
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/operations/_patch.py b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/operations/_patch.py
new file mode 100644
index 00000000..f7dd3251
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/operations/_patch.py
@@ -0,0 +1,20 @@
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------
+"""Customize generated code here.
+
+Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize
+"""
+from typing import List
+
+__all__: List[str] = []  # Add all objects you want publicly available to users at this package level
+
+
+def patch_sdk():
+    """Do not remove from this file.
+
+    `patch_sdk` is a last resort escape hatch that allows you to do customizations
+    you can't accomplish using the techniques described in
+    https://aka.ms/azsdk/python/dpcodegen/python/customize
+    """
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/operations/_path_operations.py b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/operations/_path_operations.py
new file mode 100644
index 00000000..b6d6a0a9
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/operations/_path_operations.py
@@ -0,0 +1,2845 @@
+# pylint: disable=too-many-lines
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+import datetime
+import sys
+from typing import Any, Callable, Dict, IO, Iterator, Literal, Optional, TypeVar, Union
+
+from azure.core import PipelineClient
+from azure.core.exceptions import (
+    ClientAuthenticationError,
+    HttpResponseError,
+    ResourceExistsError,
+    ResourceNotFoundError,
+    ResourceNotModifiedError,
+    StreamClosedError,
+    StreamConsumedError,
+    map_error,
+)
+from azure.core.pipeline import PipelineResponse
+from azure.core.rest import HttpRequest, HttpResponse
+from azure.core.tracing.decorator import distributed_trace
+from azure.core.utils import case_insensitive_dict
+
+from .. import models as _models
+from .._configuration import AzureDataLakeStorageRESTAPIConfiguration
+from .._serialization import Deserializer, Serializer
+
+if sys.version_info >= (3, 9):
+    from collections.abc import MutableMapping
+else:
+    from typing import MutableMapping  # type: ignore
+T = TypeVar("T")
+ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
+
+_SERIALIZER = Serializer()
+_SERIALIZER.client_side_validation = False
+
+
+def build_create_request(
+    url: str,
+    *,
+    request_id_parameter: Optional[str] = None,
+    timeout: Optional[int] = None,
+    resource: Optional[Union[str, _models.PathResourceType]] = None,
+    continuation: Optional[str] = None,
+    mode: Optional[Union[str, _models.PathRenameMode]] = None,
+    cache_control: Optional[str] = None,
+    content_encoding: Optional[str] = None,
+    content_language: Optional[str] = None,
+    content_disposition: Optional[str] = None,
+    content_type_parameter: Optional[str] = None,
+    rename_source: Optional[str] = None,
+    lease_id: Optional[str] = None,
+    source_lease_id: Optional[str] = None,
+    properties: Optional[str] = None,
+    permissions: Optional[str] = None,
+    umask: Optional[str] = None,
+    if_match: Optional[str] = None,
+    if_none_match: Optional[str] = None,
+    if_modified_since: Optional[datetime.datetime] = None,
+    if_unmodified_since: Optional[datetime.datetime] = None,
+    source_if_match: Optional[str] = None,
+    source_if_none_match: Optional[str] = None,
+    source_if_modified_since: Optional[datetime.datetime] = None,
+    source_if_unmodified_since: Optional[datetime.datetime] = None,
+    encryption_key: Optional[str] = None,
+    encryption_key_sha256: Optional[str] = None,
+    encryption_algorithm: Literal["AES256"] = "AES256",
+    owner: Optional[str] = None,
+    group: Optional[str] = None,
+    acl: Optional[str] = None,
+    proposed_lease_id: Optional[str] = None,
+    lease_duration: Optional[int] = None,
+    expiry_options: Optional[Union[str, _models.PathExpiryOptions]] = None,
+    expires_on: Optional[str] = None,
+    encryption_context: Optional[str] = None,
+    **kwargs: Any
+) -> HttpRequest:
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    version: Literal["2025-01-05"] = kwargs.pop("version", _headers.pop("x-ms-version", "2025-01-05"))
+    accept = _headers.pop("Accept", "application/json")
+
+    # Construct URL
+    _url = kwargs.pop("template_url", "{url}")
+    path_format_arguments = {
+        "url": _SERIALIZER.url("url", url, "str", skip_quote=True),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    if timeout is not None:
+        _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0)
+    if resource is not None:
+        _params["resource"] = _SERIALIZER.query("resource", resource, "str")
+    if continuation is not None:
+        _params["continuation"] = _SERIALIZER.query("continuation", continuation, "str")
+    if mode is not None:
+        _params["mode"] = _SERIALIZER.query("mode", mode, "str")
+
+    # Construct headers
+    if request_id_parameter is not None:
+        _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str")
+    _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str")
+    if cache_control is not None:
+        _headers["x-ms-cache-control"] = _SERIALIZER.header("cache_control", cache_control, "str")
+    if content_encoding is not None:
+        _headers["x-ms-content-encoding"] = _SERIALIZER.header("content_encoding", content_encoding, "str")
+    if content_language is not None:
+        _headers["x-ms-content-language"] = _SERIALIZER.header("content_language", content_language, "str")
+    if content_disposition is not None:
+        _headers["x-ms-content-disposition"] = _SERIALIZER.header("content_disposition", content_disposition, "str")
+    if content_type_parameter is not None:
+        _headers["x-ms-content-type"] = _SERIALIZER.header("content_type_parameter", content_type_parameter, "str")
+    if rename_source is not None:
+        _headers["x-ms-rename-source"] = _SERIALIZER.header("rename_source", rename_source, "str")
+    if lease_id is not None:
+        _headers["x-ms-lease-id"] = _SERIALIZER.header("lease_id", lease_id, "str")
+    if source_lease_id is not None:
+        _headers["x-ms-source-lease-id"] = _SERIALIZER.header("source_lease_id", source_lease_id, "str")
+    if properties is not None:
+        _headers["x-ms-properties"] = _SERIALIZER.header("properties", properties, "str")
+    if permissions is not None:
+        _headers["x-ms-permissions"] = _SERIALIZER.header("permissions", permissions, "str")
+    if umask is not None:
+        _headers["x-ms-umask"] = _SERIALIZER.header("umask", umask, "str")
+    if if_match is not None:
+        _headers["If-Match"] = _SERIALIZER.header("if_match", if_match, "str")
+    if if_none_match is not None:
+        _headers["If-None-Match"] = _SERIALIZER.header("if_none_match", if_none_match, "str")
+    if if_modified_since is not None:
+        _headers["If-Modified-Since"] = _SERIALIZER.header("if_modified_since", if_modified_since, "rfc-1123")
+    if if_unmodified_since is not None:
+        _headers["If-Unmodified-Since"] = _SERIALIZER.header("if_unmodified_since", if_unmodified_since, "rfc-1123")
+    if source_if_match is not None:
+        _headers["x-ms-source-if-match"] = _SERIALIZER.header("source_if_match", source_if_match, "str")
+    if source_if_none_match is not None:
+        _headers["x-ms-source-if-none-match"] = _SERIALIZER.header("source_if_none_match", source_if_none_match, "str")
+    if source_if_modified_since is not None:
+        _headers["x-ms-source-if-modified-since"] = _SERIALIZER.header(
+            "source_if_modified_since", source_if_modified_since, "rfc-1123"
+        )
+    if source_if_unmodified_since is not None:
+        _headers["x-ms-source-if-unmodified-since"] = _SERIALIZER.header(
+            "source_if_unmodified_since", source_if_unmodified_since, "rfc-1123"
+        )
+    if encryption_key is not None:
+        _headers["x-ms-encryption-key"] = _SERIALIZER.header("encryption_key", encryption_key, "str")
+    if encryption_key_sha256 is not None:
+        _headers["x-ms-encryption-key-sha256"] = _SERIALIZER.header(
+            "encryption_key_sha256", encryption_key_sha256, "str"
+        )
+    if encryption_algorithm is not None:
+        _headers["x-ms-encryption-algorithm"] = _SERIALIZER.header("encryption_algorithm", encryption_algorithm, "str")
+    if owner is not None:
+        _headers["x-ms-owner"] = _SERIALIZER.header("owner", owner, "str")
+    if group is not None:
+        _headers["x-ms-group"] = _SERIALIZER.header("group", group, "str")
+    if acl is not None:
+        _headers["x-ms-acl"] = _SERIALIZER.header("acl", acl, "str")
+    if proposed_lease_id is not None:
+        _headers["x-ms-proposed-lease-id"] = _SERIALIZER.header("proposed_lease_id", proposed_lease_id, "str")
+    if lease_duration is not None:
+        _headers["x-ms-lease-duration"] = _SERIALIZER.header("lease_duration", lease_duration, "int")
+    if expiry_options is not None:
+        _headers["x-ms-expiry-option"] = _SERIALIZER.header("expiry_options", expiry_options, "str")
+    if expires_on is not None:
+        _headers["x-ms-expiry-time"] = _SERIALIZER.header("expires_on", expires_on, "str")
+    if encryption_context is not None:
+        _headers["x-ms-encryption-context"] = _SERIALIZER.header("encryption_context", encryption_context, "str")
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_update_request(
+    url: str,
+    *,
+    action: Union[str, _models.PathUpdateAction],
+    mode: Union[str, _models.PathSetAccessControlRecursiveMode],
+    content: IO[bytes],
+    request_id_parameter: Optional[str] = None,
+    timeout: Optional[int] = None,
+    max_records: Optional[int] = None,
+    continuation: Optional[str] = None,
+    force_flag: Optional[bool] = None,
+    position: Optional[int] = None,
+    retain_uncommitted_data: Optional[bool] = None,
+    close: Optional[bool] = None,
+    content_length: Optional[int] = None,
+    content_md5: Optional[bytes] = None,
+    lease_id: Optional[str] = None,
+    cache_control: Optional[str] = None,
+    content_type_parameter: Optional[str] = None,
+    content_disposition: Optional[str] = None,
+    content_encoding: Optional[str] = None,
+    content_language: Optional[str] = None,
+    properties: Optional[str] = None,
+    owner: Optional[str] = None,
+    group: Optional[str] = None,
+    permissions: Optional[str] = None,
+    acl: Optional[str] = None,
+    if_match: Optional[str] = None,
+    if_none_match: Optional[str] = None,
+    if_modified_since: Optional[datetime.datetime] = None,
+    if_unmodified_since: Optional[datetime.datetime] = None,
+    structured_body_type: Optional[str] = None,
+    structured_content_length: Optional[int] = None,
+    **kwargs: Any
+) -> HttpRequest:
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+    version: Literal["2025-01-05"] = kwargs.pop("version", _headers.pop("x-ms-version", "2025-01-05"))
+    accept = _headers.pop("Accept", "application/json")
+
+    # Construct URL
+    _url = kwargs.pop("template_url", "{url}")
+    path_format_arguments = {
+        "url": _SERIALIZER.url("url", url, "str", skip_quote=True),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    if timeout is not None:
+        _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0)
+    _params["action"] = _SERIALIZER.query("action", action, "str")
+    if max_records is not None:
+        _params["maxRecords"] = _SERIALIZER.query("max_records", max_records, "int", minimum=1)
+    if continuation is not None:
+        _params["continuation"] = _SERIALIZER.query("continuation", continuation, "str")
+    _params["mode"] = _SERIALIZER.query("mode", mode, "str")
+    if force_flag is not None:
+        _params["forceFlag"] = _SERIALIZER.query("force_flag", force_flag, "bool")
+    if position is not None:
+        _params["position"] = _SERIALIZER.query("position", position, "int")
+    if retain_uncommitted_data is not None:
+        _params["retainUncommittedData"] = _SERIALIZER.query("retain_uncommitted_data", retain_uncommitted_data, "bool")
+    if close is not None:
+        _params["close"] = _SERIALIZER.query("close", close, "bool")
+
+    # Construct headers
+    if request_id_parameter is not None:
+        _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str")
+    _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str")
+    if content_length is not None:
+        _headers["Content-Length"] = _SERIALIZER.header("content_length", content_length, "int", minimum=0)
+    if content_md5 is not None:
+        _headers["x-ms-content-md5"] = _SERIALIZER.header("content_md5", content_md5, "bytearray")
+    if lease_id is not None:
+        _headers["x-ms-lease-id"] = _SERIALIZER.header("lease_id", lease_id, "str")
+    if cache_control is not None:
+        _headers["x-ms-cache-control"] = _SERIALIZER.header("cache_control", cache_control, "str")
+    if content_type_parameter is not None:
+        _headers["x-ms-content-type"] = _SERIALIZER.header("content_type_parameter", content_type_parameter, "str")
+    if content_disposition is not None:
+        _headers["x-ms-content-disposition"] = _SERIALIZER.header("content_disposition", content_disposition, "str")
+    if content_encoding is not None:
+        _headers["x-ms-content-encoding"] = _SERIALIZER.header("content_encoding", content_encoding, "str")
+    if content_language is not None:
+        _headers["x-ms-content-language"] = _SERIALIZER.header("content_language", content_language, "str")
+    if properties is not None:
+        _headers["x-ms-properties"] = _SERIALIZER.header("properties", properties, "str")
+    if owner is not None:
+        _headers["x-ms-owner"] = _SERIALIZER.header("owner", owner, "str")
+    if group is not None:
+        _headers["x-ms-group"] = _SERIALIZER.header("group", group, "str")
+    if permissions is not None:
+        _headers["x-ms-permissions"] = _SERIALIZER.header("permissions", permissions, "str")
+    if acl is not None:
+        _headers["x-ms-acl"] = _SERIALIZER.header("acl", acl, "str")
+    if if_match is not None:
+        _headers["If-Match"] = _SERIALIZER.header("if_match", if_match, "str")
+    if if_none_match is not None:
+        _headers["If-None-Match"] = _SERIALIZER.header("if_none_match", if_none_match, "str")
+    if if_modified_since is not None:
+        _headers["If-Modified-Since"] = _SERIALIZER.header("if_modified_since", if_modified_since, "rfc-1123")
+    if if_unmodified_since is not None:
+        _headers["If-Unmodified-Since"] = _SERIALIZER.header("if_unmodified_since", if_unmodified_since, "rfc-1123")
+    if structured_body_type is not None:
+        _headers["x-ms-structured-body"] = _SERIALIZER.header("structured_body_type", structured_body_type, "str")
+    if structured_content_length is not None:
+        _headers["x-ms-structured-content-length"] = _SERIALIZER.header(
+            "structured_content_length", structured_content_length, "int"
+        )
+    if content_type is not None:
+        _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="PATCH", url=_url, params=_params, headers=_headers, content=content, **kwargs)
+
+
+def build_lease_request(
+    url: str,
+    *,
+    x_ms_lease_action: Union[str, _models.PathLeaseAction],
+    request_id_parameter: Optional[str] = None,
+    timeout: Optional[int] = None,
+    x_ms_lease_break_period: Optional[int] = None,
+    lease_id: Optional[str] = None,
+    proposed_lease_id: Optional[str] = None,
+    if_match: Optional[str] = None,
+    if_none_match: Optional[str] = None,
+    if_modified_since: Optional[datetime.datetime] = None,
+    if_unmodified_since: Optional[datetime.datetime] = None,
+    x_ms_lease_duration: Optional[int] = None,
+    **kwargs: Any
+) -> HttpRequest:
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    version: Literal["2025-01-05"] = kwargs.pop("version", _headers.pop("x-ms-version", "2025-01-05"))
+    accept = _headers.pop("Accept", "application/json")
+
+    # Construct URL
+    _url = kwargs.pop("template_url", "{url}")
+    path_format_arguments = {
+        "url": _SERIALIZER.url("url", url, "str", skip_quote=True),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    if timeout is not None:
+        _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0)
+
+    # Construct headers
+    if request_id_parameter is not None:
+        _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str")
+    _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str")
+    _headers["x-ms-lease-action"] = _SERIALIZER.header("x_ms_lease_action", x_ms_lease_action, "str")
+    if x_ms_lease_duration is not None:
+        _headers["x-ms-lease-duration"] = _SERIALIZER.header("x_ms_lease_duration", x_ms_lease_duration, "int")
+    if x_ms_lease_break_period is not None:
+        _headers["x-ms-lease-break-period"] = _SERIALIZER.header(
+            "x_ms_lease_break_period", x_ms_lease_break_period, "int"
+        )
+    if lease_id is not None:
+        _headers["x-ms-lease-id"] = _SERIALIZER.header("lease_id", lease_id, "str")
+    if proposed_lease_id is not None:
+        _headers["x-ms-proposed-lease-id"] = _SERIALIZER.header("proposed_lease_id", proposed_lease_id, "str")
+    if if_match is not None:
+        _headers["If-Match"] = _SERIALIZER.header("if_match", if_match, "str")
+    if if_none_match is not None:
+        _headers["If-None-Match"] = _SERIALIZER.header("if_none_match", if_none_match, "str")
+    if if_modified_since is not None:
+        _headers["If-Modified-Since"] = _SERIALIZER.header("if_modified_since", if_modified_since, "rfc-1123")
+    if if_unmodified_since is not None:
+        _headers["If-Unmodified-Since"] = _SERIALIZER.header("if_unmodified_since", if_unmodified_since, "rfc-1123")
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_read_request(
+    url: str,
+    *,
+    request_id_parameter: Optional[str] = None,
+    timeout: Optional[int] = None,
+    range: Optional[str] = None,
+    lease_id: Optional[str] = None,
+    x_ms_range_get_content_md5: Optional[bool] = None,
+    if_match: Optional[str] = None,
+    if_none_match: Optional[str] = None,
+    if_modified_since: Optional[datetime.datetime] = None,
+    if_unmodified_since: Optional[datetime.datetime] = None,
+    encryption_key: Optional[str] = None,
+    encryption_key_sha256: Optional[str] = None,
+    encryption_algorithm: Literal["AES256"] = "AES256",
+    **kwargs: Any
+) -> HttpRequest:
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    version: Literal["2025-01-05"] = kwargs.pop("version", _headers.pop("x-ms-version", "2025-01-05"))
+    accept = _headers.pop("Accept", "application/json")
+
+    # Construct URL
+    _url = kwargs.pop("template_url", "{url}")
+    path_format_arguments = {
+        "url": _SERIALIZER.url("url", url, "str", skip_quote=True),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    if timeout is not None:
+        _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0)
+
+    # Construct headers
+    if request_id_parameter is not None:
+        _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str")
+    _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str")
+    if range is not None:
+        _headers["Range"] = _SERIALIZER.header("range", range, "str")
+    if lease_id is not None:
+        _headers["x-ms-lease-id"] = _SERIALIZER.header("lease_id", lease_id, "str")
+    if x_ms_range_get_content_md5 is not None:
+        _headers["x-ms-range-get-content-md5"] = _SERIALIZER.header(
+            "x_ms_range_get_content_md5", x_ms_range_get_content_md5, "bool"
+        )
+    if if_match is not None:
+        _headers["If-Match"] = _SERIALIZER.header("if_match", if_match, "str")
+    if if_none_match is not None:
+        _headers["If-None-Match"] = _SERIALIZER.header("if_none_match", if_none_match, "str")
+    if if_modified_since is not None:
+        _headers["If-Modified-Since"] = _SERIALIZER.header("if_modified_since", if_modified_since, "rfc-1123")
+    if if_unmodified_since is not None:
+        _headers["If-Unmodified-Since"] = _SERIALIZER.header("if_unmodified_since", if_unmodified_since, "rfc-1123")
+    if encryption_key is not None:
+        _headers["x-ms-encryption-key"] = _SERIALIZER.header("encryption_key", encryption_key, "str")
+    if encryption_key_sha256 is not None:
+        _headers["x-ms-encryption-key-sha256"] = _SERIALIZER.header(
+            "encryption_key_sha256", encryption_key_sha256, "str"
+        )
+    if encryption_algorithm is not None:
+        _headers["x-ms-encryption-algorithm"] = _SERIALIZER.header("encryption_algorithm", encryption_algorithm, "str")
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_get_properties_request(
+    url: str,
+    *,
+    request_id_parameter: Optional[str] = None,
+    timeout: Optional[int] = None,
+    action: Optional[Union[str, _models.PathGetPropertiesAction]] = None,
+    upn: Optional[bool] = None,
+    lease_id: Optional[str] = None,
+    if_match: Optional[str] = None,
+    if_none_match: Optional[str] = None,
+    if_modified_since: Optional[datetime.datetime] = None,
+    if_unmodified_since: Optional[datetime.datetime] = None,
+    **kwargs: Any
+) -> HttpRequest:
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    version: Literal["2025-01-05"] = kwargs.pop("version", _headers.pop("x-ms-version", "2025-01-05"))
+    accept = _headers.pop("Accept", "application/json")
+
+    # Construct URL
+    _url = kwargs.pop("template_url", "{url}")
+    path_format_arguments = {
+        "url": _SERIALIZER.url("url", url, "str", skip_quote=True),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    if timeout is not None:
+        _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0)
+    if action is not None:
+        _params["action"] = _SERIALIZER.query("action", action, "str")
+    if upn is not None:
+        _params["upn"] = _SERIALIZER.query("upn", upn, "bool")
+
+    # Construct headers
+    if request_id_parameter is not None:
+        _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str")
+    _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str")
+    if lease_id is not None:
+        _headers["x-ms-lease-id"] = _SERIALIZER.header("lease_id", lease_id, "str")
+    if if_match is not None:
+        _headers["If-Match"] = _SERIALIZER.header("if_match", if_match, "str")
+    if if_none_match is not None:
+        _headers["If-None-Match"] = _SERIALIZER.header("if_none_match", if_none_match, "str")
+    if if_modified_since is not None:
+        _headers["If-Modified-Since"] = _SERIALIZER.header("if_modified_since", if_modified_since, "rfc-1123")
+    if if_unmodified_since is not None:
+        _headers["If-Unmodified-Since"] = _SERIALIZER.header("if_unmodified_since", if_unmodified_since, "rfc-1123")
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="HEAD", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_delete_request(
+    url: str,
+    *,
+    request_id_parameter: Optional[str] = None,
+    timeout: Optional[int] = None,
+    recursive: Optional[bool] = None,
+    continuation: Optional[str] = None,
+    lease_id: Optional[str] = None,
+    if_match: Optional[str] = None,
+    if_none_match: Optional[str] = None,
+    if_modified_since: Optional[datetime.datetime] = None,
+    if_unmodified_since: Optional[datetime.datetime] = None,
+    paginated: Optional[bool] = None,
+    **kwargs: Any
+) -> HttpRequest:
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    version: Literal["2025-01-05"] = kwargs.pop("version", _headers.pop("x-ms-version", "2025-01-05"))
+    accept = _headers.pop("Accept", "application/json")
+
+    # Construct URL
+    _url = kwargs.pop("template_url", "{url}")
+    path_format_arguments = {
+        "url": _SERIALIZER.url("url", url, "str", skip_quote=True),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    if timeout is not None:
+        _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0)
+    if recursive is not None:
+        _params["recursive"] = _SERIALIZER.query("recursive", recursive, "bool")
+    if continuation is not None:
+        _params["continuation"] = _SERIALIZER.query("continuation", continuation, "str")
+    if paginated is not None:
+        _params["paginated"] = _SERIALIZER.query("paginated", paginated, "bool")
+
+    # Construct headers
+    if request_id_parameter is not None:
+        _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str")
+    _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str")
+    if lease_id is not None:
+        _headers["x-ms-lease-id"] = _SERIALIZER.header("lease_id", lease_id, "str")
+    if if_match is not None:
+        _headers["If-Match"] = _SERIALIZER.header("if_match", if_match, "str")
+    if if_none_match is not None:
+        _headers["If-None-Match"] = _SERIALIZER.header("if_none_match", if_none_match, "str")
+    if if_modified_since is not None:
+        _headers["If-Modified-Since"] = _SERIALIZER.header("if_modified_since", if_modified_since, "rfc-1123")
+    if if_unmodified_since is not None:
+        _headers["If-Unmodified-Since"] = _SERIALIZER.header("if_unmodified_since", if_unmodified_since, "rfc-1123")
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="DELETE", url=_url, params=_params, headers=_headers, **kwargs)
+
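+
+# Illustrative sketch, not generated code: a recursive, paginated directory
+# delete.  Each response may return an x-ms-continuation header; the caller
+# feeds it back through `continuation` until the service stops returning one.
+def _example_delete_request(continuation: Optional[str] = None) -> HttpRequest:
+    return build_delete_request(
+        url="https://myaccount.dfs.core.windows.net/myfs/dir",  # placeholder URL
+        recursive=True,
+        paginated=True,
+        continuation=continuation,
+    )
+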
+
+def build_set_access_control_request(
+    url: str,
+    *,
+    timeout: Optional[int] = None,
+    lease_id: Optional[str] = None,
+    owner: Optional[str] = None,
+    group: Optional[str] = None,
+    permissions: Optional[str] = None,
+    acl: Optional[str] = None,
+    if_match: Optional[str] = None,
+    if_none_match: Optional[str] = None,
+    if_modified_since: Optional[datetime.datetime] = None,
+    if_unmodified_since: Optional[datetime.datetime] = None,
+    request_id_parameter: Optional[str] = None,
+    **kwargs: Any
+) -> HttpRequest:
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    action: Literal["setAccessControl"] = kwargs.pop("action", _params.pop("action", "setAccessControl"))
+    version: Literal["2025-01-05"] = kwargs.pop("version", _headers.pop("x-ms-version", "2025-01-05"))
+    accept = _headers.pop("Accept", "application/json")
+
+    # Construct URL
+    _url = kwargs.pop("template_url", "{url}")
+    path_format_arguments = {
+        "url": _SERIALIZER.url("url", url, "str", skip_quote=True),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    _params["action"] = _SERIALIZER.query("action", action, "str")
+    if timeout is not None:
+        _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0)
+
+    # Construct headers
+    if lease_id is not None:
+        _headers["x-ms-lease-id"] = _SERIALIZER.header("lease_id", lease_id, "str")
+    if owner is not None:
+        _headers["x-ms-owner"] = _SERIALIZER.header("owner", owner, "str")
+    if group is not None:
+        _headers["x-ms-group"] = _SERIALIZER.header("group", group, "str")
+    if permissions is not None:
+        _headers["x-ms-permissions"] = _SERIALIZER.header("permissions", permissions, "str")
+    if acl is not None:
+        _headers["x-ms-acl"] = _SERIALIZER.header("acl", acl, "str")
+    if if_match is not None:
+        _headers["If-Match"] = _SERIALIZER.header("if_match", if_match, "str")
+    if if_none_match is not None:
+        _headers["If-None-Match"] = _SERIALIZER.header("if_none_match", if_none_match, "str")
+    if if_modified_since is not None:
+        _headers["If-Modified-Since"] = _SERIALIZER.header("if_modified_since", if_modified_since, "rfc-1123")
+    if if_unmodified_since is not None:
+        _headers["If-Unmodified-Since"] = _SERIALIZER.header("if_unmodified_since", if_unmodified_since, "rfc-1123")
+    if request_id_parameter is not None:
+        _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str")
+    _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str")
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="PATCH", url=_url, params=_params, headers=_headers, **kwargs)
+
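+
+# Illustrative sketch, not generated code: setting the owner and POSIX
+# permissions on a path.  x-ms-permissions and x-ms-acl are mutually exclusive,
+# so this example sets `permissions` only.  The URL and owner are placeholders.
+def _example_set_access_control_request() -> HttpRequest:
+    return build_set_access_control_request(
+        url="https://myaccount.dfs.core.windows.net/myfs/dir/file.txt",
+        owner="$superuser",
+        permissions="rwxr-x---",  # symbolic form; 4-digit octal such as "0750" also works
+    )
+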
+
+def build_set_access_control_recursive_request(  # pylint: disable=name-too-long
+    url: str,
+    *,
+    mode: Union[str, _models.PathSetAccessControlRecursiveMode],
+    timeout: Optional[int] = None,
+    continuation: Optional[str] = None,
+    force_flag: Optional[bool] = None,
+    max_records: Optional[int] = None,
+    acl: Optional[str] = None,
+    request_id_parameter: Optional[str] = None,
+    **kwargs: Any
+) -> HttpRequest:
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    action: Literal["setAccessControlRecursive"] = kwargs.pop(
+        "action", _params.pop("action", "setAccessControlRecursive")
+    )
+    version: Literal["2025-01-05"] = kwargs.pop("version", _headers.pop("x-ms-version", "2025-01-05"))
+    accept = _headers.pop("Accept", "application/json")
+
+    # Construct URL
+    _url = kwargs.pop("template_url", "{url}")
+    path_format_arguments = {
+        "url": _SERIALIZER.url("url", url, "str", skip_quote=True),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    _params["action"] = _SERIALIZER.query("action", action, "str")
+    if timeout is not None:
+        _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0)
+    if continuation is not None:
+        _params["continuation"] = _SERIALIZER.query("continuation", continuation, "str")
+    _params["mode"] = _SERIALIZER.query("mode", mode, "str")
+    if force_flag is not None:
+        _params["forceFlag"] = _SERIALIZER.query("force_flag", force_flag, "bool")
+    if max_records is not None:
+        _params["maxRecords"] = _SERIALIZER.query("max_records", max_records, "int", minimum=1)
+
+    # Construct headers
+    if acl is not None:
+        _headers["x-ms-acl"] = _SERIALIZER.header("acl", acl, "str")
+    if request_id_parameter is not None:
+        _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str")
+    _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str")
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="PATCH", url=_url, params=_params, headers=_headers, **kwargs)
+
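+
+# Illustrative sketch, not generated code: one page of a recursive ACL change.
+# mode="modify" merges the given entries into existing ACLs; the caller loops,
+# passing each returned x-ms-continuation token back in, until none is returned.
+def _example_set_acl_recursive_request(continuation: Optional[str] = None) -> HttpRequest:
+    return build_set_access_control_recursive_request(
+        url="https://myaccount.dfs.core.windows.net/myfs/dir",  # placeholder URL
+        mode="modify",
+        acl="group::r-x",  # hypothetical ACE granting the owning group read/execute
+        force_flag=True,  # continue past per-entry 4XX failures
+        max_records=2000,
+        continuation=continuation,
+    )
+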
+
+def build_flush_data_request(
+    url: str,
+    *,
+    timeout: Optional[int] = None,
+    position: Optional[int] = None,
+    retain_uncommitted_data: Optional[bool] = None,
+    close: Optional[bool] = None,
+    content_length: Optional[int] = None,
+    content_md5: Optional[bytes] = None,
+    lease_id: Optional[str] = None,
+    lease_action: Optional[Union[str, _models.LeaseAction]] = None,
+    lease_duration: Optional[int] = None,
+    proposed_lease_id: Optional[str] = None,
+    cache_control: Optional[str] = None,
+    content_type_parameter: Optional[str] = None,
+    content_disposition: Optional[str] = None,
+    content_encoding: Optional[str] = None,
+    content_language: Optional[str] = None,
+    if_match: Optional[str] = None,
+    if_none_match: Optional[str] = None,
+    if_modified_since: Optional[datetime.datetime] = None,
+    if_unmodified_since: Optional[datetime.datetime] = None,
+    request_id_parameter: Optional[str] = None,
+    encryption_key: Optional[str] = None,
+    encryption_key_sha256: Optional[str] = None,
+    encryption_algorithm: Literal["AES256"] = "AES256",
+    **kwargs: Any
+) -> HttpRequest:
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    action: Literal["flush"] = kwargs.pop("action", _params.pop("action", "flush"))
+    version: Literal["2025-01-05"] = kwargs.pop("version", _headers.pop("x-ms-version", "2025-01-05"))
+    accept = _headers.pop("Accept", "application/json")
+
+    # Construct URL
+    _url = kwargs.pop("template_url", "{url}")
+    path_format_arguments = {
+        "url": _SERIALIZER.url("url", url, "str", skip_quote=True),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    _params["action"] = _SERIALIZER.query("action", action, "str")
+    if timeout is not None:
+        _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0)
+    if position is not None:
+        _params["position"] = _SERIALIZER.query("position", position, "int")
+    if retain_uncommitted_data is not None:
+        _params["retainUncommittedData"] = _SERIALIZER.query("retain_uncommitted_data", retain_uncommitted_data, "bool")
+    if close is not None:
+        _params["close"] = _SERIALIZER.query("close", close, "bool")
+
+    # Construct headers
+    if content_length is not None:
+        _headers["Content-Length"] = _SERIALIZER.header("content_length", content_length, "int", minimum=0)
+    if content_md5 is not None:
+        _headers["x-ms-content-md5"] = _SERIALIZER.header("content_md5", content_md5, "bytearray")
+    if lease_id is not None:
+        _headers["x-ms-lease-id"] = _SERIALIZER.header("lease_id", lease_id, "str")
+    if lease_action is not None:
+        _headers["x-ms-lease-action"] = _SERIALIZER.header("lease_action", lease_action, "str")
+    if lease_duration is not None:
+        _headers["x-ms-lease-duration"] = _SERIALIZER.header("lease_duration", lease_duration, "int")
+    if proposed_lease_id is not None:
+        _headers["x-ms-proposed-lease-id"] = _SERIALIZER.header("proposed_lease_id", proposed_lease_id, "str")
+    if cache_control is not None:
+        _headers["x-ms-cache-control"] = _SERIALIZER.header("cache_control", cache_control, "str")
+    if content_type_parameter is not None:
+        _headers["x-ms-content-type"] = _SERIALIZER.header("content_type_parameter", content_type_parameter, "str")
+    if content_disposition is not None:
+        _headers["x-ms-content-disposition"] = _SERIALIZER.header("content_disposition", content_disposition, "str")
+    if content_encoding is not None:
+        _headers["x-ms-content-encoding"] = _SERIALIZER.header("content_encoding", content_encoding, "str")
+    if content_language is not None:
+        _headers["x-ms-content-language"] = _SERIALIZER.header("content_language", content_language, "str")
+    if if_match is not None:
+        _headers["If-Match"] = _SERIALIZER.header("if_match", if_match, "str")
+    if if_none_match is not None:
+        _headers["If-None-Match"] = _SERIALIZER.header("if_none_match", if_none_match, "str")
+    if if_modified_since is not None:
+        _headers["If-Modified-Since"] = _SERIALIZER.header("if_modified_since", if_modified_since, "rfc-1123")
+    if if_unmodified_since is not None:
+        _headers["If-Unmodified-Since"] = _SERIALIZER.header("if_unmodified_since", if_unmodified_since, "rfc-1123")
+    if request_id_parameter is not None:
+        _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str")
+    _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str")
+    if encryption_key is not None:
+        _headers["x-ms-encryption-key"] = _SERIALIZER.header("encryption_key", encryption_key, "str")
+    if encryption_key_sha256 is not None:
+        _headers["x-ms-encryption-key-sha256"] = _SERIALIZER.header(
+            "encryption_key_sha256", encryption_key_sha256, "str"
+        )
+    if encryption_algorithm is not None:
+        _headers["x-ms-encryption-algorithm"] = _SERIALIZER.header("encryption_algorithm", encryption_algorithm, "str")
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="PATCH", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_append_data_request(
+    url: str,
+    *,
+    content: IO[bytes],
+    position: Optional[int] = None,
+    timeout: Optional[int] = None,
+    content_length: Optional[int] = None,
+    transactional_content_hash: Optional[bytes] = None,
+    transactional_content_crc64: Optional[bytes] = None,
+    lease_id: Optional[str] = None,
+    lease_action: Optional[Union[str, _models.LeaseAction]] = None,
+    lease_duration: Optional[int] = None,
+    proposed_lease_id: Optional[str] = None,
+    request_id_parameter: Optional[str] = None,
+    encryption_key: Optional[str] = None,
+    encryption_key_sha256: Optional[str] = None,
+    encryption_algorithm: Literal["AES256"] = "AES256",
+    flush: Optional[bool] = None,
+    structured_body_type: Optional[str] = None,
+    structured_content_length: Optional[int] = None,
+    **kwargs: Any
+) -> HttpRequest:
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    action: Literal["append"] = kwargs.pop("action", _params.pop("action", "append"))
+    content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+    version: Literal["2025-01-05"] = kwargs.pop("version", _headers.pop("x-ms-version", "2025-01-05"))
+    accept = _headers.pop("Accept", "application/json")
+
+    # Construct URL
+    _url = kwargs.pop("template_url", "{url}")
+    path_format_arguments = {
+        "url": _SERIALIZER.url("url", url, "str", skip_quote=True),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    _params["action"] = _SERIALIZER.query("action", action, "str")
+    if position is not None:
+        _params["position"] = _SERIALIZER.query("position", position, "int")
+    if timeout is not None:
+        _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0)
+    if flush is not None:
+        _params["flush"] = _SERIALIZER.query("flush", flush, "bool")
+
+    # Construct headers
+    if content_length is not None:
+        _headers["Content-Length"] = _SERIALIZER.header("content_length", content_length, "int", minimum=0)
+    if transactional_content_hash is not None:
+        _headers["Content-MD5"] = _SERIALIZER.header(
+            "transactional_content_hash", transactional_content_hash, "bytearray"
+        )
+    if transactional_content_crc64 is not None:
+        _headers["x-ms-content-crc64"] = _SERIALIZER.header(
+            "transactional_content_crc64", transactional_content_crc64, "bytearray"
+        )
+    if lease_id is not None:
+        _headers["x-ms-lease-id"] = _SERIALIZER.header("lease_id", lease_id, "str")
+    if lease_action is not None:
+        _headers["x-ms-lease-action"] = _SERIALIZER.header("lease_action", lease_action, "str")
+    if lease_duration is not None:
+        _headers["x-ms-lease-duration"] = _SERIALIZER.header("lease_duration", lease_duration, "int")
+    if proposed_lease_id is not None:
+        _headers["x-ms-proposed-lease-id"] = _SERIALIZER.header("proposed_lease_id", proposed_lease_id, "str")
+    if request_id_parameter is not None:
+        _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str")
+    _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str")
+    if encryption_key is not None:
+        _headers["x-ms-encryption-key"] = _SERIALIZER.header("encryption_key", encryption_key, "str")
+    if encryption_key_sha256 is not None:
+        _headers["x-ms-encryption-key-sha256"] = _SERIALIZER.header(
+            "encryption_key_sha256", encryption_key_sha256, "str"
+        )
+    if encryption_algorithm is not None:
+        _headers["x-ms-encryption-algorithm"] = _SERIALIZER.header("encryption_algorithm", encryption_algorithm, "str")
+    if structured_body_type is not None:
+        _headers["x-ms-structured-body"] = _SERIALIZER.header("structured_body_type", structured_body_type, "str")
+    if structured_content_length is not None:
+        _headers["x-ms-structured-content-length"] = _SERIALIZER.header(
+            "structured_content_length", structured_content_length, "int"
+        )
+    if content_type is not None:
+        _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="PATCH", url=_url, params=_params, headers=_headers, content=content, **kwargs)
+
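+
+# Illustrative sketch, not generated code: a small single-shot upload expressed
+# as an append at position 0 followed by a flush at the final length.  The URL
+# is a placeholder; the SDK drives these builders through PathOperations.
+def _example_append_then_flush(data: bytes):
+    import io
+
+    url = "https://myaccount.dfs.core.windows.net/myfs/dir/file.txt"
+    append = build_append_data_request(
+        url=url, content=io.BytesIO(data), position=0, content_length=len(data)
+    )
+    # The flush commits uncommitted data: Content-Length must be 0 and the
+    # position must equal the total number of bytes previously appended.
+    flush = build_flush_data_request(url=url, position=len(data), content_length=0)
+    return append, flush
+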
+
+def build_set_expiry_request(
+    url: str,
+    *,
+    expiry_options: Union[str, _models.PathExpiryOptions],
+    timeout: Optional[int] = None,
+    request_id_parameter: Optional[str] = None,
+    expires_on: Optional[str] = None,
+    **kwargs: Any
+) -> HttpRequest:
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    comp: Literal["expiry"] = kwargs.pop("comp", _params.pop("comp", "expiry"))
+    version: Literal["2025-01-05"] = kwargs.pop("version", _headers.pop("x-ms-version", "2025-01-05"))
+    accept = _headers.pop("Accept", "application/json")
+
+    # Construct URL
+    _url = kwargs.pop("template_url", "{url}")
+    path_format_arguments = {
+        "url": _SERIALIZER.url("url", url, "str", skip_quote=True),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    _params["comp"] = _SERIALIZER.query("comp", comp, "str")
+    if timeout is not None:
+        _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0)
+
+    # Construct headers
+    _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str")
+    if request_id_parameter is not None:
+        _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str")
+    _headers["x-ms-expiry-option"] = _SERIALIZER.header("expiry_options", expiry_options, "str")
+    if expires_on is not None:
+        _headers["x-ms-expiry-time"] = _SERIALIZER.header("expires_on", expires_on, "str")
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs)
+
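+
+# Illustrative sketch, not generated code: scheduling a file to expire.  With
+# "RelativeToNow", x-ms-expiry-time is interpreted as milliseconds from now
+# (an assumption worth verifying against the service documentation).
+def _example_set_expiry_request() -> HttpRequest:
+    return build_set_expiry_request(
+        url="https://myaccount.dfs.core.windows.net/myfs/dir/file.txt",  # placeholder
+        expiry_options="RelativeToNow",
+        expires_on=str(24 * 60 * 60 * 1000),  # 24 hours, expressed in milliseconds
+    )
+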
+
+def build_undelete_request(
+    url: str,
+    *,
+    timeout: Optional[int] = None,
+    undelete_source: Optional[str] = None,
+    request_id_parameter: Optional[str] = None,
+    **kwargs: Any
+) -> HttpRequest:
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    comp: Literal["undelete"] = kwargs.pop("comp", _params.pop("comp", "undelete"))
+    version: Literal["2025-01-05"] = kwargs.pop("version", _headers.pop("x-ms-version", "2025-01-05"))
+    accept = _headers.pop("Accept", "application/json")
+
+    # Construct URL
+    _url = kwargs.pop("template_url", "{url}")
+    path_format_arguments = {
+        "url": _SERIALIZER.url("url", url, "str", skip_quote=True),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    _params["comp"] = _SERIALIZER.query("comp", comp, "str")
+    if timeout is not None:
+        _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0)
+
+    # Construct headers
+    if undelete_source is not None:
+        _headers["x-ms-undelete-source"] = _SERIALIZER.header("undelete_source", undelete_source, "str")
+    _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str")
+    if request_id_parameter is not None:
+        _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str")
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs)
+
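+
+# Illustrative sketch, not generated code: restoring a soft-deleted path.  The
+# undelete_source format shown ("<path>?deletionid=<id>") is an assumption based
+# on how the higher-level client composes it; the id value here is made up.
+def _example_undelete_request() -> HttpRequest:
+    return build_undelete_request(
+        url="https://myaccount.dfs.core.windows.net/myfs/dir/file.txt",  # placeholder
+        undelete_source="dir/file.txt?deletionid=123456789",  # hypothetical
+    )
+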
+
+class PathOperations:
+    """
+    .. warning::
+        **DO NOT** instantiate this class directly.
+
+        Instead, you should access the following operations through
+        :class:`~azure.storage.filedatalake.AzureDataLakeStorageRESTAPI`'s
+        :attr:`path` attribute.
+    """
+
+    models = _models
+
+    def __init__(self, *args, **kwargs):
+        input_args = list(args)
+        self._client: PipelineClient = input_args.pop(0) if input_args else kwargs.pop("client")
+        self._config: AzureDataLakeStorageRESTAPIConfiguration = (
+            input_args.pop(0) if input_args else kwargs.pop("config")
+        )
+        self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer")
+        self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer")
+
+    @distributed_trace
+    def create(  # pylint: disable=inconsistent-return-statements
+        self,
+        request_id_parameter: Optional[str] = None,
+        timeout: Optional[int] = None,
+        resource: Optional[Union[str, _models.PathResourceType]] = None,
+        continuation: Optional[str] = None,
+        mode: Optional[Union[str, _models.PathRenameMode]] = None,
+        rename_source: Optional[str] = None,
+        source_lease_id: Optional[str] = None,
+        properties: Optional[str] = None,
+        permissions: Optional[str] = None,
+        umask: Optional[str] = None,
+        owner: Optional[str] = None,
+        group: Optional[str] = None,
+        acl: Optional[str] = None,
+        proposed_lease_id: Optional[str] = None,
+        lease_duration: Optional[int] = None,
+        expiry_options: Optional[Union[str, _models.PathExpiryOptions]] = None,
+        expires_on: Optional[str] = None,
+        encryption_context: Optional[str] = None,
+        path_http_headers: Optional[_models.PathHTTPHeaders] = None,
+        lease_access_conditions: Optional[_models.LeaseAccessConditions] = None,
+        modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None,
+        source_modified_access_conditions: Optional[_models.SourceModifiedAccessConditions] = None,
+        cpk_info: Optional[_models.CpkInfo] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """Create File | Create Directory | Rename File | Rename Directory.
+
+        Create or rename a file or directory.  By default, the destination is overwritten; if the
+        destination already exists and has a lease, the lease is broken.  This operation supports
+        conditional HTTP requests.  For more information, see `Specifying Conditional Headers for Blob
+        Service Operations
+        <https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations>`_.
+        To fail if the destination already exists, use a conditional request with If-None-Match: "*".
+
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param resource: Required only for Create File and Create Directory. The value must be "file"
+         or "directory". Known values are: "directory" and "file". Default value is None.
+        :type resource: str or ~azure.storage.filedatalake.models.PathResourceType
+        :param continuation: Optional.  When renaming a directory, the number of paths that are
+         renamed with each invocation is limited.  If the number of paths to be renamed exceeds this
+         limit, a continuation token is returned in this response header.  When a continuation token
+         is returned in the response, it must be specified in a subsequent invocation of the rename
+         operation to continue renaming the directory. Default value is None.
+        :type continuation: str
+        :param mode: Optional. Valid only when namespace is enabled. This parameter determines the
+         behavior of the rename operation. The value must be "legacy" or "posix", and the default value
+         will be "posix". Known values are: "legacy" and "posix". Default value is None.
+        :type mode: str or ~azure.storage.filedatalake.models.PathRenameMode
+        :param rename_source: An optional file or directory to be renamed.  The value must have the
+         following format: "/{filesystem}/{path}".  If "x-ms-properties" is specified, the properties
+         will overwrite the existing properties; otherwise, the existing properties will be preserved.
+         This value must be a URL percent-encoded string. Note that the string may only contain ASCII
+         characters in the ISO-8859-1 character set. Default value is None.
+        :type rename_source: str
+        :param source_lease_id: A lease ID for the source path. If specified, the source path must have
+         an active lease and the lease ID must match. Default value is None.
+        :type source_lease_id: str
+        :param properties: Optional. User-defined properties to be stored with the file or directory,
+         in the format of a comma-separated list of name and value pairs "n1=v1, n2=v2, ...", where each
+         value is a base64 encoded string. Note that the string may only contain ASCII characters in the
+         ISO-8859-1 character set.  If the file or directory exists, any properties not included in the
+         list will be removed.  All properties are removed if the header is omitted.  To merge new and
+         existing properties, first get all existing properties and the current E-Tag, then make a
+         conditional request with the E-Tag and include values for all properties. Default value is
+         None.
+        :type properties: str
+        :param permissions: Optional and only valid if Hierarchical Namespace is enabled for the
+         account. Sets POSIX access permissions for the file owner, the file owning group, and others.
+         Each class may be granted read, write, or execute permission.  The sticky bit is also
+         supported.  Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are supported.
+         Default value is None.
+        :type permissions: str
+        :param umask: Optional and only valid if Hierarchical Namespace is enabled for the account.
+         When creating a file or directory and the parent folder does not have a default ACL, the umask
+         restricts the permissions of the file or directory to be created.  The resulting permission is
+         given by p bitwise and not u, where p is the permission and u is the umask.  For example, if p
+         is 0777 and u is 0057, then the resulting permission is 0720.  The default permission is 0777
+         for a directory and 0666 for a file.  The default umask is 0027.  The umask must be specified
+         in 4-digit octal notation (e.g. 0766). Default value is None.
+        :type umask: str
+        :param owner: Optional. The owner of the blob or directory. Default value is None.
+        :type owner: str
+        :param group: Optional. The owning group of the blob or directory. Default value is None.
+        :type group: str
+        :param acl: Sets POSIX access control rights on files and directories. The value is a
+         comma-separated list of access control entries. Each access control entry (ACE) consists of a
+         scope, a type, a user or group identifier, and permissions in the format
+         "[scope:][type]:[id]:[permissions]". Default value is None.
+        :type acl: str
+        :param proposed_lease_id: Proposed lease ID, in a GUID string format. The Blob service returns
+         400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid
+         Constructor (String) for a list of valid GUID string formats. Default value is None.
+        :type proposed_lease_id: str
+        :param lease_duration: The lease duration is required to acquire a lease, and specifies the
+         duration of the lease in seconds.  The lease duration must be between 15 and 60 seconds or -1
+         for infinite lease. Default value is None.
+        :type lease_duration: int
+        :param expiry_options: Optional. Indicates mode of the expiry time. Known values are:
+         "NeverExpire", "RelativeToCreation", "RelativeToNow", and "Absolute". Default value is None.
+        :type expiry_options: str or ~azure.storage.filedatalake.models.PathExpiryOptions
+        :param expires_on: The time to set the blob to expiry. Default value is None.
+        :type expires_on: str
+        :param encryption_context: Specifies the encryption context to set on the file. Default value
+         is None.
+        :type encryption_context: str
+        :param path_http_headers: Parameter group. Default value is None.
+        :type path_http_headers: ~azure.storage.filedatalake.models.PathHTTPHeaders
+        :param lease_access_conditions: Parameter group. Default value is None.
+        :type lease_access_conditions: ~azure.storage.filedatalake.models.LeaseAccessConditions
+        :param modified_access_conditions: Parameter group. Default value is None.
+        :type modified_access_conditions: ~azure.storage.filedatalake.models.ModifiedAccessConditions
+        :param source_modified_access_conditions: Parameter group. Default value is None.
+        :type source_modified_access_conditions:
+         ~azure.storage.filedatalake.models.SourceModifiedAccessConditions
+        :param cpk_info: Parameter group. Default value is None.
+        :type cpk_info: ~azure.storage.filedatalake.models.CpkInfo
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = kwargs.pop("params", {}) or {}
+
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _cache_control = None
+        _content_encoding = None
+        _content_language = None
+        _content_disposition = None
+        _content_type_parameter = None
+        _lease_id = None
+        _if_match = None
+        _if_none_match = None
+        _if_modified_since = None
+        _if_unmodified_since = None
+        _source_if_match = None
+        _source_if_none_match = None
+        _source_if_modified_since = None
+        _source_if_unmodified_since = None
+        _encryption_key = None
+        _encryption_key_sha256 = None
+        _encryption_algorithm = None
+        if path_http_headers is not None:
+            _cache_control = path_http_headers.cache_control
+            _content_disposition = path_http_headers.content_disposition
+            _content_encoding = path_http_headers.content_encoding
+            _content_language = path_http_headers.content_language
+            _content_type_parameter = path_http_headers.content_type
+        if lease_access_conditions is not None:
+            _lease_id = lease_access_conditions.lease_id
+        if modified_access_conditions is not None:
+            _if_match = modified_access_conditions.if_match
+            _if_modified_since = modified_access_conditions.if_modified_since
+            _if_none_match = modified_access_conditions.if_none_match
+            _if_unmodified_since = modified_access_conditions.if_unmodified_since
+        if source_modified_access_conditions is not None:
+            _source_if_match = source_modified_access_conditions.source_if_match
+            _source_if_modified_since = source_modified_access_conditions.source_if_modified_since
+            _source_if_none_match = source_modified_access_conditions.source_if_none_match
+            _source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since
+        if cpk_info is not None:
+            _encryption_algorithm = cpk_info.encryption_algorithm
+            _encryption_key = cpk_info.encryption_key
+            _encryption_key_sha256 = cpk_info.encryption_key_sha256
+
+        _request = build_create_request(
+            url=self._config.url,
+            request_id_parameter=request_id_parameter,
+            timeout=timeout,
+            resource=resource,
+            continuation=continuation,
+            mode=mode,
+            cache_control=_cache_control,
+            content_encoding=_content_encoding,
+            content_language=_content_language,
+            content_disposition=_content_disposition,
+            content_type_parameter=_content_type_parameter,
+            rename_source=rename_source,
+            lease_id=_lease_id,
+            source_lease_id=source_lease_id,
+            properties=properties,
+            permissions=permissions,
+            umask=umask,
+            if_match=_if_match,
+            if_none_match=_if_none_match,
+            if_modified_since=_if_modified_since,
+            if_unmodified_since=_if_unmodified_since,
+            source_if_match=_source_if_match,
+            source_if_none_match=_source_if_none_match,
+            source_if_modified_since=_source_if_modified_since,
+            source_if_unmodified_since=_source_if_unmodified_since,
+            encryption_key=_encryption_key,
+            encryption_key_sha256=_encryption_key_sha256,
+            encryption_algorithm=_encryption_algorithm,  # type: ignore
+            owner=owner,
+            group=group,
+            acl=acl,
+            proposed_lease_id=proposed_lease_id,
+            lease_duration=lease_duration,
+            expiry_options=expiry_options,
+            expires_on=expires_on,
+            encryption_context=encryption_context,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [201]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["x-ms-continuation"] = self._deserialize("str", response.headers.get("x-ms-continuation"))
+        response_headers["Content-Length"] = self._deserialize("int", response.headers.get("Content-Length"))
+        response_headers["x-ms-request-server-encrypted"] = self._deserialize(
+            "bool", response.headers.get("x-ms-request-server-encrypted")
+        )
+        response_headers["x-ms-encryption-key-sha256"] = self._deserialize(
+            "str", response.headers.get("x-ms-encryption-key-sha256")
+        )
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
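+    # Illustrative usage sketch (hypothetical, not generated code): create a file
+    # only when the destination does not already exist, using a conditional
+    # request as described in the docstring above:
+    #
+    #     client.path.create(
+    #         resource="file",
+    #         modified_access_conditions=_models.ModifiedAccessConditions(if_none_match="*"),
+    #     )
+    #
+    # The service answers 412 (Precondition Failed) if the path already exists.
+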
+    @distributed_trace
+    def update(
+        self,
+        action: Union[str, _models.PathUpdateAction],
+        mode: Union[str, _models.PathSetAccessControlRecursiveMode],
+        body: IO[bytes],
+        request_id_parameter: Optional[str] = None,
+        timeout: Optional[int] = None,
+        max_records: Optional[int] = None,
+        continuation: Optional[str] = None,
+        force_flag: Optional[bool] = None,
+        position: Optional[int] = None,
+        retain_uncommitted_data: Optional[bool] = None,
+        close: Optional[bool] = None,
+        content_length: Optional[int] = None,
+        properties: Optional[str] = None,
+        owner: Optional[str] = None,
+        group: Optional[str] = None,
+        permissions: Optional[str] = None,
+        acl: Optional[str] = None,
+        structured_body_type: Optional[str] = None,
+        structured_content_length: Optional[int] = None,
+        path_http_headers: Optional[_models.PathHTTPHeaders] = None,
+        lease_access_conditions: Optional[_models.LeaseAccessConditions] = None,
+        modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None,
+        **kwargs: Any
+    ) -> Optional[_models.SetAccessControlRecursiveResponse]:
+        # pylint: disable=line-too-long
+        """Append Data | Flush Data | Set Properties | Set Access Control.
+
+        Uploads data to be appended to a file, flushes (writes) previously uploaded data to a file,
+        sets properties for a file or directory, or sets access control for a file or directory. Data
+        can only be appended to a file. Concurrent writes to the same file using multiple clients are
+        not supported. This operation supports conditional HTTP requests. For more information, see
+        `Specifying Conditional Headers for Blob Service Operations
+        <https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations>`_.
+
+        :param action: The action must be "append" to upload data to be appended to a file, "flush" to
+         flush previously uploaded data to a file, "setProperties" to set the properties of a file or
+         directory, "setAccessControl" to set the owner, group, permissions, or access control list for
+         a file or directory, or  "setAccessControlRecursive" to set the access control list for a
+         directory recursively. Note that Hierarchical Namespace must be enabled for the account in
+         order to use access control.  Also note that the Access Control List (ACL) includes permissions
+         for the owner, owning group, and others, so the x-ms-permissions and x-ms-acl request headers
+         are mutually exclusive. Known values are: "append", "flush", "setProperties",
+         "setAccessControl", and "setAccessControlRecursive". Required.
+        :type action: str or ~azure.storage.filedatalake.models.PathUpdateAction
+        :param mode: Mode "set" sets POSIX access control rights on files and directories, "modify"
+         modifies one or more POSIX access control rights  that pre-exist on files and directories,
+         "remove" removes one or more POSIX access control rights  that were present earlier on files
+         and directories. Known values are: "set", "modify", and "remove". Required.
+        :type mode: str or ~azure.storage.filedatalake.models.PathSetAccessControlRecursiveMode
+        :param body: Initial data. Required.
+        :type body: IO[bytes]
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param max_records: Optional. Valid for "SetAccessControlRecursive" operation. It specifies the
+         maximum number of files or directories on which the acl change will be applied. If omitted or
+         greater than 2,000, the request will process up to 2,000 items. Default value is None.
+        :type max_records: int
+        :param continuation: Optional. The number of paths processed with each invocation is limited.
+         If the number of paths to be processed exceeds this limit, a continuation token is returned in
+         the response header x-ms-continuation. When a continuation token is returned in the response,
+         it must be percent-encoded and specified in a subsequent invocation of the
+         setAccessControlRecursive operation. Default value is None.
+        :type continuation: str
+        :param force_flag: Optional. Valid for the "setAccessControlRecursive" operation. If false,
+         the operation terminates quickly on encountering user errors (4XX). If true, the operation
+         ignores user errors and proceeds with the operation on the other sub-entities of the
+         directory. A continuation token is only returned when forceFlag is true and user errors are
+         encountered. If unset, the service default is false. Default value is None.
+        :type force_flag: bool
+        :param position: This parameter allows the caller to upload data in parallel and control the
+         order in which it is appended to the file.  It is required when uploading data to be appended
+         to the file and when flushing previously uploaded data to the file.  The value must be the
+         position where the data is to be appended.  Uploaded data is not immediately flushed, or
+         written, to the file.  To flush, the previously uploaded data must be contiguous, the position
+         parameter must be specified and equal to the length of the file after all data has been
+         written, and there must not be a request entity body included with the request. Default value
+         is None.
+        :type position: int
+        :param retain_uncommitted_data: Valid only for flush operations.  If "true", uncommitted data
+         is retained after the flush operation completes; otherwise, the uncommitted data is deleted
+         after the flush operation.  The default is false.  Data at offsets less than the specified
+         position are written to the file when flush succeeds, but this optional parameter allows data
+         after the flush position to be retained for a future flush operation. Default value is None.
+        :type retain_uncommitted_data: bool
+        :param close: Azure Storage Events allow applications to receive notifications when files
+         change. When Azure Storage Events are enabled, a file changed event is raised. This event has a
+         property indicating whether this is the final change to distinguish the difference between an
+         intermediate flush to a file stream and the final close of a file stream. The close query
+         parameter is valid only when the action is "flush" and change notifications are enabled. If the
+         value of close is "true" and the flush operation completes successfully, the service raises a
+         file change notification with a property indicating that this is the final update (the file
+         stream has been closed). If "false" a change notification is raised indicating the file has
+         changed. The default is false. This query parameter is set to true by the Hadoop ABFS driver to
+         indicate that the file stream has been closed. Default value is None.
+        :type close: bool
+        :param content_length: Required for "Append Data" and "Flush Data".  Must be 0 for "Flush
+         Data".  Must be the length of the request content in bytes for "Append Data". Default value is
+         None.
+        :type content_length: int
+        :param properties: Optional. User-defined properties to be stored with the file or directory,
+         in the format of a comma-separated list of name and value pairs "n1=v1, n2=v2, ...", where each
+         value is a base64 encoded string. Note that the string may only contain ASCII characters in the
+         ISO-8859-1 character set.  If the file or directory exists, any properties not included in the
+         list will be removed.  All properties are removed if the header is omitted.  To merge new and
+         existing properties, first get all existing properties and the current E-Tag, then make a
+         conditional request with the E-Tag and include values for all properties. Default value is
+         None.
+        :type properties: str
+        :param owner: Optional. The owner of the blob or directory. Default value is None.
+        :type owner: str
+        :param group: Optional. The owning group of the blob or directory. Default value is None.
+        :type group: str
+        :param permissions: Optional and only valid if Hierarchical Namespace is enabled for the
+         account. Sets POSIX access permissions for the file owner, the file owning group, and others.
+         Each class may be granted read, write, or execute permission.  The sticky bit is also
+         supported.  Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are supported.
+         Default value is None.
+        :type permissions: str
+        :param acl: Sets POSIX access control rights on files and directories. The value is a
+         comma-separated list of access control entries. Each access control entry (ACE) consists of a
+         scope, a type, a user or group identifier, and permissions in the format
+         "[scope:][type]:[id]:[permissions]". Default value is None.
+        :type acl: str
+        :param structured_body_type: Required if the request body is a structured message. Specifies
+         the message schema version and properties. Default value is None.
+        :type structured_body_type: str
+        :param structured_content_length: Required if the request body is a structured message.
+         Specifies the length of the blob/file content inside the message body. Will always be smaller
+         than Content-Length. Default value is None.
+        :type structured_content_length: int
+        :param path_http_headers: Parameter group. Default value is None.
+        :type path_http_headers: ~azure.storage.filedatalake.models.PathHTTPHeaders
+        :param lease_access_conditions: Parameter group. Default value is None.
+        :type lease_access_conditions: ~azure.storage.filedatalake.models.LeaseAccessConditions
+        :param modified_access_conditions: Parameter group. Default value is None.
+        :type modified_access_conditions: ~azure.storage.filedatalake.models.ModifiedAccessConditions
+        :return: SetAccessControlRecursiveResponse or None or the result of cls(response)
+        :rtype: ~azure.storage.filedatalake.models.SetAccessControlRecursiveResponse or None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+        _params = kwargs.pop("params", {}) or {}
+
+        content_type: str = kwargs.pop("content_type", _headers.pop("Content-Type", "application/octet-stream"))
+        cls: ClsType[Optional[_models.SetAccessControlRecursiveResponse]] = kwargs.pop("cls", None)
+
+        _content_md5 = None
+        _lease_id = None
+        _cache_control = None
+        _content_type_parameter = None
+        _content_disposition = None
+        _content_encoding = None
+        _content_language = None
+        _if_match = None
+        _if_none_match = None
+        _if_modified_since = None
+        _if_unmodified_since = None
+        if path_http_headers is not None:
+            _cache_control = path_http_headers.cache_control
+            _content_disposition = path_http_headers.content_disposition
+            _content_encoding = path_http_headers.content_encoding
+            _content_language = path_http_headers.content_language
+            _content_md5 = path_http_headers.content_md5
+            _content_type_parameter = path_http_headers.content_type
+        if lease_access_conditions is not None:
+            _lease_id = lease_access_conditions.lease_id
+        if modified_access_conditions is not None:
+            _if_match = modified_access_conditions.if_match
+            _if_modified_since = modified_access_conditions.if_modified_since
+            _if_none_match = modified_access_conditions.if_none_match
+            _if_unmodified_since = modified_access_conditions.if_unmodified_since
+        _content = body
+
+        _request = build_update_request(
+            url=self._config.url,
+            action=action,
+            mode=mode,
+            request_id_parameter=request_id_parameter,
+            timeout=timeout,
+            max_records=max_records,
+            continuation=continuation,
+            force_flag=force_flag,
+            position=position,
+            retain_uncommitted_data=retain_uncommitted_data,
+            close=close,
+            content_length=content_length,
+            content_md5=_content_md5,
+            lease_id=_lease_id,
+            cache_control=_cache_control,
+            content_type_parameter=_content_type_parameter,
+            content_disposition=_content_disposition,
+            content_encoding=_content_encoding,
+            content_language=_content_language,
+            properties=properties,
+            owner=owner,
+            group=group,
+            permissions=permissions,
+            acl=acl,
+            if_match=_if_match,
+            if_none_match=_if_none_match,
+            if_modified_since=_if_modified_since,
+            if_unmodified_since=_if_unmodified_since,
+            structured_body_type=structured_body_type,
+            structured_content_length=structured_content_length,
+            content_type=content_type,
+            version=self._config.version,
+            content=_content,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200, 202]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        deserialized = None
+        response_headers = {}
+        if response.status_code == 200:
+            response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+            response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+            response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+            response_headers["Accept-Ranges"] = self._deserialize("str", response.headers.get("Accept-Ranges"))
+            response_headers["Cache-Control"] = self._deserialize("str", response.headers.get("Cache-Control"))
+            response_headers["Content-Disposition"] = self._deserialize(
+                "str", response.headers.get("Content-Disposition")
+            )
+            response_headers["Content-Encoding"] = self._deserialize("str", response.headers.get("Content-Encoding"))
+            response_headers["Content-Language"] = self._deserialize("str", response.headers.get("Content-Language"))
+            response_headers["Content-Length"] = self._deserialize("int", response.headers.get("Content-Length"))
+            response_headers["Content-Range"] = self._deserialize("str", response.headers.get("Content-Range"))
+            response_headers["Content-Type"] = self._deserialize("str", response.headers.get("Content-Type"))
+            response_headers["Content-MD5"] = self._deserialize("str", response.headers.get("Content-MD5"))
+            response_headers["x-ms-properties"] = self._deserialize("str", response.headers.get("x-ms-properties"))
+            response_headers["x-ms-continuation"] = self._deserialize("str", response.headers.get("x-ms-continuation"))
+            response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+            response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+
+            deserialized = self._deserialize("SetAccessControlRecursiveResponse", pipeline_response.http_response)
+
+        if response.status_code == 202:
+            response_headers["Content-MD5"] = self._deserialize("str", response.headers.get("Content-MD5"))
+            response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+            response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+            response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+            response_headers["x-ms-structured-body"] = self._deserialize(
+                "str", response.headers.get("x-ms-structured-body")
+            )
+
+        if cls:
+            return cls(pipeline_response, deserialized, response_headers)  # type: ignore
+
+        return deserialized  # type: ignore
+
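+    # Illustrative usage sketch (hypothetical, not generated code): paging a
+    # recursive ACL change through update().  The continuation token travels in
+    # the x-ms-continuation response header, which a caller can capture via the
+    # `cls` response hook:
+    #
+    #     import io
+    #     token = None
+    #     while True:
+    #         headers = {}
+    #         client.path.update(
+    #             action="setAccessControlRecursive",
+    #             mode="modify",
+    #             body=io.BytesIO(b""),
+    #             acl="group::r-x",
+    #             continuation=token,
+    #             cls=lambda resp, deserialized, resp_headers: headers.update(resp_headers),
+    #         )
+    #         token = headers.get("x-ms-continuation")
+    #         if not token:
+    #             break
+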
+    @distributed_trace
+    def lease(  # pylint: disable=inconsistent-return-statements
+        self,
+        x_ms_lease_action: Union[str, _models.PathLeaseAction],
+        request_id_parameter: Optional[str] = None,
+        timeout: Optional[int] = None,
+        x_ms_lease_break_period: Optional[int] = None,
+        proposed_lease_id: Optional[str] = None,
+        lease_access_conditions: Optional[_models.LeaseAccessConditions] = None,
+        modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """Lease Path.
+
+        Create and manage a lease to restrict write and delete access to the path. This operation
+        supports conditional HTTP requests.  For more information, see `Specifying Conditional Headers
+        for Blob Service Operations
+        <https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations>`_.
+
+        :param x_ms_lease_action: There are five lease actions: "acquire", "break", "change", "renew",
+         and "release". Use "acquire" and specify the "x-ms-proposed-lease-id" and "x-ms-lease-duration"
+         to acquire a new lease. Use "break" to break an existing lease. When a lease is broken, the
+         lease break period is allowed to elapse, during which time no lease operation except break and
+         release can be performed on the file. When a lease is successfully broken, the response
+         indicates the interval in seconds until a new lease can be acquired. Use "change" and specify
+         the current lease ID in "x-ms-lease-id" and the new lease ID in "x-ms-proposed-lease-id" to
+         change the lease ID of an active lease. Use "renew" and specify the "x-ms-lease-id" to renew an
+         existing lease. Use "release" and specify the "x-ms-lease-id" to release a lease. Known values
+         are: "acquire", "break", "change", "renew", "release", and "break". Required.
+        :type x_ms_lease_action: str or ~azure.storage.filedatalake.models.PathLeaseAction
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param x_ms_lease_break_period: The lease break period duration is optional to break a lease,
+         and  specifies the break period of the lease in seconds.  The lease break  duration must be
+         between 0 and 60 seconds. Default value is None.
+        :type x_ms_lease_break_period: int
+        :param proposed_lease_id: Proposed lease ID, in a GUID string format. The Blob service returns
+         400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid
+         Constructor (String) for a list of valid GUID string formats. Default value is None.
+        :type proposed_lease_id: str
+        :param lease_access_conditions: Parameter group. Default value is None.
+        :type lease_access_conditions: ~azure.storage.filedatalake.models.LeaseAccessConditions
+        :param modified_access_conditions: Parameter group. Default value is None.
+        :type modified_access_conditions: ~azure.storage.filedatalake.models.ModifiedAccessConditions
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = kwargs.pop("params", {}) or {}
+
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _lease_id = None
+        _if_match = None
+        _if_none_match = None
+        _if_modified_since = None
+        _if_unmodified_since = None
+        if lease_access_conditions is not None:
+            _lease_id = lease_access_conditions.lease_id
+        if modified_access_conditions is not None:
+            _if_match = modified_access_conditions.if_match
+            _if_modified_since = modified_access_conditions.if_modified_since
+            _if_none_match = modified_access_conditions.if_none_match
+            _if_unmodified_since = modified_access_conditions.if_unmodified_since
+
+        _request = build_lease_request(
+            url=self._config.url,
+            x_ms_lease_action=x_ms_lease_action,
+            request_id_parameter=request_id_parameter,
+            timeout=timeout,
+            x_ms_lease_break_period=x_ms_lease_break_period,
+            lease_id=_lease_id,
+            proposed_lease_id=proposed_lease_id,
+            if_match=_if_match,
+            if_none_match=_if_none_match,
+            if_modified_since=_if_modified_since,
+            if_unmodified_since=_if_unmodified_since,
+            x_ms_lease_duration=self._config.x_ms_lease_duration,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200, 201, 202]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        if response.status_code == 200:
+            response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+            response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+            response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+            response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+            response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+            response_headers["x-ms-lease-id"] = self._deserialize("str", response.headers.get("x-ms-lease-id"))
+
+        if response.status_code == 201:
+            response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+            response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+            response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+            response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+            response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+            response_headers["x-ms-lease-id"] = self._deserialize("str", response.headers.get("x-ms-lease-id"))
+
+        if response.status_code == 202:
+            response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+            response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+            response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+            response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+            response_headers["x-ms-lease-time"] = self._deserialize("str", response.headers.get("x-ms-lease-time"))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
+    @distributed_trace
+    def read(
+        self,
+        request_id_parameter: Optional[str] = None,
+        timeout: Optional[int] = None,
+        range: Optional[str] = None,
+        x_ms_range_get_content_md5: Optional[bool] = None,
+        lease_access_conditions: Optional[_models.LeaseAccessConditions] = None,
+        modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None,
+        cpk_info: Optional[_models.CpkInfo] = None,
+        **kwargs: Any
+    ) -> Iterator[bytes]:
+        # pylint: disable=line-too-long
+        """Read File.
+
+        Read the contents of a file.  For read operations, range requests are supported. This operation
+        supports conditional HTTP requests.  For more information, see `Specifying Conditional Headers
+        for Blob Service Operations
+        <https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations>`_.
+
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param range: The HTTP Range request header specifies one or more byte ranges of the resource
+         to be retrieved. Default value is None.
+        :type range: str
+        :param x_ms_range_get_content_md5: Optional. When this header is set to "true" and specified
+         together with the Range header, the service returns the MD5 hash for the range, as long as the
+         range is less than or equal to 4MB in size. If this header is specified without the Range
+         header, the service returns status code 400 (Bad Request). If this header is set to true when
+         the range exceeds 4 MB in size, the service returns status code 400 (Bad Request). Default
+         value is None.
+        :type x_ms_range_get_content_md5: bool
+        :param lease_access_conditions: Parameter group. Default value is None.
+        :type lease_access_conditions: ~azure.storage.filedatalake.models.LeaseAccessConditions
+        :param modified_access_conditions: Parameter group. Default value is None.
+        :type modified_access_conditions: ~azure.storage.filedatalake.models.ModifiedAccessConditions
+        :param cpk_info: Parameter group. Default value is None.
+        :type cpk_info: ~azure.storage.filedatalake.models.CpkInfo
+        :return: Iterator[bytes] or the result of cls(response)
+        :rtype: Iterator[bytes]
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = kwargs.pop("params", {}) or {}
+
+        cls: ClsType[Iterator[bytes]] = kwargs.pop("cls", None)
+
+        _lease_id = None
+        _if_match = None
+        _if_none_match = None
+        _if_modified_since = None
+        _if_unmodified_since = None
+        _encryption_key = None
+        _encryption_key_sha256 = None
+        _encryption_algorithm = None
+        if lease_access_conditions is not None:
+            _lease_id = lease_access_conditions.lease_id
+        if modified_access_conditions is not None:
+            _if_match = modified_access_conditions.if_match
+            _if_modified_since = modified_access_conditions.if_modified_since
+            _if_none_match = modified_access_conditions.if_none_match
+            _if_unmodified_since = modified_access_conditions.if_unmodified_since
+        if cpk_info is not None:
+            _encryption_algorithm = cpk_info.encryption_algorithm
+            _encryption_key = cpk_info.encryption_key
+            _encryption_key_sha256 = cpk_info.encryption_key_sha256
+
+        _request = build_read_request(
+            url=self._config.url,
+            request_id_parameter=request_id_parameter,
+            timeout=timeout,
+            range=range,
+            lease_id=_lease_id,
+            x_ms_range_get_content_md5=x_ms_range_get_content_md5,
+            if_match=_if_match,
+            if_none_match=_if_none_match,
+            if_modified_since=_if_modified_since,
+            if_unmodified_since=_if_unmodified_since,
+            encryption_key=_encryption_key,
+            encryption_key_sha256=_encryption_key_sha256,
+            encryption_algorithm=_encryption_algorithm,  # type: ignore
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _decompress = kwargs.pop("decompress", True)
+        _stream = True
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200, 206]:
+            try:
+                response.read()  # Load the body in memory and close the socket
+            except (StreamConsumedError, StreamClosedError):
+                pass
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        if response.status_code == 200:
+            response_headers["Accept-Ranges"] = self._deserialize("str", response.headers.get("Accept-Ranges"))
+            response_headers["Cache-Control"] = self._deserialize("str", response.headers.get("Cache-Control"))
+            response_headers["Content-Disposition"] = self._deserialize(
+                "str", response.headers.get("Content-Disposition")
+            )
+            response_headers["Content-Encoding"] = self._deserialize("str", response.headers.get("Content-Encoding"))
+            response_headers["Content-Language"] = self._deserialize("str", response.headers.get("Content-Language"))
+            response_headers["Content-Length"] = self._deserialize("int", response.headers.get("Content-Length"))
+            response_headers["Content-Range"] = self._deserialize("str", response.headers.get("Content-Range"))
+            response_headers["Content-Type"] = self._deserialize("str", response.headers.get("Content-Type"))
+            response_headers["Content-MD5"] = self._deserialize("str", response.headers.get("Content-MD5"))
+            response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+            response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+            response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+            response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+            response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+            response_headers["x-ms-resource-type"] = self._deserialize(
+                "str", response.headers.get("x-ms-resource-type")
+            )
+            response_headers["x-ms-properties"] = self._deserialize("str", response.headers.get("x-ms-properties"))
+            response_headers["x-ms-lease-duration"] = self._deserialize(
+                "str", response.headers.get("x-ms-lease-duration")
+            )
+            response_headers["x-ms-lease-state"] = self._deserialize("str", response.headers.get("x-ms-lease-state"))
+            response_headers["x-ms-lease-status"] = self._deserialize("str", response.headers.get("x-ms-lease-status"))
+            response_headers["x-ms-request-server-encrypted"] = self._deserialize(
+                "bool", response.headers.get("x-ms-request-server-encrypted")
+            )
+            response_headers["x-ms-encryption-key-sha256"] = self._deserialize(
+                "str", response.headers.get("x-ms-encryption-key-sha256")
+            )
+
+        if response.status_code == 206:
+            response_headers["Accept-Ranges"] = self._deserialize("str", response.headers.get("Accept-Ranges"))
+            response_headers["Cache-Control"] = self._deserialize("str", response.headers.get("Cache-Control"))
+            response_headers["Content-Disposition"] = self._deserialize(
+                "str", response.headers.get("Content-Disposition")
+            )
+            response_headers["Content-Encoding"] = self._deserialize("str", response.headers.get("Content-Encoding"))
+            response_headers["Content-Language"] = self._deserialize("str", response.headers.get("Content-Language"))
+            response_headers["Content-Length"] = self._deserialize("int", response.headers.get("Content-Length"))
+            response_headers["Content-Range"] = self._deserialize("str", response.headers.get("Content-Range"))
+            response_headers["Content-Type"] = self._deserialize("str", response.headers.get("Content-Type"))
+            response_headers["Content-MD5"] = self._deserialize("str", response.headers.get("Content-MD5"))
+            response_headers["x-ms-content-md5"] = self._deserialize("str", response.headers.get("x-ms-content-md5"))
+            response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+            response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+            response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+            response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+            response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+            response_headers["x-ms-resource-type"] = self._deserialize(
+                "str", response.headers.get("x-ms-resource-type")
+            )
+            response_headers["x-ms-properties"] = self._deserialize("str", response.headers.get("x-ms-properties"))
+            response_headers["x-ms-lease-duration"] = self._deserialize(
+                "str", response.headers.get("x-ms-lease-duration")
+            )
+            response_headers["x-ms-lease-state"] = self._deserialize("str", response.headers.get("x-ms-lease-state"))
+            response_headers["x-ms-lease-status"] = self._deserialize("str", response.headers.get("x-ms-lease-status"))
+            response_headers["x-ms-request-server-encrypted"] = self._deserialize(
+                "bool", response.headers.get("x-ms-request-server-encrypted")
+            )
+            response_headers["x-ms-encryption-key-sha256"] = self._deserialize(
+                "str", response.headers.get("x-ms-encryption-key-sha256")
+            )
+
+        deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
+
+        if cls:
+            return cls(pipeline_response, deserialized, response_headers)  # type: ignore
+
+        return deserialized  # type: ignore
+
+    @distributed_trace
+    def get_properties(  # pylint: disable=inconsistent-return-statements
+        self,
+        request_id_parameter: Optional[str] = None,
+        timeout: Optional[int] = None,
+        action: Optional[Union[str, _models.PathGetPropertiesAction]] = None,
+        upn: Optional[bool] = None,
+        lease_access_conditions: Optional[_models.LeaseAccessConditions] = None,
+        modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """Get Properties | Get Status | Get Access Control List.
+
+        Get Properties returns all system and user defined properties for a path. Get Status returns
+        all system defined properties for a path. Get Access Control List returns the access control
+        list for a path. This operation supports conditional HTTP requests.  For more information, see
+        `Specifying Conditional Headers for Blob Service Operations
+        <https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations>`_.
+
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param action: Optional. If the value is "getStatus" only the system defined properties for the
+         path are returned. If the value is "getAccessControl" the access control list is returned in
+         the response headers (Hierarchical Namespace must be enabled for the account), otherwise the
+         properties are returned. Known values are: "getAccessControl" and "getStatus". Default value is
+         None.
+        :type action: str or ~azure.storage.filedatalake.models.PathGetPropertiesAction
+        :param upn: Optional. Valid only when Hierarchical Namespace is enabled for the account. If
+         "true", the user identity values returned in the x-ms-owner, x-ms-group, and x-ms-acl response
+         headers will be transformed from Azure Active Directory Object IDs to User Principal Names.  If
+         "false", the values will be returned as Azure Active Directory Object IDs. The default value is
+         false. Note that group and application Object IDs are not translated because they do not have
+         unique friendly names. Default value is None.
+        :type upn: bool
+        :param lease_access_conditions: Parameter group. Default value is None.
+        :type lease_access_conditions: ~azure.storage.filedatalake.models.LeaseAccessConditions
+        :param modified_access_conditions: Parameter group. Default value is None.
+        :type modified_access_conditions: ~azure.storage.filedatalake.models.ModifiedAccessConditions
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = kwargs.pop("params", {}) or {}
+
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _lease_id = None
+        _if_match = None
+        _if_none_match = None
+        _if_modified_since = None
+        _if_unmodified_since = None
+        if lease_access_conditions is not None:
+            _lease_id = lease_access_conditions.lease_id
+        if modified_access_conditions is not None:
+            _if_match = modified_access_conditions.if_match
+            _if_modified_since = modified_access_conditions.if_modified_since
+            _if_none_match = modified_access_conditions.if_none_match
+            _if_unmodified_since = modified_access_conditions.if_unmodified_since
+
+        _request = build_get_properties_request(
+            url=self._config.url,
+            request_id_parameter=request_id_parameter,
+            timeout=timeout,
+            action=action,
+            upn=upn,
+            lease_id=_lease_id,
+            if_match=_if_match,
+            if_none_match=_if_none_match,
+            if_modified_since=_if_modified_since,
+            if_unmodified_since=_if_unmodified_since,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["Accept-Ranges"] = self._deserialize("str", response.headers.get("Accept-Ranges"))
+        response_headers["Cache-Control"] = self._deserialize("str", response.headers.get("Cache-Control"))
+        response_headers["Content-Disposition"] = self._deserialize("str", response.headers.get("Content-Disposition"))
+        response_headers["Content-Encoding"] = self._deserialize("str", response.headers.get("Content-Encoding"))
+        response_headers["Content-Language"] = self._deserialize("str", response.headers.get("Content-Language"))
+        response_headers["Content-Length"] = self._deserialize("int", response.headers.get("Content-Length"))
+        response_headers["Content-Range"] = self._deserialize("str", response.headers.get("Content-Range"))
+        response_headers["Content-Type"] = self._deserialize("str", response.headers.get("Content-Type"))
+        response_headers["Content-MD5"] = self._deserialize("str", response.headers.get("Content-MD5"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["x-ms-resource-type"] = self._deserialize("str", response.headers.get("x-ms-resource-type"))
+        response_headers["x-ms-properties"] = self._deserialize("str", response.headers.get("x-ms-properties"))
+        response_headers["x-ms-owner"] = self._deserialize("str", response.headers.get("x-ms-owner"))
+        response_headers["x-ms-group"] = self._deserialize("str", response.headers.get("x-ms-group"))
+        response_headers["x-ms-permissions"] = self._deserialize("str", response.headers.get("x-ms-permissions"))
+        response_headers["x-ms-acl"] = self._deserialize("str", response.headers.get("x-ms-acl"))
+        response_headers["x-ms-lease-duration"] = self._deserialize("str", response.headers.get("x-ms-lease-duration"))
+        response_headers["x-ms-lease-state"] = self._deserialize("str", response.headers.get("x-ms-lease-state"))
+        response_headers["x-ms-lease-status"] = self._deserialize("str", response.headers.get("x-ms-lease-status"))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
+    @distributed_trace
+    def delete(  # pylint: disable=inconsistent-return-statements
+        self,
+        request_id_parameter: Optional[str] = None,
+        timeout: Optional[int] = None,
+        recursive: Optional[bool] = None,
+        continuation: Optional[str] = None,
+        paginated: Optional[bool] = None,
+        lease_access_conditions: Optional[_models.LeaseAccessConditions] = None,
+        modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """Delete File | Delete Directory.
+
+        Delete the file or directory. This operation supports conditional HTTP requests.  For more
+        information, see `Specifying Conditional Headers for Blob Service Operations
+        <https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations>`_.
+
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param recursive: Required. Default value is None.
+        :type recursive: bool
+        :param continuation: Optional.  When deleting a directory, the number of paths that are deleted
+         with each invocation is limited.  If the number of paths to be deleted exceeds this limit, a
+         continuation token is returned in this response header.  When a continuation token is returned
+         in the response, it must be specified in a subsequent invocation of the delete operation to
+         continue deleting the directory. Default value is None.
+        :type continuation: str
+        :param paginated: If true, paginated behavior will be seen. Pagination is for the recursive ACL
+         checks as a POSIX requirement in the server and Delete in an atomic operation once the ACL
+         checks are completed. If false or missing, normal default behavior will kick in, which may
+         timeout in case of very large directories due to recursive ACL checks. This new parameter is
+         introduced for backward compatibility. Default value is None.
+        :type paginated: bool
+        :param lease_access_conditions: Parameter group. Default value is None.
+        :type lease_access_conditions: ~azure.storage.filedatalake.models.LeaseAccessConditions
+        :param modified_access_conditions: Parameter group. Default value is None.
+        :type modified_access_conditions: ~azure.storage.filedatalake.models.ModifiedAccessConditions
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = kwargs.pop("params", {}) or {}
+
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _lease_id = None
+        _if_match = None
+        _if_none_match = None
+        _if_modified_since = None
+        _if_unmodified_since = None
+        if lease_access_conditions is not None:
+            _lease_id = lease_access_conditions.lease_id
+        if modified_access_conditions is not None:
+            _if_match = modified_access_conditions.if_match
+            _if_modified_since = modified_access_conditions.if_modified_since
+            _if_none_match = modified_access_conditions.if_none_match
+            _if_unmodified_since = modified_access_conditions.if_unmodified_since
+
+        _request = build_delete_request(
+            url=self._config.url,
+            request_id_parameter=request_id_parameter,
+            timeout=timeout,
+            recursive=recursive,
+            continuation=continuation,
+            lease_id=_lease_id,
+            if_match=_if_match,
+            if_none_match=_if_none_match,
+            if_modified_since=_if_modified_since,
+            if_unmodified_since=_if_unmodified_since,
+            paginated=paginated,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200, 202]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        if response.status_code == 200:
+            response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+            response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+            response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+            response_headers["x-ms-continuation"] = self._deserialize("str", response.headers.get("x-ms-continuation"))
+            response_headers["x-ms-deletion-id"] = self._deserialize("str", response.headers.get("x-ms-deletion-id"))
+
+        if response.status_code == 202:
+            response_headers["Date"] = self._deserialize("str", response.headers.get("Date"))
+            response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+            response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+            response_headers["x-ms-continuation"] = self._deserialize("str", response.headers.get("x-ms-continuation"))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
+    @distributed_trace
+    def set_access_control(  # pylint: disable=inconsistent-return-statements
+        self,
+        timeout: Optional[int] = None,
+        owner: Optional[str] = None,
+        group: Optional[str] = None,
+        permissions: Optional[str] = None,
+        acl: Optional[str] = None,
+        request_id_parameter: Optional[str] = None,
+        lease_access_conditions: Optional[_models.LeaseAccessConditions] = None,
+        modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """Set the owner, group, permissions, or access control list for a path.
+
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param owner: Optional. The owner of the blob or directory. Default value is None.
+        :type owner: str
+        :param group: Optional. The owning group of the blob or directory. Default value is None.
+        :type group: str
+        :param permissions: Optional and only valid if Hierarchical Namespace is enabled for the
+         account. Sets POSIX access permissions for the file owner, the file owning group, and others.
+         Each class may be granted read, write, or execute permission.  The sticky bit is also
+         supported.  Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are supported.
+         Default value is None.
+        :type permissions: str
+        :param acl: Sets POSIX access control rights on files and directories. The value is a
+         comma-separated list of access control entries. Each access control entry (ACE) consists of a
+         scope, a type, a user or group identifier, and permissions in the format
+         "[scope:][type]:[id]:[permissions]". Default value is None.
+        :type acl: str
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param lease_access_conditions: Parameter group. Default value is None.
+        :type lease_access_conditions: ~azure.storage.filedatalake.models.LeaseAccessConditions
+        :param modified_access_conditions: Parameter group. Default value is None.
+        :type modified_access_conditions: ~azure.storage.filedatalake.models.ModifiedAccessConditions
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        action: Literal["setAccessControl"] = kwargs.pop("action", _params.pop("action", "setAccessControl"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _lease_id = None
+        _if_match = None
+        _if_none_match = None
+        _if_modified_since = None
+        _if_unmodified_since = None
+        if lease_access_conditions is not None:
+            _lease_id = lease_access_conditions.lease_id
+        if modified_access_conditions is not None:
+            _if_match = modified_access_conditions.if_match
+            _if_modified_since = modified_access_conditions.if_modified_since
+            _if_none_match = modified_access_conditions.if_none_match
+            _if_unmodified_since = modified_access_conditions.if_unmodified_since
+
+        _request = build_set_access_control_request(
+            url=self._config.url,
+            timeout=timeout,
+            lease_id=_lease_id,
+            owner=owner,
+            group=group,
+            permissions=permissions,
+            acl=acl,
+            if_match=_if_match,
+            if_none_match=_if_none_match,
+            if_modified_since=_if_modified_since,
+            if_unmodified_since=_if_unmodified_since,
+            request_id_parameter=request_id_parameter,
+            action=action,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
+    @distributed_trace
+    def set_access_control_recursive(
+        self,
+        mode: Union[str, _models.PathSetAccessControlRecursiveMode],
+        timeout: Optional[int] = None,
+        continuation: Optional[str] = None,
+        force_flag: Optional[bool] = None,
+        max_records: Optional[int] = None,
+        acl: Optional[str] = None,
+        request_id_parameter: Optional[str] = None,
+        **kwargs: Any
+    ) -> _models.SetAccessControlRecursiveResponse:
+        # pylint: disable=line-too-long
+        """Set the access control list for a path and sub-paths.
+
+        :param mode: Mode "set" sets POSIX access control rights on files and directories, "modify"
+         modifies one or more POSIX access control rights  that pre-exist on files and directories,
+         "remove" removes one or more POSIX access control rights  that were present earlier on files
+         and directories. Known values are: "set", "modify", and "remove". Required.
+        :type mode: str or ~azure.storage.filedatalake.models.PathSetAccessControlRecursiveMode
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param continuation: Optional.  When deleting a directory, the number of paths that are deleted
+         with each invocation is limited.  If the number of paths to be deleted exceeds this limit, a
+         continuation token is returned in this response header.  When a continuation token is returned
+         in the response, it must be specified in a subsequent invocation of the delete operation to
+         continue deleting the directory. Default value is None.
+        :type continuation: str
+        :param force_flag: Optional. Valid for "SetAccessControlRecursive" operation. If set to false,
+         the operation will terminate quickly on encountering user errors (4XX). If true, the operation
+         will ignore user errors and proceed with the operation on other sub-entities of the directory.
+         Continuation token will only be returned when forceFlag is true in case of user errors. If not
+         set the default value is false for this. Default value is None.
+        :type force_flag: bool
+        :param max_records: Optional. It specifies the maximum number of files or directories on which
+         the acl change will be applied. If omitted or greater than 2,000, the request will process up
+         to 2,000 items. Default value is None.
+        :type max_records: int
+        :param acl: Sets POSIX access control rights on files and directories. The value is a
+         comma-separated list of access control entries. Each access control entry (ACE) consists of a
+         scope, a type, a user or group identifier, and permissions in the format
+         "[scope:][type]:[id]:[permissions]". Default value is None.
+        :type acl: str
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :return: SetAccessControlRecursiveResponse or the result of cls(response)
+        :rtype: ~azure.storage.filedatalake.models.SetAccessControlRecursiveResponse
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        action: Literal["setAccessControlRecursive"] = kwargs.pop(
+            "action", _params.pop("action", "setAccessControlRecursive")
+        )
+        cls: ClsType[_models.SetAccessControlRecursiveResponse] = kwargs.pop("cls", None)
+
+        _request = build_set_access_control_recursive_request(
+            url=self._config.url,
+            mode=mode,
+            timeout=timeout,
+            continuation=continuation,
+            force_flag=force_flag,
+            max_records=max_records,
+            acl=acl,
+            request_id_parameter=request_id_parameter,
+            action=action,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-continuation"] = self._deserialize("str", response.headers.get("x-ms-continuation"))
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+
+        deserialized = self._deserialize("SetAccessControlRecursiveResponse", pipeline_response.http_response)
+
+        if cls:
+            return cls(pipeline_response, deserialized, response_headers)  # type: ignore
+
+        return deserialized  # type: ignore
+
+    @distributed_trace
+    def flush_data(  # pylint: disable=inconsistent-return-statements
+        self,
+        timeout: Optional[int] = None,
+        position: Optional[int] = None,
+        retain_uncommitted_data: Optional[bool] = None,
+        close: Optional[bool] = None,
+        content_length: Optional[int] = None,
+        lease_action: Optional[Union[str, _models.LeaseAction]] = None,
+        lease_duration: Optional[int] = None,
+        proposed_lease_id: Optional[str] = None,
+        request_id_parameter: Optional[str] = None,
+        path_http_headers: Optional[_models.PathHTTPHeaders] = None,
+        lease_access_conditions: Optional[_models.LeaseAccessConditions] = None,
+        modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None,
+        cpk_info: Optional[_models.CpkInfo] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """Set the owner, group, permissions, or access control list for a path.
+
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param position: This parameter allows the caller to upload data in parallel and control the
+         order in which it is appended to the file.  It is required when uploading data to be appended
+         to the file and when flushing previously uploaded data to the file.  The value must be the
+         position where the data is to be appended.  Uploaded data is not immediately flushed, or
+         written, to the file.  To flush, the previously uploaded data must be contiguous, the position
+         parameter must be specified and equal to the length of the file after all data has been
+         written, and there must not be a request entity body included with the request. Default value
+         is None.
+        :type position: int
+        :param retain_uncommitted_data: Valid only for flush operations.  If "true", uncommitted data
+         is retained after the flush operation completes; otherwise, the uncommitted data is deleted
+         after the flush operation.  The default is false.  Data at offsets less than the specified
+         position are written to the file when flush succeeds, but this optional parameter allows data
+         after the flush position to be retained for a future flush operation. Default value is None.
+        :type retain_uncommitted_data: bool
+        :param close: Azure Storage Events allow applications to receive notifications when files
+         change. When Azure Storage Events are enabled, a file changed event is raised. This event has a
+         property indicating whether this is the final change to distinguish the difference between an
+         intermediate flush to a file stream and the final close of a file stream. The close query
+         parameter is valid only when the action is "flush" and change notifications are enabled. If the
+         value of close is "true" and the flush operation completes successfully, the service raises a
+         file change notification with a property indicating that this is the final update (the file
+         stream has been closed). If "false" a change notification is raised indicating the file has
+         changed. The default is false. This query parameter is set to true by the Hadoop ABFS driver to
+         indicate that the file stream has been closed.". Default value is None.
+        :type close: bool
+        :param content_length: Required for "Append Data" and "Flush Data".  Must be 0 for "Flush
+         Data".  Must be the length of the request content in bytes for "Append Data". Default value is
+         None.
+        :type content_length: int
+        :param lease_action: Optional. If "acquire" it will acquire the lease. If "auto-renew" it will
+         renew the lease. If "release" it will release the lease only on flush. If "acquire-release" it
+         will acquire & complete the operation & release the lease once operation is done. Known values
+         are: "acquire", "auto-renew", "release", and "acquire-release". Default value is None.
+        :type lease_action: str or ~azure.storage.filedatalake.models.LeaseAction
+        :param lease_duration: The lease duration is required to acquire a lease, and specifies the
+         duration of the lease in seconds.  The lease duration must be between 15 and 60 seconds or -1
+         for infinite lease. Default value is None.
+        :type lease_duration: int
+        :param proposed_lease_id: Proposed lease ID, in a GUID string format. The Blob service returns
+         400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid
+         Constructor (String) for a list of valid GUID string formats. Default value is None.
+        :type proposed_lease_id: str
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param path_http_headers: Parameter group. Default value is None.
+        :type path_http_headers: ~azure.storage.filedatalake.models.PathHTTPHeaders
+        :param lease_access_conditions: Parameter group. Default value is None.
+        :type lease_access_conditions: ~azure.storage.filedatalake.models.LeaseAccessConditions
+        :param modified_access_conditions: Parameter group. Default value is None.
+        :type modified_access_conditions: ~azure.storage.filedatalake.models.ModifiedAccessConditions
+        :param cpk_info: Parameter group. Default value is None.
+        :type cpk_info: ~azure.storage.filedatalake.models.CpkInfo
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        action: Literal["flush"] = kwargs.pop("action", _params.pop("action", "flush"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _content_md5 = None
+        _lease_id = None
+        _cache_control = None
+        _content_type_parameter = None
+        _content_disposition = None
+        _content_encoding = None
+        _content_language = None
+        _if_match = None
+        _if_none_match = None
+        _if_modified_since = None
+        _if_unmodified_since = None
+        _encryption_key = None
+        _encryption_key_sha256 = None
+        _encryption_algorithm = None
+        if path_http_headers is not None:
+            _cache_control = path_http_headers.cache_control
+            _content_disposition = path_http_headers.content_disposition
+            _content_encoding = path_http_headers.content_encoding
+            _content_language = path_http_headers.content_language
+            _content_md5 = path_http_headers.content_md5
+            _content_type_parameter = path_http_headers.content_type
+        if lease_access_conditions is not None:
+            _lease_id = lease_access_conditions.lease_id
+        if modified_access_conditions is not None:
+            _if_match = modified_access_conditions.if_match
+            _if_modified_since = modified_access_conditions.if_modified_since
+            _if_none_match = modified_access_conditions.if_none_match
+            _if_unmodified_since = modified_access_conditions.if_unmodified_since
+        if cpk_info is not None:
+            _encryption_algorithm = cpk_info.encryption_algorithm
+            _encryption_key = cpk_info.encryption_key
+            _encryption_key_sha256 = cpk_info.encryption_key_sha256
+
+        _request = build_flush_data_request(
+            url=self._config.url,
+            timeout=timeout,
+            position=position,
+            retain_uncommitted_data=retain_uncommitted_data,
+            close=close,
+            content_length=content_length,
+            content_md5=_content_md5,
+            lease_id=_lease_id,
+            lease_action=lease_action,
+            lease_duration=lease_duration,
+            proposed_lease_id=proposed_lease_id,
+            cache_control=_cache_control,
+            content_type_parameter=_content_type_parameter,
+            content_disposition=_content_disposition,
+            content_encoding=_content_encoding,
+            content_language=_content_language,
+            if_match=_if_match,
+            if_none_match=_if_none_match,
+            if_modified_since=_if_modified_since,
+            if_unmodified_since=_if_unmodified_since,
+            request_id_parameter=request_id_parameter,
+            encryption_key=_encryption_key,
+            encryption_key_sha256=_encryption_key_sha256,
+            encryption_algorithm=_encryption_algorithm,  # type: ignore
+            action=action,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["Content-Length"] = self._deserialize("int", response.headers.get("Content-Length"))
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["x-ms-request-server-encrypted"] = self._deserialize(
+            "bool", response.headers.get("x-ms-request-server-encrypted")
+        )
+        response_headers["x-ms-encryption-key-sha256"] = self._deserialize(
+            "str", response.headers.get("x-ms-encryption-key-sha256")
+        )
+        response_headers["x-ms-lease-renewed"] = self._deserialize("bool", response.headers.get("x-ms-lease-renewed"))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
+    @distributed_trace
+    def append_data(  # pylint: disable=inconsistent-return-statements
+        self,
+        body: IO[bytes],
+        position: Optional[int] = None,
+        timeout: Optional[int] = None,
+        content_length: Optional[int] = None,
+        transactional_content_crc64: Optional[bytes] = None,
+        lease_action: Optional[Union[str, _models.LeaseAction]] = None,
+        lease_duration: Optional[int] = None,
+        proposed_lease_id: Optional[str] = None,
+        request_id_parameter: Optional[str] = None,
+        flush: Optional[bool] = None,
+        structured_body_type: Optional[str] = None,
+        structured_content_length: Optional[int] = None,
+        path_http_headers: Optional[_models.PathHTTPHeaders] = None,
+        lease_access_conditions: Optional[_models.LeaseAccessConditions] = None,
+        cpk_info: Optional[_models.CpkInfo] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """Append data to the file.
+
+        :param body: Initial data. Required.
+        :type body: IO[bytes]
+        :param position: This parameter allows the caller to upload data in parallel and control the
+         order in which it is appended to the file.  It is required when uploading data to be appended
+         to the file and when flushing previously uploaded data to the file.  The value must be the
+         position where the data is to be appended.  Uploaded data is not immediately flushed, or
+         written, to the file.  To flush, the previously uploaded data must be contiguous, the position
+         parameter must be specified and equal to the length of the file after all data has been
+         written, and there must not be a request entity body included with the request. Default value
+         is None.
+        :type position: int
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param content_length: Required for "Append Data" and "Flush Data".  Must be 0 for "Flush
+         Data".  Must be the length of the request content in bytes for "Append Data". Default value is
+         None.
+        :type content_length: int
+        :param transactional_content_crc64: Specify the transactional crc64 for the body, to be
+         validated by the service. Default value is None.
+        :type transactional_content_crc64: bytes
+        :param lease_action: Optional. If "acquire" it will acquire the lease. If "auto-renew" it
+         will renew the lease. If "release" it will release the lease only on flush. If
+         "acquire-release" it will acquire the lease, complete the operation, and then release the
+         lease once the operation is done. Known values are: "acquire", "auto-renew", "release",
+         and "acquire-release". Default value is None.
+        :type lease_action: str or ~azure.storage.filedatalake.models.LeaseAction
+        :param lease_duration: The lease duration is required to acquire a lease, and specifies the
+         duration of the lease in seconds.  The lease duration must be between 15 and 60 seconds or -1
+         for infinite lease. Default value is None.
+        :type lease_duration: int
+        :param proposed_lease_id: Proposed lease ID, in a GUID string format. The Blob service returns
+         400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid
+         Constructor (String) for a list of valid GUID string formats. Default value is None.
+        :type proposed_lease_id: str
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param flush: If file should be flushed after the append. Default value is None.
+        :type flush: bool
+        :param structured_body_type: Required if the request body is a structured message. Specifies
+         the message schema version and properties. Default value is None.
+        :type structured_body_type: str
+        :param structured_content_length: Required if the request body is a structured message.
+         Specifies the length of the blob/file content inside the message body. Will always be smaller
+         than Content-Length. Default value is None.
+        :type structured_content_length: int
+        :param path_http_headers: Parameter group. Default value is None.
+        :type path_http_headers: ~azure.storage.filedatalake.models.PathHTTPHeaders
+        :param lease_access_conditions: Parameter group. Default value is None.
+        :type lease_access_conditions: ~azure.storage.filedatalake.models.LeaseAccessConditions
+        :param cpk_info: Parameter group. Default value is None.
+        :type cpk_info: ~azure.storage.filedatalake.models.CpkInfo
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
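+
+        A minimal usage sketch (assuming an authenticated
+        ``AzureDataLakeStorageRESTAPI`` client named ``client`` whose ``path``
+        attribute exposes these operations; the data and offsets are illustrative):
+
+        .. code-block:: python
+
+            import io
+
+            data = b"hello world"
+            # Stage the bytes at offset 0; appended data is not yet readable.
+            client.path.append_data(body=io.BytesIO(data), position=0, content_length=len(data))
+            # Flush with position == total bytes written and a zero content length.
+            client.path.flush_data(position=len(data), content_length=0, close=True)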
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        action: Literal["append"] = kwargs.pop("action", _params.pop("action", "append"))
+        content_type: str = kwargs.pop("content_type", _headers.pop("Content-Type", "application/json"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _transactional_content_hash = None
+        _lease_id = None
+        _encryption_key = None
+        _encryption_key_sha256 = None
+        _encryption_algorithm = None
+        if path_http_headers is not None:
+            _transactional_content_hash = path_http_headers.transactional_content_hash
+        if lease_access_conditions is not None:
+            _lease_id = lease_access_conditions.lease_id
+        if cpk_info is not None:
+            _encryption_algorithm = cpk_info.encryption_algorithm
+            _encryption_key = cpk_info.encryption_key
+            _encryption_key_sha256 = cpk_info.encryption_key_sha256
+        _content = body
+
+        _request = build_append_data_request(
+            url=self._config.url,
+            position=position,
+            timeout=timeout,
+            content_length=content_length,
+            transactional_content_hash=_transactional_content_hash,
+            transactional_content_crc64=transactional_content_crc64,
+            lease_id=_lease_id,
+            lease_action=lease_action,
+            lease_duration=lease_duration,
+            proposed_lease_id=proposed_lease_id,
+            request_id_parameter=request_id_parameter,
+            encryption_key=_encryption_key,
+            encryption_key_sha256=_encryption_key_sha256,
+            encryption_algorithm=_encryption_algorithm,  # type: ignore
+            flush=flush,
+            structured_body_type=structured_body_type,
+            structured_content_length=structured_content_length,
+            action=action,
+            content_type=content_type,
+            version=self._config.version,
+            content=_content,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [202]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Content-MD5"] = self._deserialize("bytearray", response.headers.get("Content-MD5"))
+        response_headers["x-ms-content-crc64"] = self._deserialize(
+            "bytearray", response.headers.get("x-ms-content-crc64")
+        )
+        response_headers["x-ms-request-server-encrypted"] = self._deserialize(
+            "bool", response.headers.get("x-ms-request-server-encrypted")
+        )
+        response_headers["x-ms-encryption-key-sha256"] = self._deserialize(
+            "str", response.headers.get("x-ms-encryption-key-sha256")
+        )
+        response_headers["x-ms-lease-renewed"] = self._deserialize("bool", response.headers.get("x-ms-lease-renewed"))
+        response_headers["x-ms-structured-body"] = self._deserialize(
+            "str", response.headers.get("x-ms-structured-body")
+        )
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
+    @distributed_trace
+    def set_expiry(  # pylint: disable=inconsistent-return-statements
+        self,
+        expiry_options: Union[str, _models.PathExpiryOptions],
+        timeout: Optional[int] = None,
+        request_id_parameter: Optional[str] = None,
+        expires_on: Optional[str] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """Sets the time a blob will expire and be deleted.
+
+        :param expiry_options: Indicates the mode of the expiry time. Known values are:
+         "NeverExpire", "RelativeToCreation", "RelativeToNow", and "Absolute". Required.
+        :type expiry_options: str or ~azure.storage.filedatalake.models.PathExpiryOptions
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param expires_on: The time to set the blob to expire. Default value is None.
+        :type expires_on: str
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
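+
+        A minimal sketch (assuming a ``client.path`` operations instance; for the
+        relative modes the value is a duration in milliseconds, for "Absolute" an
+        RFC-1123 timestamp, both passed as strings):
+
+        .. code-block:: python
+
+            # Expire the file 30 seconds from now.
+            client.path.set_expiry("RelativeToNow", expires_on="30000")
+
+            # Or pin an absolute expiry time.
+            client.path.set_expiry("Absolute", expires_on="Fri, 01 Jan 2027 00:00:00 GMT")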
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        comp: Literal["expiry"] = kwargs.pop("comp", _params.pop("comp", "expiry"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _request = build_set_expiry_request(
+            url=self._config.url,
+            expiry_options=expiry_options,
+            timeout=timeout,
+            request_id_parameter=request_id_parameter,
+            expires_on=expires_on,
+            comp=comp,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
+    @distributed_trace
+    def undelete(  # pylint: disable=inconsistent-return-statements
+        self,
+        timeout: Optional[int] = None,
+        undelete_source: Optional[str] = None,
+        request_id_parameter: Optional[str] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """Undelete a path that was previously soft deleted.
+
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param undelete_source: Only for hierarchical namespace enabled accounts. Optional. The path of
+         the soft deleted blob to undelete. Default value is None.
+        :type undelete_source: str
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
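+
+        A minimal sketch (assuming a previously soft-deleted file; the
+        ``undelete_source`` value, including its deletion id, is illustrative):
+
+        .. code-block:: python
+
+            # Restore a soft-deleted path by name and deletion id.
+            client.path.undelete(undelete_source="dir/file.txt?deletionid=1234")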
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        comp: Literal["undelete"] = kwargs.pop("comp", _params.pop("comp", "undelete"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _request = build_undelete_request(
+            url=self._config.url,
+            timeout=timeout,
+            undelete_source=undelete_source,
+            request_id_parameter=request_id_parameter,
+            comp=comp,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-resource-type"] = self._deserialize("str", response.headers.get("x-ms-resource-type"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/operations/_service_operations.py b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/operations/_service_operations.py
new file mode 100644
index 00000000..f0baeb32
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/operations/_service_operations.py
@@ -0,0 +1,208 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+import sys
+from typing import Any, Callable, Dict, Iterable, Literal, Optional, TypeVar
+
+from azure.core import PipelineClient
+from azure.core.exceptions import (
+    ClientAuthenticationError,
+    HttpResponseError,
+    ResourceExistsError,
+    ResourceNotFoundError,
+    ResourceNotModifiedError,
+    map_error,
+)
+from azure.core.paging import ItemPaged
+from azure.core.pipeline import PipelineResponse
+from azure.core.rest import HttpRequest, HttpResponse
+from azure.core.tracing.decorator import distributed_trace
+from azure.core.utils import case_insensitive_dict
+
+from .. import models as _models
+from .._configuration import AzureDataLakeStorageRESTAPIConfiguration
+from .._serialization import Deserializer, Serializer
+
+if sys.version_info >= (3, 9):
+    from collections.abc import MutableMapping
+else:
+    from typing import MutableMapping  # type: ignore
+T = TypeVar("T")
+ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
+
+_SERIALIZER = Serializer()
+_SERIALIZER.client_side_validation = False
+
+
+def build_list_file_systems_request(
+    url: str,
+    *,
+    prefix: Optional[str] = None,
+    continuation: Optional[str] = None,
+    max_results: Optional[int] = None,
+    request_id_parameter: Optional[str] = None,
+    timeout: Optional[int] = None,
+    **kwargs: Any
+) -> HttpRequest:
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    resource: Literal["account"] = kwargs.pop("resource", _params.pop("resource", "account"))
+    version: Literal["2025-01-05"] = kwargs.pop("version", _headers.pop("x-ms-version", "2025-01-05"))
+    accept = _headers.pop("Accept", "application/json")
+
+    # Construct URL
+    _url = kwargs.pop("template_url", "{url}")
+    path_format_arguments = {
+        "url": _SERIALIZER.url("url", url, "str", skip_quote=True),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    _params["resource"] = _SERIALIZER.query("resource", resource, "str")
+    if prefix is not None:
+        _params["prefix"] = _SERIALIZER.query("prefix", prefix, "str")
+    if continuation is not None:
+        _params["continuation"] = _SERIALIZER.query("continuation", continuation, "str")
+    if max_results is not None:
+        _params["maxResults"] = _SERIALIZER.query("max_results", max_results, "int", minimum=1)
+    if timeout is not None:
+        _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0)
+
+    # Construct headers
+    if request_id_parameter is not None:
+        _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str")
+    _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str")
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+class ServiceOperations:
+    """
+    .. warning::
+        **DO NOT** instantiate this class directly.
+
+        Instead, you should access the following operations through
+        :class:`~azure.storage.filedatalake.AzureDataLakeStorageRESTAPI`'s
+        :attr:`service` attribute.
+    """
+
+    models = _models
+
+    def __init__(self, *args, **kwargs):
+        input_args = list(args)
+        self._client: PipelineClient = input_args.pop(0) if input_args else kwargs.pop("client")
+        self._config: AzureDataLakeStorageRESTAPIConfiguration = (
+            input_args.pop(0) if input_args else kwargs.pop("config")
+        )
+        self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer")
+        self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer")
+
+    @distributed_trace
+    def list_file_systems(
+        self,
+        prefix: Optional[str] = None,
+        continuation: Optional[str] = None,
+        max_results: Optional[int] = None,
+        request_id_parameter: Optional[str] = None,
+        timeout: Optional[int] = None,
+        **kwargs: Any
+    ) -> Iterable["_models.FileSystem"]:
+        # pylint: disable=line-too-long
+        """List FileSystems.
+
+        List filesystems and their properties in given account.
+
+        :param prefix: Filters results to filesystems within the specified prefix. Default value is
+         None.
+        :type prefix: str
+        :param continuation: Optional. A continuation token returned by a previous List
+         FileSystems invocation when not all results could be returned in a single
+         response. When a continuation token is returned, it must be specified in a
+         subsequent invocation to continue listing file systems. Default value is None.
+        :type continuation: str
+        :param max_results: An optional value that specifies the maximum number of items to return. If
+         omitted or greater than 5,000, the response will include up to 5,000 items. Default value is
+         None.
+        :type max_results: int
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :return: An iterator like instance of either FileSystem or the result of cls(response)
+        :rtype: ~azure.core.paging.ItemPaged[~azure.storage.filedatalake.models.FileSystem]
+        :raises ~azure.core.exceptions.HttpResponseError:
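+
+        A minimal sketch (assuming an authenticated
+        ``AzureDataLakeStorageRESTAPI`` client named ``client``):
+
+        .. code-block:: python
+
+            # Lazily page through every filesystem whose name starts with "logs".
+            for fs in client.service.list_file_systems(prefix="logs", max_results=100):
+                print(fs.name)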
+        """
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        resource: Literal["account"] = kwargs.pop("resource", _params.pop("resource", "account"))
+        cls: ClsType[_models.FileSystemList] = kwargs.pop("cls", None)
+
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        def prepare_request(next_link=None):
+            if not next_link:
+
+                _request = build_list_file_systems_request(
+                    url=self._config.url,
+                    prefix=prefix,
+                    continuation=continuation,
+                    max_results=max_results,
+                    request_id_parameter=request_id_parameter,
+                    timeout=timeout,
+                    resource=resource,
+                    version=self._config.version,
+                    headers=_headers,
+                    params=_params,
+                )
+                _request.url = self._client.format_url(_request.url)
+
+            else:
+                _request = HttpRequest("GET", next_link)
+                _request.url = self._client.format_url(_request.url)
+            return _request
+
+        def extract_data(pipeline_response):
+            deserialized = self._deserialize("FileSystemList", pipeline_response)
+            list_of_elem = deserialized.filesystems
+            if cls:
+                list_of_elem = cls(list_of_elem)  # type: ignore
+            return None, iter(list_of_elem)
+
+        def get_next(next_link=None):
+            _request = prepare_request(next_link)
+
+            _stream = False
+            pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+                _request, stream=_stream, **kwargs
+            )
+            response = pipeline_response.http_response
+
+            if response.status_code not in [200]:
+                map_error(status_code=response.status_code, response=response, error_map=error_map)
+                error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+                raise HttpResponseError(response=response, model=error)
+
+            return pipeline_response
+
+        return ItemPaged(get_next, extract_data)
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/py.typed b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/py.typed
new file mode 100644
index 00000000..e5aff4f8
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/py.typed
@@ -0,0 +1 @@
+# Marker file for PEP 561.
\ No newline at end of file
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_list_paths_helper.py b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_list_paths_helper.py
new file mode 100644
index 00000000..1120b973
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_list_paths_helper.py
@@ -0,0 +1,173 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+
+from azure.core.paging import PageIterator
+from azure.core.exceptions import HttpResponseError
+from ._deserialize import process_storage_error, get_deleted_path_properties_from_generated_code, \
+    return_headers_and_deserialized_path_list
+from ._generated.models import BlobItemInternal, BlobPrefix as GenBlobPrefix
+from ._shared.models import DictMixin
+from ._shared.response_handlers import return_context_and_deserialized
+from ._models import PathProperties
+from ._generated.models import Path
+
+
+class DeletedPathPropertiesPaged(PageIterator):
+    """An Iterable of deleted path properties.
+
+    :ivar str service_endpoint: The service URL.
+    :ivar str prefix: A path name prefix being used to filter the list.
+    :ivar str marker: The continuation token of the current page of results.
+    :ivar int results_per_page: The maximum number of results retrieved per API call.
+    :ivar str continuation_token: The continuation token to retrieve the next page of results.
+    :ivar str location_mode: The location mode being used to list results. The available
+        options include "primary" and "secondary".
+    :ivar current_page: The current page of listed results.
+    :vartype current_page: list(~azure.storage.filedatalake.DeletedPathProperties)
+    :ivar str container: The container that the paths are listed from.
+    :ivar str delimiter: A delimiting character used for hierarchy listing.
+
+    :param callable command: Function to retrieve the next page of items.
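+
+    A minimal sketch (this pager is normally obtained from a higher-level client,
+    e.g. ``FileSystemClient.list_deleted_paths()``, rather than constructed
+    directly; names are illustrative):
+
+    .. code-block:: python
+
+        # Iterate soft-deleted paths under a prefix.
+        for deleted in file_system_client.list_deleted_paths(path_prefix="dir/"):
+            print(deleted.name, deleted.deletion_id)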
+    """
+    def __init__(
+            self, command,
+            container=None,
+            prefix=None,
+            results_per_page=None,
+            continuation_token=None,
+            delimiter=None,
+            location_mode=None):
+        super(DeletedPathPropertiesPaged, self).__init__(
+            get_next=self._get_next_cb,
+            extract_data=self._extract_data_cb,
+            continuation_token=continuation_token or ""
+        )
+        self._command = command
+        self.service_endpoint = None
+        self.prefix = prefix
+        self.marker = None
+        self.results_per_page = results_per_page
+        self.container = container
+        self.delimiter = delimiter
+        self.current_page = None
+        self.location_mode = location_mode
+
+    def _get_next_cb(self, continuation_token):
+        try:
+            return self._command(
+                prefix=self.prefix,
+                marker=continuation_token or None,
+                max_results=self.results_per_page,
+                cls=return_context_and_deserialized,
+                use_location=self.location_mode)
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    def _extract_data_cb(self, get_next_return):
+        self.location_mode, self._response = get_next_return
+        self.service_endpoint = self._response.service_endpoint
+        self.prefix = self._response.prefix
+        self.marker = self._response.marker
+        self.results_per_page = self._response.max_results
+        self.container = self._response.container_name
+        self.current_page = self._response.segment.blob_prefixes + self._response.segment.blob_items
+        self.current_page = [self._build_item(item) for item in self.current_page]
+        self.delimiter = self._response.delimiter
+
+        return self._response.next_marker or None, self.current_page
+
+    def _build_item(self, item):
+        if isinstance(item, BlobItemInternal):
+            file_props = get_deleted_path_properties_from_generated_code(item)
+            file_props.file_system = self.container
+            return file_props
+        if isinstance(item, GenBlobPrefix):
+            return DirectoryPrefix(
+                container=self.container,
+                prefix=item.name,
+                results_per_page=self.results_per_page,
+                location_mode=self.location_mode)
+        return item
+
+
+class DirectoryPrefix(DictMixin):
+    """Directory prefix.
+
+    :ivar str name: Name of the deleted directory.
+    :ivar int results_per_page: The maximum number of results retrieved per API call.
+    :ivar str location_mode: The location mode being used to list results. The available
+        options include "primary" and "secondary".
+    :ivar str file_system: The file system that the deleted paths are listed from.
+    :ivar str delimiter: A delimiting character used for hierarchy listing.
+    """
+    def __init__(self, **kwargs):
+        self.name = kwargs.get('prefix')
+        self.results_per_page = kwargs.get('results_per_page')
+        self.file_system = kwargs.get('container')
+        self.delimiter = kwargs.get('delimiter')
+        self.location_mode = kwargs.get('location_mode')
+
+
+class PathPropertiesPaged(PageIterator):
+    """An Iterable of Path properties.
+
+    :ivar str path: Filters the results to return only paths under the specified path.
+    :ivar int results_per_page: The maximum number of results retrieved per API call.
+    :ivar str continuation_token: The continuation token to retrieve the next page of results.
+    :ivar list(~azure.storage.filedatalake.PathProperties) current_page: The current page of listed results.
+
+    :param callable command: Function to retrieve the next page of items.
+    :param str path: Filters the results to return only paths under the specified path.
+    :param int max_results: The maximum number of paths to retrieve per
+        call.
+    :param str continuation_token: An opaque continuation token.
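+
+    A minimal sketch (this pager is normally returned by a higher-level call such
+    as ``FileSystemClient.get_paths()`` rather than constructed directly):
+
+    .. code-block:: python
+
+        # List every path under "raw/" recursively; items are PathProperties.
+        for path in file_system_client.get_paths(path="raw/", recursive=True):
+            print(path.name, path.is_directory)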
+    """
+    def __init__(
+            self, command,
+            recursive,
+            path=None,
+            max_results=None,
+            continuation_token=None,
+            upn=None):
+        super(PathPropertiesPaged, self).__init__(
+            get_next=self._get_next_cb,
+            extract_data=self._extract_data_cb,
+            continuation_token=continuation_token or ""
+        )
+        self._command = command
+        self.recursive = recursive
+        self.results_per_page = max_results
+        self.path = path
+        self.upn = upn
+        self.current_page = None
+        self.path_list = None
+
+    def _get_next_cb(self, continuation_token):
+        try:
+            return self._command(
+                self.recursive,
+                continuation=continuation_token or None,
+                path=self.path,
+                max_results=self.results_per_page,
+                upn=self.upn,
+                cls=return_headers_and_deserialized_path_list)
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    def _extract_data_cb(self, get_next_return):
+        self.path_list, self._response = get_next_return
+        self.current_page = [self._build_item(item) for item in self.path_list]
+
+        return self._response['continuation'] or None, self.current_page
+
+    @staticmethod
+    def _build_item(item):
+        if isinstance(item, PathProperties):
+            return item
+        if isinstance(item, Path):
+            path = PathProperties._from_generated(item)  # pylint: disable=protected-access
+            return path
+        return item
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_models.py b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_models.py
new file mode 100644
index 00000000..d078d1e4
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_models.py
@@ -0,0 +1,1158 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+# pylint: disable=too-few-public-methods, too-many-instance-attributes
+# pylint: disable=super-init-not-called, too-many-lines
+from enum import Enum
+
+from azure.core import CaseInsensitiveEnumMeta
+from azure.storage.blob import LeaseProperties as BlobLeaseProperties
+from azure.storage.blob import AccountSasPermissions as BlobAccountSasPermissions
+from azure.storage.blob import ResourceTypes as BlobResourceTypes
+from azure.storage.blob import UserDelegationKey as BlobUserDelegationKey
+from azure.storage.blob import ContentSettings as BlobContentSettings
+from azure.storage.blob import AccessPolicy as BlobAccessPolicy
+from azure.storage.blob import DelimitedTextDialect as BlobDelimitedTextDialect
+from azure.storage.blob import DelimitedJsonDialect as BlobDelimitedJSON
+from azure.storage.blob import ArrowDialect as BlobArrowDialect
+from azure.storage.blob import ContainerEncryptionScope as BlobContainerEncryptionScope
+from azure.storage.blob import CustomerProvidedEncryptionKey as BlobCustomerProvidedEncryptionKey
+from azure.storage.blob._models import ContainerPropertiesPaged
+from azure.storage.blob._generated.models import Logging as GenLogging, Metrics as GenMetrics, \
+    RetentionPolicy as GenRetentionPolicy, StaticWebsite as GenStaticWebsite, CorsRule as GenCorsRule
+
+from ._shared.models import DictMixin
+from ._shared.parser import _filetime_to_datetime, _rfc_1123_to_datetime
+
+
+class FileSystemProperties(DictMixin):
+    """File System properties class.
+
+    :ivar str name:
+        Name of the filesystem.
+    :ivar ~datetime.datetime last_modified:
+        A datetime object representing the last time the file system was modified.
+    :ivar str etag:
+        The ETag contains a value that you can use to perform operations
+        conditionally.
+    :ivar ~azure.storage.filedatalake.LeaseProperties lease:
+        Stores all the lease information for the file system.
+    :ivar str public_access: Specifies whether data in the file system may be accessed
+        publicly and the level of access.
+    :ivar bool has_immutability_policy:
+        Represents whether the file system has an immutability policy.
+    :ivar bool has_legal_hold:
+        Represents whether the file system has a legal hold.
+    :ivar dict metadata: A dict with name-value pairs to associate with the
+        file system as metadata.
+    :ivar ~azure.storage.filedatalake.EncryptionScopeOptions encryption_scope:
+        The default encryption scope configuration for the file system.
+    :ivar bool deleted:
+        Whether this file system was deleted.
+    :ivar str deleted_version:
+        The version of a deleted file system.
+
+    Returned ``FileSystemProperties`` instances expose these values through a
+    dictionary interface, for example: ``file_system_props["last_modified"]``.
+    Additionally, the file system name is available as ``file_system_props["name"]``.
+    """
+
+    def __init__(self, **kwargs):
+        self.name = None
+        self.last_modified = None
+        self.etag = None
+        self.lease = None
+        self.public_access = None
+        self.has_immutability_policy = None
+        self.has_legal_hold = None
+        self.metadata = None
+        self.deleted = None
+        self.deleted_version = None
+        # Ensure the attribute always exists, even when no default encryption scope is returned.
+        self.encryption_scope = None
+        default_encryption_scope = kwargs.get('x-ms-default-encryption-scope')
+        if default_encryption_scope:
+            self.encryption_scope = EncryptionScopeOptions(
+                default_encryption_scope=default_encryption_scope,
+                prevent_encryption_scope_override=kwargs.get('x-ms-deny-encryption-scope-override', False)
+            )
+
+    @classmethod
+    def _from_generated(cls, generated):
+        props = cls()
+        props.name = generated.name
+        props.last_modified = generated.properties.last_modified
+        props.deleted = generated.deleted
+        props.deleted_version = generated.version
+        props.etag = generated.properties.etag
+        props.lease = LeaseProperties._from_generated(generated)  # pylint: disable=protected-access
+        props.public_access = PublicAccess._from_generated(  # pylint: disable=protected-access
+            generated.properties.public_access)
+        props.has_immutability_policy = generated.properties.has_immutability_policy
+        props.has_legal_hold = generated.properties.has_legal_hold
+        props.metadata = generated.metadata
+        props.encryption_scope = EncryptionScopeOptions._from_generated(generated)  #pylint: disable=protected-access
+        return props
+
+    @classmethod
+    def _convert_from_container_props(cls, container_properties):
+        container_properties.__class__ = cls
+        container_properties.public_access = PublicAccess._from_generated(  # pylint: disable=protected-access
+            container_properties.public_access)
+        container_properties.lease.__class__ = LeaseProperties
+        return container_properties
+
+
+class FileSystemPropertiesPaged(ContainerPropertiesPaged):
+    """An Iterable of File System properties.
+
+    :ivar str service_endpoint: The service URL.
+    :ivar str prefix: A file system name prefix being used to filter the list.
+    :ivar str marker: The continuation token of the current page of results.
+    :ivar int results_per_page: The maximum number of results retrieved per API call.
+    :ivar str continuation_token: The continuation token to retrieve the next page of results.
+    :ivar str location_mode: The location mode being used to list results. The available
+        options include "primary" and "secondary".
+    :ivar current_page: The current page of listed results.
+    :vartype current_page: list(~azure.storage.filedatalake.FileSystemProperties)
+
+    :param callable command: Function to retrieve the next page of items.
+    :param str prefix: Filters the results to return only file systems whose names
+        begin with the specified prefix.
+    :param int results_per_page: The maximum number of file system names to retrieve per
+        call.
+    :param str continuation_token: An opaque continuation token.
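+
+    A minimal sketch (this pager is normally returned by
+    ``DataLakeServiceClient.list_file_systems()`` rather than constructed directly):
+
+    .. code-block:: python
+
+        for fs in service_client.list_file_systems(name_starts_with="logs"):
+            print(fs.name)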
+    """
+
+    def __init__(self, *args, **kwargs):
+        super(FileSystemPropertiesPaged, self).__init__(
+            *args,
+            **kwargs
+        )
+
+    @staticmethod
+    def _build_item(item):
+        return FileSystemProperties._from_generated(item)  # pylint: disable=protected-access
+
+
+class DirectoryProperties(DictMixin):
+    """
+    :ivar str name: The name of the directory.
+    :ivar str owner: The owner of the file or directory.
+    :ivar str group: The owning group of the file or directory.
+    :ivar str permissions: The permissions that are set for user, group, and other on the file or directory.
+        Each individual permission is in [r,w,x,-]{3} format.
+    :ivar str acl: The POSIX ACL permissions of the file or directory.
+    :ivar str etag: The ETag contains a value that you can use to perform operations
+        conditionally.
+    :ivar bool deleted: Whether the current directory is marked as deleted.
+    :ivar dict metadata: Name-value pairs associated with the directory as metadata.
+    :ivar str encryption_scope:
+        A predefined encryption scope used to encrypt the data on the service. An encryption
+        scope can be created using the Management API and referenced here by name. If a default
+        encryption scope has been defined at the file system, this value will override it if the
+        file system level scope is configured to allow overrides. Otherwise an error will be raised.
+    :ivar ~azure.storage.filedatalake.LeaseProperties lease:
+        Stores all the lease information for the directory.
+    :ivar ~datetime.datetime last_modified:
+        A datetime object representing the last time the directory was modified.
+    :ivar ~datetime.datetime creation_time:
+        Indicates when the directory was created, in UTC.
+    :ivar int remaining_retention_days: The number of days that the directory will be retained
+        before being permanently deleted by the service.
+    :ivar ~azure.storage.filedatalake.ContentSettings content_settings:
+        The content settings of the directory.
+    """
+
+    def __init__(self, **kwargs):
+        self.name = kwargs.get('name')
+        self.etag = kwargs.get('ETag')
+        self.deleted = False
+        self.metadata = kwargs.get('metadata')
+        self.lease = LeaseProperties(**kwargs)
+        self.last_modified = kwargs.get('Last-Modified')
+        self.creation_time = kwargs.get('x-ms-creation-time')
+        self.deleted_time = None
+        self.remaining_retention_days = None
+        self.encryption_scope = kwargs.get('x-ms-encryption-scope')
+
+        # This is being passed directly not coming from headers
+        self.owner = kwargs.get('owner', None)
+        self.group = kwargs.get('group', None)
+        self.permissions = kwargs.get('permissions', None)
+        self.acl = kwargs.get('acl', None)
+
+
+class FileProperties(DictMixin):
+    """
+    :ivar str name: The name of the file.
+    :ivar str owner: The owner of the file or directory.
+    :ivar str group: The owning group of the file or directory.
+    :ivar str permissions: The permissions that are set for user, group, and other on the file or directory.
+        Each individual permission is in [r,w,x,-]{3} format.
+    :ivar str acl: The POSIX ACL permissions of the file or directory.
+    :ivar str etag: The ETag contains a value that you can use to perform operations
+        conditionally.
+    :ivar bool deleted: Whether the current file is marked as deleted.
+    :ivar dict metadata: Name-value pairs associated with the file as metadata.
+    :ivar str encryption_scope:
+        A predefined encryption scope used to encrypt the data on the service. An encryption
+        scope can be created using the Management API and referenced here by name. If a default
+        encryption scope has been defined at the file system, this value will override it if the
+        file system level scope is configured to allow overrides. Otherwise an error will be raised.
+    :ivar ~azure.storage.filedatalake.LeaseProperties lease:
+        Stores all the lease information for the file.
+    :ivar ~datetime.datetime last_modified:
+        A datetime object representing the last time the file was modified.
+    :ivar ~datetime.datetime creation_time:
+        Indicates when the file was created, in UTC.
+    :ivar int size: The size of the file, in bytes.
+    :ivar int remaining_retention_days: The number of days that the file will be retained
+        before being permanently deleted by the service.
+    :ivar str encryption_context: Specifies the encryption context to set on the file.
+    :ivar ~azure.storage.filedatalake.ContentSettings content_settings:
+        The content settings of the file.
+    """
+
+    def __init__(self, **kwargs):
+        self.name = kwargs.get('name')
+        self.etag = kwargs.get('ETag')
+        self.deleted = False
+        self.metadata = kwargs.get('metadata')
+        self.lease = LeaseProperties(**kwargs)
+        self.last_modified = kwargs.get('Last-Modified')
+        self.creation_time = kwargs.get('x-ms-creation-time')
+        self.size = kwargs.get('Content-Length')
+        self.deleted_time = None
+        self.expiry_time = kwargs.get("x-ms-expiry-time")
+        self.remaining_retention_days = None
+        self.content_settings = ContentSettings(**kwargs)
+        self.encryption_scope = kwargs.get('x-ms-encryption-scope')
+
+        # This is being passed directly not coming from headers
+        self.encryption_context = kwargs.get('encryption_context')
+        self.owner = kwargs.get('owner', None)
+        self.group = kwargs.get('group', None)
+        self.permissions = kwargs.get('permissions', None)
+        self.acl = kwargs.get('acl', None)
+
+
+class PathProperties(DictMixin):
+    """Path properties listed by get_paths api.
+
+    :ivar str name: The full path for a file or directory.
+    :ivar str owner: The owner of the file or directory.
+    :ivar str group: The owning group of the file or directory.
+    :ivar str permissions: The permissions that are set for user, group, and other on the file or directory.
+        Each individual permission is in [r,w,x,-]{3} format.
+    :ivar datetime last_modified:  A datetime object representing the last time the directory/file was modified.
+    :ivar bool is_directory: Is the path a directory or not.
+    :ivar str etag: The ETag contains a value that you can use to perform operations
+        conditionally.
+    :ivar int content_length: The size of file if the path is a file.
+    :ivar datetime creation_time: The creation time of the file/directory.
+    :ivar datetime expiry_time: The expiry time of the file/directory.
+    :ivar str encryption_scope:
+        A predefined encryption scope used to encrypt the data on the service. An encryption
+        scope can be created using the Management API and referenced here by name. If a default
+        encryption scope has been defined at the file system, this value will override it if the
+        file system level scope is configured to allow overrides. Otherwise an error will be raised.
+    :ivar str encryption_context: Specifies the encryption context to set on the file.
+    """
+
+    def __init__(self, **kwargs):
+        self.name = kwargs.pop('name', None)
+        self.owner = kwargs.get('owner', None)
+        self.group = kwargs.get('group', None)
+        self.permissions = kwargs.get('permissions', None)
+        self.last_modified = kwargs.get('last_modified', None)
+        self.is_directory = kwargs.get('is_directory', False)
+        self.etag = kwargs.get('etag', None)
+        self.content_length = kwargs.get('content_length', None)
+        self.creation_time = kwargs.get('creation_time', None)
+        self.expiry_time = kwargs.get('expiry_time', None)
+        self.encryption_scope = kwargs.get('x-ms-encryption-scope', None)
+        self.encryption_context = kwargs.get('x-ms-encryption-context', None)
+
+    @classmethod
+    def _from_generated(cls, generated):
+        path_prop = PathProperties()
+        path_prop.name = generated.name
+        path_prop.owner = generated.owner
+        path_prop.group = generated.group
+        path_prop.permissions = generated.permissions
+        path_prop.last_modified = _rfc_1123_to_datetime(generated.last_modified)
+        path_prop.is_directory = bool(generated.is_directory)
+        path_prop.etag = generated.additional_properties.get('etag')
+        path_prop.content_length = generated.content_length
+        path_prop.creation_time = _filetime_to_datetime(generated.creation_time)
+        path_prop.expiry_time = _filetime_to_datetime(generated.expiry_time)
+        path_prop.encryption_scope = generated.encryption_scope
+        path_prop.encryption_context = generated.encryption_context
+        return path_prop
+
+
+class LeaseProperties(BlobLeaseProperties):
+    """DataLake Lease Properties.
+
+    :ivar str status:
+        The lease status of the file. Possible values: locked|unlocked
+    :ivar str state:
+        Lease state of the file. Possible values: available|leased|expired|breaking|broken
+    :ivar str duration:
+        When a file is leased, specifies whether the lease is of infinite or fixed duration.
+    """
+
+
+class ContentSettings(BlobContentSettings):
+    """The content settings of a file or directory.
+
+    :ivar str content_type:
+        The content type specified for the file or directory. If no content type was
+        specified, the default content type is application/octet-stream.
+    :ivar str content_encoding:
+        If the content_encoding has previously been set
+        for the file, that value is stored.
+    :ivar str content_language:
+        If the content_language has previously been set
+        for the file, that value is stored.
+    :ivar str content_disposition:
+        content_disposition conveys additional information about how to
+        process the response payload, and also can be used to attach
+        additional metadata. If content_disposition has previously been set
+        for the file, that value is stored.
+    :ivar str cache_control:
+        If the cache_control has previously been set for
+        the file, that value is stored.
+    :ivar bytearray content_md5:
+        If the content_md5 has been set for the file, this response
+        header is stored so that the client can check for message content
+        integrity.
+    :keyword str content_type:
+        The content type specified for the file or directory. If no content type was
+        specified, the default content type is application/octet-stream.
+    :keyword str content_encoding:
+        If the content_encoding has previously been set
+        for the file, that value is stored.
+    :keyword str content_language:
+        If the content_language has previously been set
+        for the file, that value is stored.
+    :keyword str content_disposition:
+        content_disposition conveys additional information about how to
+        process the response payload, and also can be used to attach
+        additional metadata. If content_disposition has previously been set
+        for the file, that value is stored.
+    :keyword str cache_control:
+        If the cache_control has previously been set for
+        the file, that value is stored.
+    :keyword bytearray content_md5:
+        If the content_md5 has been set for the file, this response
+        header is stored so that the client can check for message content
+        integrity.
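+
+    A minimal construction sketch:
+
+    .. code-block:: python
+
+        settings = ContentSettings(
+            content_type="text/plain",
+            cache_control="no-cache",
+            content_disposition="inline",
+        )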
+    """
+
+    def __init__(
+            self, **kwargs):
+        super(ContentSettings, self).__init__(
+            **kwargs
+        )
+
+
+class AccountSasPermissions(BlobAccountSasPermissions):
+    def __init__(self, read=False, write=False, delete=False, list=False,  # pylint: disable=redefined-builtin
+                 create=False):
+        super(AccountSasPermissions, self).__init__(
+            read=read, create=create, write=write, list=list,
+            delete=delete
+        )
+
+
+class FileSystemSasPermissions(object):
+    """FileSystemSasPermissions class to be used with the
+    :func:`~azure.storage.filedatalake.generate_file_system_sas` function.
+
+    :param bool read:
+        Read the content, properties, metadata etc.
+    :param bool write:
+        Create or write content, properties, metadata. Lease the file system.
+    :param bool delete:
+        Delete the file system.
+    :param bool list:
+        List paths in the file system.
+    :keyword bool add:
+        Append data to a file in the directory.
+    :keyword bool create:
+        Write a new file, snapshot a file, or copy a file to a new file.
+    :keyword bool move:
+        Move any file in the directory to a new location.
+        Note the move operation can optionally be restricted to the child file or directory owner or
+        the parent directory owner if the saoid parameter is included in the token and the sticky bit is set
+        on the parent directory.
+    :keyword bool execute:
+        Get the status (system defined properties) and ACL of any file in the directory.
+        If the caller is the owner, set access control on any file in the directory.
+    :keyword bool manage_ownership:
+        Allows the user to set owner, owning group, or act as the owner when renaming or deleting a file or directory
+        within a folder that has the sticky bit set.
+    :keyword bool manage_access_control:
+         Allows the user to set permissions and POSIX ACLs on files and directories.
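+
+    Example (a minimal sketch of generating a file system SAS with these
+    permissions; the account name, key, and file system name are hypothetical)::
+
+        from datetime import datetime, timedelta
+        from azure.storage.filedatalake import (
+            FileSystemSasPermissions,
+            generate_file_system_sas,
+        )
+
+        # Grant read + list access for one hour (hypothetical account/key).
+        sas_token = generate_file_system_sas(
+            account_name="myaccount",
+            file_system_name="myfilesystem",
+            credential="<account-key>",
+            permission=FileSystemSasPermissions(read=True, list=True),
+            expiry=datetime.utcnow() + timedelta(hours=1),
+        )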
+    """
+
+    def __init__(self, read=False, write=False, delete=False, list=False,  # pylint: disable=redefined-builtin
+                 **kwargs):
+        self.read = read
+        self.add = kwargs.pop('add', None)
+        self.create = kwargs.pop('create', None)
+        self.write = write
+        self.delete = delete
+        self.list = list
+        self.move = kwargs.pop('move', None)
+        self.execute = kwargs.pop('execute', None)
+        self.manage_ownership = kwargs.pop('manage_ownership', None)
+        self.manage_access_control = kwargs.pop('manage_access_control', None)
+        self._str = (('r' if self.read else '') +
+                     ('a' if self.add else '') +
+                     ('c' if self.create else '') +
+                     ('w' if self.write else '') +
+                     ('d' if self.delete else '') +
+                     ('l' if self.list else '') +
+                     ('m' if self.move else '') +
+                     ('e' if self.execute else '') +
+                     ('o' if self.manage_ownership else '') +
+                     ('p' if self.manage_access_control else ''))
+
+    def __str__(self):
+        return self._str
+
+    @classmethod
+    def from_string(cls, permission):
+        """Create a FileSystemSasPermissions from a string.
+
+        To specify read, write, or delete permissions, you need only include
+        the first letter of the word in the string. E.g. for read and write
+        permissions, you would provide the string "rw".
+
+        :param str permission: The string which dictates the read, add, create,
+            write, or delete permissions.
+        :return: A FileSystemSasPermissions object
+        :rtype: ~azure.storage.filedatalake.FileSystemSasPermissions
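+
+        Example (a short sketch of the round trip)::
+
+            perms = FileSystemSasPermissions.from_string("rwdl")
+            assert perms.read and perms.write and perms.delete and perms.list
+            assert str(perms) == "rwdl"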
+        """
+        p_read = 'r' in permission
+        p_add = 'a' in permission
+        p_create = 'c' in permission
+        p_write = 'w' in permission
+        p_delete = 'd' in permission
+        p_list = 'l' in permission
+        p_move = 'm' in permission
+        p_execute = 'e' in permission
+        p_manage_ownership = 'o' in permission
+        p_manage_access_control = 'p' in permission
+
+        parsed = cls(read=p_read, write=p_write, delete=p_delete,
+                     list=p_list, add=p_add, create=p_create, move=p_move,
+                     execute=p_execute, manage_ownership=p_manage_ownership,
+                     manage_access_control=p_manage_access_control)
+        return parsed
+
+
+class DirectorySasPermissions(object):
+    """DirectorySasPermissions class to be used with the
+    :func:`~azure.storage.filedatalake.generate_directory_sas` function.
+
+    :param bool read:
+        Read the content, properties, metadata etc.
+    :param bool create:
+        Create a new directory.
+    :param bool write:
+        Create or write content, properties, metadata. Lease the directory.
+    :param bool delete:
+        Delete the directory.
+    :keyword bool add:
+        Append data to a file in the directory.
+    :keyword bool list:
+        List any files in the directory. Implies Execute.
+    :keyword bool move:
+        Move any file in the directory to a new location.
+        Note the move operation can optionally be restricted to the child file or directory owner or
+        the parent directory owner if the saoid parameter is included in the token and the sticky bit is set
+        on the parent directory.
+    :keyword bool execute:
+        Get the status (system defined properties) and ACL of any file in the directory.
+        If the caller is the owner, set access control on any file in the directory.
+    :keyword bool manage_ownership:
+        Allows the user to set owner, owning group, or act as the owner when renaming or deleting a file or directory
+        within a folder that has the sticky bit set.
+    :keyword bool manage_access_control:
+         Allows the user to set permissions and POSIX ACLs on files and directories.
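+
+    Example (a minimal sketch of generating a directory SAS; the account name,
+    key, and path names are hypothetical)::
+
+        from datetime import datetime, timedelta
+        from azure.storage.filedatalake import (
+            DirectorySasPermissions,
+            generate_directory_sas,
+        )
+
+        # Grant read + list access to one directory for one hour.
+        sas_token = generate_directory_sas(
+            account_name="myaccount",
+            file_system_name="myfilesystem",
+            directory_name="mydirectory",
+            credential="<account-key>",
+            permission=DirectorySasPermissions(read=True, list=True),
+            expiry=datetime.utcnow() + timedelta(hours=1),
+        )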
+    """
+
+    def __init__(self, read=False, create=False, write=False,
+                 delete=False, **kwargs):
+        self.read = read
+        self.add = kwargs.pop('add', None)
+        self.create = create
+        self.write = write
+        self.delete = delete
+        self.list = kwargs.pop('list', None)
+        self.move = kwargs.pop('move', None)
+        self.execute = kwargs.pop('execute', None)
+        self.manage_ownership = kwargs.pop('manage_ownership', None)
+        self.manage_access_control = kwargs.pop('manage_access_control', None)
+        self._str = (('r' if self.read else '') +
+                     ('a' if self.add else '') +
+                     ('c' if self.create else '') +
+                     ('w' if self.write else '') +
+                     ('d' if self.delete else '') +
+                     ('l' if self.list else '') +
+                     ('m' if self.move else '') +
+                     ('e' if self.execute else '') +
+                     ('o' if self.manage_ownership else '') +
+                     ('p' if self.manage_access_control else ''))
+
+    def __str__(self):
+        return self._str
+
+    @classmethod
+    def from_string(cls, permission):
+        """Create a DirectorySasPermissions from a string.
+
+        To specify read, create, write, or delete permissions, you need only
+        include the first letter of the word in the string. E.g. for read and
+        write permissions, you would provide the string "rw".
+
+        :param str permission: The string which dictates the read, add, create,
+            write, or delete permissions.
+        :return: A DirectorySasPermissions object
+        :rtype: ~azure.storage.filedatalake.DirectorySasPermissions
+        """
+        p_read = 'r' in permission
+        p_add = 'a' in permission
+        p_create = 'c' in permission
+        p_write = 'w' in permission
+        p_delete = 'd' in permission
+        p_list = 'l' in permission
+        p_move = 'm' in permission
+        p_execute = 'e' in permission
+        p_manage_ownership = 'o' in permission
+        p_manage_access_control = 'p' in permission
+
+        parsed = cls(read=p_read, create=p_create, write=p_write, delete=p_delete, add=p_add,
+                     list=p_list, move=p_move, execute=p_execute, manage_ownership=p_manage_ownership,
+                     manage_access_control=p_manage_access_control)
+        return parsed
+
+
+class FileSasPermissions(object):
+    """FileSasPermissions class to be used with the
+    :func:`~azure.storage.filedatalake.generate_file_sas` function.
+
+    :param bool read:
+        Read the content, properties, metadata etc. Use the file as
+        the source of a read operation.
+    :param bool create:
+        Write a new file.
+    :param bool write:
+        Create or write content, properties, metadata. Lease the file.
+    :param bool delete:
+        Delete the file.
+    :keyword bool add:
+        Append data to the file.
+    :keyword bool move:
+        Move any file in the directory to a new location.
+        Note the move operation can optionally be restricted to the child file or directory owner or
+        the parent directory owner if the saoid parameter is included in the token and the sticky bit is set
+        on the parent directory.
+    :keyword bool execute:
+        Get the status (system defined properties) and ACL of any file in the directory.
+        If the caller is the owner, set access control on any file in the directory.
+    :keyword bool manage_ownership:
+        Allows the user to set owner, owning group, or act as the owner when renaming or deleting a file or directory
+        within a folder that has the sticky bit set.
+    :keyword bool manage_access_control:
+         Allows the user to set permissions and POSIX ACLs on files and directories.
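+
+    Example (a minimal sketch of generating a read-only file SAS; the account
+    name, key, and path names are hypothetical)::
+
+        from datetime import datetime, timedelta
+        from azure.storage.filedatalake import FileSasPermissions, generate_file_sas
+
+        sas_token = generate_file_sas(
+            account_name="myaccount",
+            file_system_name="myfilesystem",
+            directory_name="mydirectory",
+            file_name="myfile.txt",
+            credential="<account-key>",
+            permission=FileSasPermissions(read=True),
+            expiry=datetime.utcnow() + timedelta(hours=1),
+        )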
+    """
+
+    def __init__(self, read=False, create=False, write=False, delete=False, **kwargs):
+        self.read = read
+        self.add = kwargs.pop('add', None)
+        self.create = create
+        self.write = write
+        self.delete = delete
+        self.move = kwargs.pop('move', None)
+        self.execute = kwargs.pop('execute', None)
+        self.manage_ownership = kwargs.pop('manage_ownership', None)
+        self.manage_access_control = kwargs.pop('manage_access_control', None)
+        self._str = (('r' if self.read else '') +
+                     ('a' if self.add else '') +
+                     ('c' if self.create else '') +
+                     ('w' if self.write else '') +
+                     ('d' if self.delete else '') +
+                     ('m' if self.move else '') +
+                     ('e' if self.execute else '') +
+                     ('o' if self.manage_ownership else '') +
+                     ('p' if self.manage_access_control else ''))
+
+    def __str__(self):
+        return self._str
+
+    @classmethod
+    def from_string(cls, permission):
+        """Create a FileSasPermissions from a string.
+
+        To specify read, write, or delete permissions, you need only include
+        the first letter of the word in the string. E.g. for read and write
+        permissions, you would provide the string "rw".
+
+        :param str permission: The string which dictates the read, add, create,
+            write, or delete permissions.
+        :return: A FileSasPermissions object
+        :rtype: ~azure.storage.filedatalake.FileSasPermissions
+        """
+        p_read = 'r' in permission
+        p_add = 'a' in permission
+        p_create = 'c' in permission
+        p_write = 'w' in permission
+        p_delete = 'd' in permission
+        p_move = 'm' in permission
+        p_execute = 'e' in permission
+        p_manage_ownership = 'o' in permission
+        p_manage_access_control = 'p' in permission
+
+        parsed = cls(read=p_read, create=p_create, write=p_write, delete=p_delete, add=p_add,
+                     move=p_move, execute=p_execute, manage_ownership=p_manage_ownership,
+                     manage_access_control=p_manage_access_control)
+        return parsed
+
+
+class AccessPolicy(BlobAccessPolicy):
+    """Access Policy class used by the set and get access policy methods in each service.
+
+    A stored access policy can specify the start time, expiry time, and
+    permissions for the Shared Access Signatures with which it's associated.
+    Depending on how you want to control access to your resource, you can
+    specify all of these parameters within the stored access policy, and omit
+    them from the URL for the Shared Access Signature. Doing so permits you to
+    modify the associated signature's behavior at any time, as well as to revoke
+    it. Or you can specify one or more of the access policy parameters within
+    the stored access policy, and the others on the URL. Finally, you can
+    specify all of the parameters on the URL. In this case, you can use the
+    stored access policy to revoke the signature, but not to modify its behavior.
+
+    Together the Shared Access Signature and the stored access policy must
+    include all fields required to authenticate the signature. If any required
+    fields are missing, the request will fail. Likewise, if a field is specified
+    both in the Shared Access Signature URL and in the stored access policy, the
+    request will fail with status code 400 (Bad Request).
+
+    :param permission:
+        The permissions associated with the shared access signature. The
+        user is restricted to operations allowed by the permissions.
+        Required unless an id is given referencing a stored access policy
+        which contains this field. This field must be omitted if it has been
+        specified in an associated stored access policy.
+    :type permission: str or ~azure.storage.filedatalake.FileSystemSasPermissions
+    :param expiry:
+        The time at which the shared access signature becomes invalid.
+        Required unless an id is given referencing a stored access policy
+        which contains this field. This field must be omitted if it has
+        been specified in an associated stored access policy. Azure will always
+        convert values to UTC. If a date is passed in without timezone info, it
+        is assumed to be UTC.
+    :type expiry: ~datetime.datetime or str
+    :keyword start:
+        The time at which the shared access signature becomes valid. If
+        omitted, start time for this call is assumed to be the time when the
+        storage service receives the request. The provided datetime will always
+        be interpreted as UTC.
+    :paramtype start: ~datetime.datetime or str
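+
+    Example (a sketch of attaching a stored access policy to a file system;
+    ``file_system_client`` is an assumed, already-constructed
+    ~azure.storage.filedatalake.FileSystemClient)::
+
+        from datetime import datetime, timedelta
+        from azure.storage.filedatalake import AccessPolicy, FileSystemSasPermissions
+
+        policy = AccessPolicy(
+            permission=FileSystemSasPermissions(read=True, list=True),
+            expiry=datetime.utcnow() + timedelta(hours=1),
+            start=datetime.utcnow(),
+        )
+        # SAS tokens referencing "read-policy" inherit these settings.
+        file_system_client.set_file_system_access_policy({"read-policy": policy})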
+    """
+
+    def __init__(self, permission=None, expiry=None, **kwargs):
+        super(AccessPolicy, self).__init__(
+            permission=permission, expiry=expiry, start=kwargs.pop('start', None)
+        )
+
+
+class ResourceTypes(BlobResourceTypes):
+    """
+    Specifies the resource types that are accessible with the account SAS.
+
+    :param bool service:
+        Access to service-level APIs (e.g. List File Systems).
+    :param bool file_system:
+        Access to file_system-level APIs (e.g., Create/Delete file system,
+        List Directories/Files)
+    :param bool object:
+        Access to object-level APIs for files
+        (e.g. Create File, etc.).
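+
+    Example (a sketch of using resource types in an account SAS; the account
+    name and key are hypothetical)::
+
+        from datetime import datetime, timedelta
+        from azure.storage.filedatalake import (
+            AccountSasPermissions,
+            ResourceTypes,
+            generate_account_sas,
+        )
+
+        # Allow read + list against service- and file-system-level APIs only.
+        sas_token = generate_account_sas(
+            account_name="myaccount",
+            account_key="<account-key>",
+            resource_types=ResourceTypes(service=True, file_system=True),
+            permission=AccountSasPermissions(read=True, list=True),
+            expiry=datetime.utcnow() + timedelta(hours=1),
+        )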
+    """
+
+    def __init__(self, service=False, file_system=False, object=False  # pylint: disable=redefined-builtin
+                 ):
+        super(ResourceTypes, self).__init__(service=service, container=file_system, object=object)
+
+
+class UserDelegationKey(BlobUserDelegationKey):
+    """
+    Represents a user delegation key, provided to the user by Azure Storage
+    based on their Azure Active Directory access token.
+
+    The fields are saved as simple strings since the user does not have to interact with this object;
+    to generate an identity SAS, the user can simply pass it to the right API.
+
+    :ivar str signed_oid:
+        Object ID of this token.
+    :ivar str signed_tid:
+        Tenant ID of the tenant that issued this token.
+    :ivar str signed_start:
+        The datetime this token becomes valid.
+    :ivar str signed_expiry:
+        The datetime this token expires.
+    :ivar str signed_service:
+        What service this key is valid for.
+    :ivar str signed_version:
+        The version identifier of the REST service that created this token.
+    :ivar str value:
+        The user delegation key.
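+
+    Example (a sketch of obtaining a user delegation key and using it to sign
+    a SAS; ``service_client`` is an assumed DataLakeServiceClient authenticated
+    with an Azure Active Directory token credential, and the account name is
+    hypothetical)::
+
+        from datetime import datetime, timedelta
+        from azure.storage.filedatalake import generate_file_system_sas
+
+        delegation_key = service_client.get_user_delegation_key(
+            key_start_time=datetime.utcnow(),
+            key_expiry_time=datetime.utcnow() + timedelta(hours=1),
+        )
+        # The delegation key stands in for the account key when signing.
+        sas_token = generate_file_system_sas(
+            account_name="myaccount",
+            file_system_name="myfilesystem",
+            credential=delegation_key,
+            permission="rl",
+            expiry=datetime.utcnow() + timedelta(hours=1),
+        )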
+    """
+
+    @classmethod
+    def _from_generated(cls, generated):
+        delegation_key = cls()
+        delegation_key.signed_oid = generated.signed_oid
+        delegation_key.signed_tid = generated.signed_tid
+        delegation_key.signed_start = generated.signed_start
+        delegation_key.signed_expiry = generated.signed_expiry
+        delegation_key.signed_service = generated.signed_service
+        delegation_key.signed_version = generated.signed_version
+        delegation_key.value = generated.value
+        return delegation_key
+
+
+class PublicAccess(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+    """
+    Specifies whether data in the file system may be accessed publicly and the level of access.
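+
+    Example (a sketch of creating a file system with public file-level read
+    access; ``service_client`` is an assumed DataLakeServiceClient)::
+
+        from azure.storage.filedatalake import PublicAccess
+
+        service_client.create_file_system("public-fs", public_access=PublicAccess.FILE)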
+    """
+
+    FILE = 'blob'
+    """
+    Specifies public read access for files. File data within this file system can be read
+    via anonymous request, but file system data is not available. Clients cannot enumerate
+    files within the file system via anonymous request.
+    """
+
+    FILESYSTEM = 'container'
+    """
+    Specifies full public read access for file system and file data. Clients can enumerate
+    files within the file system via anonymous request, but cannot enumerate file systems
+    within the storage account.
+    """
+
+    @classmethod
+    def _from_generated(cls, public_access):
+        if public_access == "blob":  # pylint:disable=no-else-return
+            return cls.FILE
+        elif public_access == "container":
+            return cls.FILESYSTEM
+
+        return None
+
+
+class LocationMode(object):
+    """
+    Specifies the location the request should be sent to. This mode only applies
+    for RA-GRS accounts which allow secondary read access. All other account types
+    must use PRIMARY.
+    """
+
+    PRIMARY = 'primary'  #: Requests should be sent to the primary location.
+    SECONDARY = 'secondary'  #: Requests should be sent to the secondary location, if possible.
+
+
+class DelimitedJsonDialect(BlobDelimitedJSON):
+    """Defines the input or output JSON serialization for a datalake query.
+
+    :keyword str delimiter: The line separator character, default value is '\\\\n'.
+    """
+
+
+class DelimitedTextDialect(BlobDelimitedTextDialect):
+    """Defines the input or output delimited (CSV) serialization for a datalake query request.
+
+    :keyword str delimiter:
+        Column separator, defaults to ','.
+    :keyword str quotechar:
+        Field quote, defaults to '"'.
+    :keyword str lineterminator:
+        Record separator, defaults to '\\\\n'.
+    :keyword str escapechar:
+        Escape char, defaults to empty.
+    :keyword bool has_header:
+        Whether the blob data includes headers in the first line. The default value is False, meaning that the
+        data will be returned inclusive of the first line. If set to True, the data will be returned exclusive
+        of the first line.
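+
+    Example (a sketch of a quick query over CSV data; ``file_client`` is an
+    assumed DataLakeFileClient pointing at a CSV file)::
+
+        from azure.storage.filedatalake import DelimitedTextDialect
+
+        input_format = DelimitedTextDialect(delimiter=",", quotechar='"', has_header=True)
+        output_format = DelimitedTextDialect(delimiter=",", quotechar='"')
+        reader = file_client.query_file(
+            "SELECT * from DataLakeStorage",
+            file_format=input_format,
+            output_format=output_format,
+        )
+        content = reader.readall()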
+    """
+
+
+class ArrowDialect(BlobArrowDialect):
+    """field of an arrow schema.
+
+    All required parameters must be populated in order to send to Azure.
+
+    :param str type: Required.
+    :keyword str name: The name of the field.
+    :keyword int precision: The precision of the field.
+    :keyword int scale: The scale of the field.
+    """
+
+
+class CustomerProvidedEncryptionKey(BlobCustomerProvidedEncryptionKey):
+    """
+    All data in Azure Storage is encrypted at-rest using an account-level encryption key.
+    In versions 2021-06-08 and newer, you can manage the key used to encrypt file contents
+    and application metadata per-file by providing an AES-256 encryption key in requests to the storage service.
+
+    When you use a customer-provided key, Azure Storage does not manage or persist your key.
+    When writing data to a file, the provided key is used to encrypt your data before writing it to disk.
+    A SHA-256 hash of the encryption key is written alongside the file contents,
+    and is used to verify that all subsequent operations against the file use the same encryption key.
+    This hash cannot be used to retrieve the encryption key or decrypt the contents of the file.
+    When reading a file, the provided key is used to decrypt your data after reading it from disk.
+    In both cases, the provided encryption key is securely discarded
+    as soon as the encryption or decryption process completes.
+
+    :param str key_value:
+        Base64-encoded AES-256 encryption key value.
+    :param str key_hash:
+        Base64-encoded SHA256 of the encryption key.
+    :ivar str algorithm:
+        Specifies the algorithm to use when encrypting data using the given key. Must be AES256.
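+
+    Example (a sketch of deriving the key hash and passing the key to a file
+    operation; ``file_client`` is an assumed DataLakeFileClient)::
+
+        import base64
+        import hashlib
+        import os
+
+        raw_key = os.urandom(32)  # 256-bit AES key
+        cpk = CustomerProvidedEncryptionKey(
+            key_value=base64.b64encode(raw_key).decode(),
+            key_hash=base64.b64encode(hashlib.sha256(raw_key).digest()).decode(),
+        )
+        # The same key must be supplied on every subsequent operation.
+        file_client.create_file(cpk=cpk)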
+    """
+
+class EncryptionScopeOptions(BlobContainerEncryptionScope):
+    """The default encryption scope configuration for a file system.
+
+    This scope is used implicitly for all future writes within the file system,
+    but can be overridden per blob operation.
+
+    .. versionadded:: 12.9.0
+
+    :param str default_encryption_scope:
+        Specifies the default encryption scope to set on the file system and use for
+        all future writes.
+    :param bool prevent_encryption_scope_override:
+        If true, prevents any request from specifying a different encryption scope than the scope
+        set on the file system. Default value is false.
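+
+    Example (a sketch of pinning a file system to a default encryption scope;
+    ``service_client`` is an assumed DataLakeServiceClient and the scope name
+    is hypothetical)::
+
+        scope = EncryptionScopeOptions(
+            default_encryption_scope="my-encryption-scope",
+            prevent_encryption_scope_override=True,
+        )
+        service_client.create_file_system("secure-fs", encryption_scope_options=scope)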
+    """
+
+class QuickQueryDialect(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+    """Specifies the quick query input/output dialect."""
+
+    DELIMITEDTEXT = 'DelimitedTextDialect'
+    DELIMITEDJSON = 'DelimitedJsonDialect'
+    PARQUET = 'ParquetDialect'
+
+
+class ArrowType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+
+    INT64 = "int64"
+    BOOL = "bool"
+    TIMESTAMP_MS = "timestamp[ms]"
+    STRING = "string"
+    DOUBLE = "double"
+    DECIMAL = 'decimal'
+
+
+class DataLakeFileQueryError(object):
+    """The error happened during quick query operation.
+
+    :ivar str error:
+        The name of the error.
+    :ivar bool is_fatal:
+        If true, this error prevents further query processing. More result data may be returned,
+        but there is no guarantee that all of the original data will be processed.
+        If false, this error does not prevent further query processing.
+    :ivar str description:
+        A description of the error.
+    :ivar int position:
+        The blob offset at which the error occurred.
+    """
+
+    def __init__(self, error=None, is_fatal=False, description=None, position=None):
+        self.error = error
+        self.is_fatal = is_fatal
+        self.description = description
+        self.position = position
+
+
+class AccessControlChangeCounters(DictMixin):
+    """
+    AccessControlChangeCounters contains counts of operations that change Access Control Lists recursively.
+
+    :ivar int directories_successful:
+        Number of directories where Access Control List has been updated successfully.
+    :ivar int files_successful:
+        Number of files where Access Control List has been updated successfully.
+    :ivar int failure_count:
+        Number of paths where Access Control List update has failed.
+    """
+
+    def __init__(self, directories_successful, files_successful, failure_count):
+        self.directories_successful = directories_successful
+        self.files_successful = files_successful
+        self.failure_count = failure_count
+
+
+class AccessControlChangeResult(DictMixin):
+    """
+    AccessControlChangeResult contains result of operations that change Access Control Lists recursively.
+
+    :ivar ~azure.storage.filedatalake.AccessControlChangeCounters counters:
+        Contains counts of paths changed from start of the operation.
+    :ivar str continuation:
+        Optional continuation token.
+        Value is present when operation is split into multiple batches and can be used to resume progress.
+    """
+
+    def __init__(self, counters, continuation):
+        self.counters = counters
+        self.continuation = continuation
+
+
+class AccessControlChangeFailure(DictMixin):
+    """
+    Represents an entry that failed to update Access Control List.
+
+    :ivar str name:
+        Name of the entry.
+    :ivar bool is_directory:
+        Indicates whether the entry is a directory.
+    :ivar str error_message:
+        Indicates the reason why the entry failed to update.
+    """
+
+    def __init__(self, name, is_directory, error_message):
+        self.name = name
+        self.is_directory = is_directory
+        self.error_message = error_message
+
+
+class AccessControlChanges(DictMixin):
+    """
+    AccessControlChanges contains batch and cumulative counts of operations
+    that change Access Control Lists recursively.
+    Additionally, it exposes path entries that failed to update while these operations progress.
+
+    :ivar ~azure.storage.filedatalake.AccessControlChangeCounters batch_counters:
+        Contains counts of paths changed within single batch.
+    :ivar ~azure.storage.filedatalake.AccessControlChangeCounters aggregate_counters:
+        Contains counts of paths changed from start of the operation.
+    :ivar list(~azure.storage.filedatalake.AccessControlChangeFailure) batch_failures:
+        List of path entries that failed to update Access Control List within single batch.
+    :ivar str continuation:
+        An opaque continuation token that may be used to resume the operations in case of failures.
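+
+    Example (a sketch of observing these counters through a progress hook
+    during a recursive ACL update; ``directory_client`` is an assumed
+    DataLakeDirectoryClient)::
+
+        def report_progress(changes):
+            # 'changes' is an AccessControlChanges instance.
+            print(f"batch: {changes.batch_counters.files_successful} files, "
+                  f"{changes.batch_counters.directories_successful} directories")
+
+        directory_client.update_access_control_recursively(
+            acl="user::rwx,group::r-x,other::---",
+            progress_hook=report_progress,
+        )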
+    """
+
+    def __init__(self, batch_counters, aggregate_counters, batch_failures, continuation):
+        self.batch_counters = batch_counters
+        self.aggregate_counters = aggregate_counters
+        self.batch_failures = batch_failures
+        self.continuation = continuation
+
+
+class DeletedPathProperties(DictMixin):
+    """
+    Properties populated for a deleted path.
+
+    :ivar str name:
+        The name of the file in the path.
+    :ivar ~datetime.datetime deleted_time:
+        A datetime object representing the time at which the path was deleted.
+    :ivar int remaining_retention_days:
+        The number of days that the path will be retained before being permanently deleted by the service.
+    :ivar str deletion_id:
+        The id associated with the deleted path.
+    """
+    def __init__(self, **kwargs):
+        self.name = kwargs.get('name')
+        self.deleted_time = None
+        self.remaining_retention_days = None
+        self.deletion_id = None
+
+
+class AnalyticsLogging(GenLogging):
+    """Azure Analytics Logging settings.
+
+    :keyword str version:
+        The version of Storage Analytics to configure. The default value is 1.0.
+    :keyword bool delete:
+        Indicates whether all delete requests should be logged. The default value is `False`.
+    :keyword bool read:
+        Indicates whether all read requests should be logged. The default value is `False`.
+    :keyword bool write:
+        Indicates whether all write requests should be logged. The default value is `False`.
+    :keyword ~azure.storage.filedatalake.RetentionPolicy retention_policy:
+        Determines how long the associated data should persist. If not specified the retention
+        policy will be disabled by default.
+    """
+
+    def __init__(self, **kwargs):
+        self.version = kwargs.get('version', '1.0')
+        self.delete = kwargs.get('delete', False)
+        self.read = kwargs.get('read', False)
+        self.write = kwargs.get('write', False)
+        self.retention_policy = kwargs.get('retention_policy') or RetentionPolicy()
+
+    @classmethod
+    def _from_generated(cls, generated):
+        if not generated:
+            return cls()
+        return cls(
+            version=generated.version,
+            delete=generated.delete,
+            read=generated.read,
+            write=generated.write,
+            retention_policy=RetentionPolicy._from_generated(generated.retention_policy)  # pylint: disable=protected-access
+        )
+
+
+class Metrics(GenMetrics):
+    """A summary of request statistics grouped by API in hour or minute aggregates.
+
+    :keyword str version:
+        The version of Storage Analytics to configure. The default value is 1.0.
+    :keyword bool enabled:
+        Indicates whether metrics are enabled for the Datalake service.
+        The default value is `False`.
+    :keyword bool include_apis:
+        Indicates whether metrics should generate summary statistics for called API operations.
+    :keyword ~azure.storage.filedatalake.RetentionPolicy retention_policy:
+        Determines how long the associated data should persist. If not specified the retention
+        policy will be disabled by default.
+    """
+
+    def __init__(self, **kwargs):
+        self.version = kwargs.get('version', '1.0')
+        self.enabled = kwargs.get('enabled', False)
+        self.include_apis = kwargs.get('include_apis')
+        self.retention_policy = kwargs.get('retention_policy') or RetentionPolicy()
+
+    @classmethod
+    def _from_generated(cls, generated):
+        if not generated:
+            return cls()
+        return cls(
+            version=generated.version,
+            enabled=generated.enabled,
+            include_apis=generated.include_apis,
+            retention_policy=RetentionPolicy._from_generated(generated.retention_policy)  # pylint: disable=protected-access
+        )
+
+
+class RetentionPolicy(GenRetentionPolicy):
+    """The retention policy which determines how long the associated data should
+    persist.
+
+    :param bool enabled:
+        Indicates whether a retention policy is enabled for the storage service.
+        The default value is False.
+    :param int days:
+        Indicates the number of days that metrics or logging or
+        soft-deleted data should be retained. All data older than this value will
+        be deleted. If enabled=True, the number of days must be specified.
+    """
+
+    def __init__(self, enabled=False, days=None):
+        super(RetentionPolicy, self).__init__(enabled=enabled, days=days, allow_permanent_delete=None)
+        if self.enabled and (self.days is None):
+            raise ValueError("If policy is enabled, 'days' must be specified.")
+
+    @classmethod
+    def _from_generated(cls, generated):
+        if not generated:
+            return cls()
+        return cls(
+            enabled=generated.enabled,
+            days=generated.days,
+        )
+
+
+class StaticWebsite(GenStaticWebsite):
+    """The properties that enable an account to host a static website.
+
+    :keyword bool enabled:
+        Indicates whether this account is hosting a static website.
+        The default value is `False`.
+    :keyword str index_document:
+        The default name of the index page under each directory.
+    :keyword str error_document404_path:
+        The absolute path of the custom 404 page.
+    :keyword str default_index_document_path:
+        Absolute path of the default index page.
+    """
+
+    def __init__(self, **kwargs):
+        self.enabled = kwargs.get('enabled', False)
+        if self.enabled:
+            self.index_document = kwargs.get('index_document')
+            self.error_document404_path = kwargs.get('error_document404_path')
+            self.default_index_document_path = kwargs.get('default_index_document_path')
+        else:
+            self.index_document = None
+            self.error_document404_path = None
+            self.default_index_document_path = None
+
+    @classmethod
+    def _from_generated(cls, generated):
+        if not generated:
+            return cls()
+        return cls(
+            enabled=generated.enabled,
+            index_document=generated.index_document,
+            error_document404_path=generated.error_document404_path,
+            default_index_document_path=generated.default_index_document_path
+        )
+
+
+class CorsRule(GenCorsRule):
+    """CORS is an HTTP feature that enables a web application running under one
+    domain to access resources in another domain. Web browsers implement a
+    security restriction known as same-origin policy that prevents a web page
+    from calling APIs in a different domain; CORS provides a secure way to
+    allow one domain (the origin domain) to call APIs in another domain.
+
+    :param list(str) allowed_origins:
+        A list of origin domains that will be allowed via CORS, or "*" to allow
+        all domains. The list must contain at least one entry. Limited to 64
+        origin domains. Each allowed origin can have up to 256 characters.
+    :param list(str) allowed_methods:
+        A list of HTTP methods that are allowed to be executed by the origin.
+        The list must contain at least one entry. For Azure Storage,
+        permitted methods are DELETE, GET, HEAD, MERGE, POST, OPTIONS or PUT.
+    :keyword list(str) allowed_headers:
+        Defaults to an empty list. A list of headers allowed to be part of
+        the cross-origin request. Limited to 64 defined headers and 2 prefixed
+        headers. Each header can be up to 256 characters.
+    :keyword list(str) exposed_headers:
+        Defaults to an empty list. A list of response headers to expose to CORS
+        clients. Limited to 64 defined headers and two prefixed headers. Each
+        header can be up to 256 characters.
+    :keyword int max_age_in_seconds:
+        The number of seconds that the client/browser should cache a
+        preflight response.
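+
+    Example (a sketch of applying a CORS rule through the service client;
+    ``service_client`` is an assumed DataLakeServiceClient and the origin is
+    hypothetical)::
+
+        from azure.storage.filedatalake import CorsRule
+
+        rule = CorsRule(
+            allowed_origins=["https://contoso.example"],
+            allowed_methods=["GET", "OPTIONS"],
+            allowed_headers=["x-ms-meta-*"],
+            max_age_in_seconds=300,
+        )
+        service_client.set_service_properties(cors=[rule])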
+    """
+
+    def __init__(self, allowed_origins, allowed_methods, **kwargs):
+        self.allowed_origins = ','.join(allowed_origins)
+        self.allowed_methods = ','.join(allowed_methods)
+        self.allowed_headers = ','.join(kwargs.get('allowed_headers', []))
+        self.exposed_headers = ','.join(kwargs.get('exposed_headers', []))
+        self.max_age_in_seconds = kwargs.get('max_age_in_seconds', 0)
+
+    @classmethod
+    def _from_generated(cls, generated):
+        return cls(
+            [generated.allowed_origins],
+            [generated.allowed_methods],
+            allowed_headers=[generated.allowed_headers],
+            exposed_headers=[generated.exposed_headers],
+            max_age_in_seconds=generated.max_age_in_seconds,
+        )
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_path_client.py b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_path_client.py
new file mode 100644
index 00000000..5130ef44
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_path_client.py
@@ -0,0 +1,1118 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+# pylint: disable=too-many-lines, docstring-keyword-should-match-keyword-only
+
+import re
+from datetime import datetime
+from typing import (
+    Any, Dict, Optional, Tuple, Union,
+    TYPE_CHECKING
+)
+from urllib.parse import urlparse, quote
+
+from azure.core.exceptions import AzureError, HttpResponseError
+from azure.core.tracing.decorator import distributed_trace
+from azure.storage.blob import BlobClient
+from ._data_lake_lease import DataLakeLeaseClient
+from ._deserialize import process_storage_error
+from ._generated import AzureDataLakeStorageRESTAPI
+from ._models import LocationMode, DirectoryProperties, AccessControlChangeResult, AccessControlChanges, \
+    AccessControlChangeCounters, AccessControlChangeFailure
+from ._serialize import (
+    add_metadata_headers,
+    compare_api_versions,
+    convert_datetime_to_rfc1123,
+    convert_dfs_url_to_blob_url,
+    get_access_conditions,
+    get_api_version,
+    get_cpk_info,
+    get_lease_id,
+    get_mod_conditions,
+    get_path_http_headers,
+    get_source_mod_conditions,
+)
+from ._shared.base_client import StorageAccountHostsMixin, parse_query
+from ._shared.response_handlers import return_response_headers, return_headers_and_deserialized
+
+if TYPE_CHECKING:
+    from azure.core.credentials import AzureNamedKeyCredential, AzureSasCredential, TokenCredential
+    from ._models import ContentSettings, FileProperties
+
+
+class PathClient(StorageAccountHostsMixin):
+    """A base client for interacting with a DataLake file/directory, even if the file/directory may not
+    yet exist.
+
+    :param str account_url:
+        The URI to the storage account.
+    :param str file_system_name:
+        The file system for the directory or files.
+    :param str path_name:
+        The whole file path, used to interact with a specific file,
+        e.g. "{directory}/{subdirectory}/{file}".
+    :param credential:
+        The credentials with which to authenticate. This is optional if the
+        account URL already has a SAS token. The value can be a SAS token string,
+        an instance of an AzureSasCredential or AzureNamedKeyCredential from azure.core.credentials,
+        an account shared access key, or an instance of a TokenCredential class from azure.identity.
+        If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential
+        - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError.
+        If using an instance of AzureNamedKeyCredential, "name" should be the storage account name, and "key"
+        should be the storage account key.
+    :type credential:
+        ~azure.core.credentials.AzureNamedKeyCredential or
+        ~azure.core.credentials.AzureSasCredential or
+        ~azure.core.credentials.TokenCredential or
+        str or dict[str, str] or None
+    :keyword str api_version:
+        The Storage API version to use for requests. Default value is the most recent service version that is
+        compatible with the current SDK. Setting to an older version may result in reduced feature compatibility.
+    :keyword str audience: The audience to use when requesting tokens for Azure Active Directory
+        authentication. Only has an effect when credential is of type TokenCredential. The value could be
+        https://storage.azure.com/ (default) or https://<account>.blob.core.windows.net.
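+
+    Example (a sketch of constructing a concrete subclass, since PathClient is
+    a base class; the URL and names are hypothetical)::
+
+        from azure.storage.filedatalake import DataLakeDirectoryClient
+
+        directory_client = DataLakeDirectoryClient(
+            account_url="https://myaccount.dfs.core.windows.net",
+            file_system_name="myfilesystem",
+            directory_name="mydirectory",
+            credential="<account-key>",
+        )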
+    """
+    def __init__(
+            self, account_url: str,
+            file_system_name: str,
+            path_name: str,
+            credential: Optional[Union[str, Dict[str, str], "AzureNamedKeyCredential", "AzureSasCredential", "TokenCredential"]] = None,  # pylint: disable=line-too-long
+            **kwargs: Any
+        ) -> None:
+        try:
+            if not account_url.lower().startswith('http'):
+                account_url = "https://" + account_url
+        except AttributeError as exc:
+            raise ValueError("Account URL must be a string.") from exc
+        parsed_url = urlparse(account_url.rstrip('/'))
+
+        # remove the preceding/trailing delimiter from the path components
+        file_system_name = file_system_name.strip('/')
+
+        # the name of root directory is /
+        if path_name != '/':
+            path_name = path_name.strip('/')
+
+        if not (file_system_name and path_name):
+            raise ValueError("Please specify a file system name and file path.")
+        if not parsed_url.netloc:
+            raise ValueError(f"Invalid URL: {account_url}")
+
+        blob_account_url = convert_dfs_url_to_blob_url(account_url)
+        self._blob_account_url = blob_account_url
+
+        datalake_hosts = kwargs.pop('_hosts', None)
+        blob_hosts = None
+        if datalake_hosts:
+            blob_primary_account_url = convert_dfs_url_to_blob_url(datalake_hosts[LocationMode.PRIMARY])
+            blob_hosts = {LocationMode.PRIMARY: blob_primary_account_url, LocationMode.SECONDARY: ""}
+        self._blob_client = BlobClient(blob_account_url, file_system_name, path_name,
+                                       credential=credential, _hosts=blob_hosts, **kwargs)
+
+        _, sas_token = parse_query(parsed_url.query)
+        self.file_system_name = file_system_name
+        self.path_name = path_name
+
+        self._query_str, self._raw_credential = self._format_query_string(sas_token, credential)
+
+        super(PathClient, self).__init__(parsed_url, service='dfs', credential=self._raw_credential,
+                                         _hosts=datalake_hosts, **kwargs)
+        # ADLS doesn't support secondary endpoint, make sure it's empty
+        self._hosts[LocationMode.SECONDARY] = ""
+        self._api_version = get_api_version(kwargs)
+        self._client = self._build_generated_client(self.url)
+        self._datalake_client_for_blob_operation = self._build_generated_client(self._blob_client.url)
+
+    def _build_generated_client(self, url: str) -> AzureDataLakeStorageRESTAPI:
+        client = AzureDataLakeStorageRESTAPI(
+            url,
+            base_url=url,
+            file_system=self.file_system_name,
+            path=self.path_name,
+            pipeline=self._pipeline
+        )
+        client._config.version = self._api_version  # pylint: disable=protected-access
+        return client
+
+    def __exit__(self, *args):
+        self._blob_client.close()
+        self._datalake_client_for_blob_operation.close()
+        super(PathClient, self).__exit__(*args)
+
+    def close(self):
+        # type: () -> None
+        """ This method is to close the sockets opened by the client.
+        It need not be used when using with a context manager.
+        """
+        self.__exit__()
+
+    def _format_url(self, hostname):
+        file_system_name = self.file_system_name
+        if isinstance(file_system_name, str):
+            file_system_name = file_system_name.encode('UTF-8')
+        return (f"{self.scheme}://{hostname}/{quote(file_system_name)}/"
+                f"{quote(self.path_name, safe='~')}{self._query_str}")
+
+    def _create_path_options(self, resource_type,
+                             content_settings=None,  # type: Optional[ContentSettings]
+                             metadata=None,  # type: Optional[Dict[str, str]]
+                             **kwargs):
+        # type: (...) -> Dict[str, Any]
+        access_conditions = get_access_conditions(kwargs.pop('lease', None))
+        mod_conditions = get_mod_conditions(kwargs)
+
+        path_http_headers = None
+        if content_settings:
+            path_http_headers = get_path_http_headers(content_settings)
+
+        cpk_info = get_cpk_info(self.scheme, kwargs)
+
+        expires_on = kwargs.pop('expires_on', None)
+        if expires_on:
+            try:
+                expires_on = convert_datetime_to_rfc1123(expires_on)
+                kwargs['expiry_options'] = 'Absolute'
+            except AttributeError:
+                expires_on = str(expires_on)
+                kwargs['expiry_options'] = 'RelativeToNow'
+
+        options = {
+            'resource': resource_type,
+            'properties': add_metadata_headers(metadata),
+            'permissions': kwargs.pop('permissions', None),
+            'umask': kwargs.pop('umask', None),
+            'owner': kwargs.pop('owner', None),
+            'group': kwargs.pop('group', None),
+            'acl': kwargs.pop('acl', None),
+            'proposed_lease_id': kwargs.pop('lease_id', None),
+            'lease_duration': kwargs.pop('lease_duration', None),
+            'expiry_options': kwargs.pop('expiry_options', None),
+            'expires_on': expires_on,
+            'path_http_headers': path_http_headers,
+            'lease_access_conditions': access_conditions,
+            'modified_access_conditions': mod_conditions,
+            'cpk_info': cpk_info,
+            'timeout': kwargs.pop('timeout', None),
+            'encryption_context': kwargs.pop('encryption_context', None),
+            'cls': return_response_headers}
+        options.update(kwargs)
+        return options
+
+    def _create(self, resource_type, content_settings=None, metadata=None, **kwargs):
+        # type: (...) -> Dict[str, Union[str, datetime]]
+        """
+        Create a directory or a file.
+
+        :param resource_type:
+            Required for Create File and Create Directory.
+            The value must be "file" or "directory". Possible values include:
+            'directory', 'file'
+        :type resource_type: str
+        :param ~azure.storage.filedatalake.ContentSettings content_settings:
+            ContentSettings object used to set path properties.
+        :param metadata:
+            Name-value pairs associated with the file/directory as metadata.
+        :type metadata: dict(str, str)
+        :keyword lease:
+            Required if the file/directory has an active lease. Value can be a LeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
+        :keyword str umask:
+            Optional and only valid if Hierarchical Namespace is enabled for the account.
+            When creating a file or directory and the parent folder does not have a default ACL,
+            the umask restricts the permissions of the file or directory to be created.
+            The resulting permission is given by p & ^u, where p is the permission and u is the umask.
+            For example, if p is 0777 and u is 0057, then the resulting permission is 0720.
+            The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027.
+            The umask must be specified in 4-digit octal notation (e.g. 0766).
+        :keyword str owner:
+            The owner of the file or directory.
+        :keyword str group:
+            The owning group of the file or directory.
+        :keyword str acl:
+            Sets POSIX access control rights on files and directories. The value is a
+            comma-separated list of access control entries. Each access control entry (ACE) consists of a
+            scope, a type, a user or group identifier, and permissions in the format
+            "[scope:][type]:[id]:[permissions]".
+        :keyword str lease_id:
+            Proposed lease ID, in a GUID string format. The DataLake service returns
+            400 (Invalid request) if the proposed lease ID is not in the correct format.
+        :keyword int lease_duration:
+            Specifies the duration of the lease, in seconds, or negative one
+            (-1) for a lease that never expires. A non-infinite lease can be
+            between 15 and 60 seconds. A lease duration cannot be changed
+            using renew or change.
+        :keyword expires_on:
+            The time to set the file to expiry.
+            If the type of expires_on is an int, expiration time will be set
+            as the number of milliseconds elapsed from creation time.
+            If the type of expires_on is datetime, expiration time will be set
+            absolute to the time provided. If no time zone info is provided, this
+            will be interpreted as UTC.
+        :paramtype expires_on: datetime or int
+        :keyword permissions:
+            Optional and only valid if Hierarchical Namespace
+            is enabled for the account. Sets POSIX access permissions for the file
+            owner, the file owning group, and others. Each class may be granted
+            read, write, or execute permission.  The sticky bit is also supported.
+            Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are
+            supported.
+        :type permissions: str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword ~azure.storage.filedatalake.CustomerProvidedEncryptionKey cpk:
+            Encrypts the data on the service-side with the given key.
+            Use of customer-provided keys must be done over HTTPS.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :keyword str encryption_context:
+            Specifies the encryption context to set on the file.
+        :return: A dictionary of response headers.
+        :rtype: dict[str, str] or dict[str, ~datetime.datetime]
+        """
+        lease_id = kwargs.get('lease_id', None)
+        lease_duration = kwargs.get('lease_duration', None)
+        if lease_id and not lease_duration:
+            raise ValueError("Please specify both lease_id and lease_duration.")
+        if lease_duration and not lease_id:
+            raise ValueError("Please specify both lease_id and lease_duration.")
+        options = self._create_path_options(
+            resource_type,
+            content_settings=content_settings,
+            metadata=metadata,
+            **kwargs)
+        try:
+            return self._client.path.create(**options)
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    @staticmethod
+    def _delete_path_options(paginated: Optional[bool], **kwargs) -> Dict[str, Any]:
+        access_conditions = get_access_conditions(kwargs.pop('lease', None))
+        mod_conditions = get_mod_conditions(kwargs)
+
+        options = {
+            'paginated': paginated,
+            'lease_access_conditions': access_conditions,
+            'modified_access_conditions': mod_conditions,
+            'cls': return_response_headers,
+            'timeout': kwargs.pop('timeout', None)}
+        options.update(kwargs)
+        return options
+
+    def _delete(self, **kwargs):
+        # type: (**Any) -> Dict[str, Union[datetime, str]]
+        """
+        Marks the specified path for deletion.
+
+        :keyword lease:
+            Required if the file/directory has an active lease. Value can be a LeaseClient object
+            or the lease ID as a string.
+        :type lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :returns: A dictionary containing information about the deleted path.
+        :rtype: dict[str, Any]
+        """
+        # Perform paginated delete only if using OAuth, deleting a directory, and api version is 2023-08-03 or later
+        # The pagination is only for ACL checks, the final request remains the atomic delete operation
+        paginated = None
+        if (compare_api_versions(self.api_version, '2023-08-03') >= 0 and
+            hasattr(self.credential, 'get_token') and
+            kwargs.get('recursive')):  # Directory delete will always specify recursive
+            paginated = True
+
+        options = self._delete_path_options(paginated, **kwargs)
+        try:
+            response_headers = self._client.path.delete(**options)
+            # Loop until continuation token is None for paginated delete
+            while response_headers['continuation']:
+                response_headers = self._client.path.delete(
+                    continuation=response_headers['continuation'],
+                    **options)
+
+            return response_headers
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    @staticmethod
+    def _set_access_control_options(owner=None, group=None, permissions=None, acl=None, **kwargs):
+        # type: (...) -> Dict[str, Any]
+
+        access_conditions = get_access_conditions(kwargs.pop('lease', None))
+        mod_conditions = get_mod_conditions(kwargs)
+
+        options = {
+            'owner': owner,
+            'group': group,
+            'permissions': permissions,
+            'acl': acl,
+            'lease_access_conditions': access_conditions,
+            'modified_access_conditions': mod_conditions,
+            'timeout': kwargs.pop('timeout', None),
+            'cls': return_response_headers}
+        options.update(kwargs)
+        return options
+
+    @distributed_trace
+    def set_access_control(self, owner=None,  # type: Optional[str]
+                           group=None,  # type: Optional[str]
+                           permissions=None,  # type: Optional[str]
+                           acl=None,  # type: Optional[str]
+                           **kwargs):
+        # type: (...) -> Dict[str, Union[str, datetime]]
+        """
+        Set the owner, group, permissions, or access control list for a path.
+
+        :param owner:
+            Optional. The owner of the file or directory.
+        :type owner: str
+        :param group:
+            Optional. The owning group of the file or directory.
+        :type group: str
+        :param permissions:
+            Optional and only valid if Hierarchical Namespace
+            is enabled for the account. Sets POSIX access permissions for the file
+            owner, the file owning group, and others. Each class may be granted
+            read, write, or execute permission.  The sticky bit is also supported.
+            Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are
+            supported.
+            permissions and acl are mutually exclusive.
+        :type permissions: str
+        :param acl:
+            Sets POSIX access control rights on files and directories.
+            The value is a comma-separated list of access control entries. Each
+            access control entry (ACE) consists of a scope, a type, a user or
+            group identifier, and permissions in the format
+            "[scope:][type]:[id]:[permissions]".
+            permissions and acl are mutually exclusive.
+        :type acl: str
+        :keyword lease:
+            Required if the file/directory has an active lease. Value can be a LeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :returns: response dict containing access control options (Etag and last modified).
+        :rtype: dict[str, str] or dict[str, ~datetime.datetime]
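+
+        .. admonition:: Example:
+
+            .. code-block:: python
+
+                # Illustrative sketch, not an official sample; assumes `client` is an
+                # existing DataLakeDirectoryClient or DataLakeFileClient.
+                client.set_access_control(permissions='rwxrwx---')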
+        """
+        if not any([owner, group, permissions, acl]):
+            raise ValueError("At least one parameter should be set for set_access_control API")
+        options = self._set_access_control_options(owner=owner, group=group, permissions=permissions, acl=acl, **kwargs)
+        try:
+            return self._client.path.set_access_control(**options)
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    @staticmethod
+    def _get_access_control_options(upn=None,  # type: Optional[bool]
+                                    **kwargs):
+        # type: (...) -> Dict[str, Any]
+
+        access_conditions = get_access_conditions(kwargs.pop('lease', None))
+        mod_conditions = get_mod_conditions(kwargs)
+
+        options = {
+            'action': 'getAccessControl',
+            'upn': upn if upn else False,
+            'lease_access_conditions': access_conditions,
+            'modified_access_conditions': mod_conditions,
+            'timeout': kwargs.pop('timeout', None),
+            'cls': return_response_headers}
+        options.update(kwargs)
+        return options
+
+    @distributed_trace
+    def get_access_control(self, upn=None,  # type: Optional[bool]
+                           **kwargs):
+        # type: (...) -> Dict[str, Any]
+        """
+        Get the owner, group, permissions, and access control list for a path.
+
+        :param upn: Optional.
+            Valid only when Hierarchical Namespace is
+            enabled for the account. If "true", the user identity values returned
+            in the x-ms-owner, x-ms-group, and x-ms-acl response headers will be
+            transformed from Azure Active Directory Object IDs to User Principal
+            Names.  If "false", the values will be returned as Azure Active
+            Directory Object IDs. The default value is false. Note that group and
+            application Object IDs are not translated because they do not have
+            unique friendly names.
+        :type upn: bool
+        :keyword lease:
+            Required if the file/directory has an active lease. Value can be a LeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :returns: response dict containing access control options with no modifications.
+        :rtype: dict[str, Any]
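+
+        .. admonition:: Example:
+
+            .. code-block:: python
+
+                # Illustrative sketch, not an official sample; assumes `client` is an
+                # existing DataLakeDirectoryClient or DataLakeFileClient. The returned
+                # dict carries the owner, group, permissions and acl response values.
+                acl_props = client.get_access_control(upn=True)
+                print(acl_props)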
+        """
+        options = self._get_access_control_options(upn=upn, **kwargs)
+        try:
+            return self._client.path.get_properties(**options)
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    @staticmethod
+    def _set_access_control_recursive_options(mode, acl, **kwargs):
+        # type: (str, str, **Any) -> Dict[str, Any]
+
+        options = {
+            'mode': mode,
+            'force_flag': kwargs.pop('continue_on_failure', None),
+            'timeout': kwargs.pop('timeout', None),
+            'continuation': kwargs.pop('continuation_token', None),
+            'max_records': kwargs.pop('batch_size', None),
+            'acl': acl,
+            'cls': return_headers_and_deserialized}
+        options.update(kwargs)
+        return options
+
+    @distributed_trace
+    def set_access_control_recursive(self, acl, **kwargs):
+        # type: (str, **Any) -> AccessControlChangeResult
+        """
+        Sets the Access Control on a path and sub-paths.
+
+        :param acl:
+            Sets POSIX access control rights on files and directories.
+            The value is a comma-separated list of access control entries. Each
+            access control entry (ACE) consists of a scope, a type, a user or
+            group identifier, and permissions in the format
+            "[scope:][type]:[id]:[permissions]".
+        :type acl: str
+        :keyword func(~azure.storage.filedatalake.AccessControlChanges) progress_hook:
+            Callback where the caller can track progress of the operation
+            as well as collect paths that failed to change Access Control.
+        :keyword str continuation_token:
+            Optional continuation token that can be used to resume a previously stopped operation.
+        :keyword int batch_size:
+            Optional. If the data set size exceeds the batch size, the operation will be split into multiple
+            requests so that progress can be tracked. The batch size should be between 1 and 2000.
+            The default when unspecified is 2000.
+        :keyword int max_batches:
+            Optional. Defines the maximum number of batches that a single change Access Control operation can execute.
+            If the maximum is reached before all sub-paths are processed,
+            the continuation token can be used to resume the operation.
+            An empty value indicates that the maximum number of batches is unbounded and the operation
+            continues to the end.
+        :keyword bool continue_on_failure:
+            If set to False, the operation will terminate quickly on encountering user errors (4XX).
+            If True, the operation will ignore user errors and proceed with the operation on other sub-entities of
+            the directory.
+            A continuation token will only be returned in case of user errors when continue_on_failure is True.
+            If not set, the default value is False.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :return: A summary of the recursive operations, including the count of successes and failures,
+            as well as a continuation token in case the operation was terminated prematurely.
+        :rtype: ~azure.storage.filedatalake.AccessControlChangeResult
+        :raises ~azure.core.exceptions.AzureError:
+            The user can restart the operation using the continuation_token field of AzureError if the token is available.
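+
+        .. admonition:: Example:
+
+            .. code-block:: python
+
+                # Illustrative sketch, not an official sample; assumes `client` is an
+                # existing DataLakeDirectoryClient. The callback receives an
+                # AccessControlChanges object for each completed batch.
+                def report(changes):
+                    print(changes.aggregate_counters.files_successful)
+
+                result = client.set_access_control_recursive(
+                    acl='user::rwx,group::r-x,other::---', progress_hook=report)
+                print(result.counters.failure_count)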
+        """
+        if not acl:
+            raise ValueError("The Access Control List must be set for this operation")
+
+        progress_hook = kwargs.pop('progress_hook', None)
+        max_batches = kwargs.pop('max_batches', None)
+        options = self._set_access_control_recursive_options(mode='set', acl=acl, **kwargs)
+        return self._set_access_control_internal(options=options, progress_hook=progress_hook,
+                                                 max_batches=max_batches)
+
+    @distributed_trace
+    def update_access_control_recursive(self, acl, **kwargs):
+        # type: (str, **Any) -> AccessControlChangeResult
+        """
+        Modifies the Access Control on a path and sub-paths.
+
+        :param acl:
+            Modifies POSIX access control rights on files and directories.
+            The value is a comma-separated list of access control entries. Each
+            access control entry (ACE) consists of a scope, a type, a user or
+            group identifier, and permissions in the format
+            "[scope:][type]:[id]:[permissions]".
+        :type acl: str
+        :keyword func(~azure.storage.filedatalake.AccessControlChanges) progress_hook:
+            Callback where the caller can track progress of the operation
+            as well as collect paths that failed to change Access Control.
+        :keyword str continuation_token:
+            Optional continuation token that can be used to resume a previously stopped operation.
+        :keyword int batch_size:
+            Optional. If the data set size exceeds the batch size, the operation will be split into multiple
+            requests so that progress can be tracked. The batch size should be between 1 and 2000.
+            The default when unspecified is 2000.
+        :keyword int max_batches:
+            Optional. Defines the maximum number of batches that a single change Access Control operation can execute.
+            If the maximum is reached before all sub-paths are processed,
+            the continuation token can be used to resume the operation.
+            An empty value indicates that the maximum number of batches is unbounded and the operation
+            continues to the end.
+        :keyword bool continue_on_failure:
+            If set to False, the operation will terminate quickly on encountering user errors (4XX).
+            If True, the operation will ignore user errors and proceed with the operation on other sub-entities of
+            the directory.
+            A continuation token will only be returned in case of user errors when continue_on_failure is True.
+            If not set, the default value is False.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :return: A summary of the recursive operations, including the count of successes and failures,
+            as well as a continuation token in case the operation was terminated prematurely.
+        :rtype: ~azure.storage.filedatalake.AccessControlChangeResult
+        :raises ~azure.core.exceptions.AzureError:
+            The user can restart the operation using the continuation_token field of AzureError if the token is available.
+        """
+        if not acl:
+            raise ValueError("The Access Control List must be set for this operation")
+
+        progress_hook = kwargs.pop('progress_hook', None)
+        max_batches = kwargs.pop('max_batches', None)
+        options = self._set_access_control_recursive_options(mode='modify', acl=acl, **kwargs)
+        return self._set_access_control_internal(options=options, progress_hook=progress_hook,
+                                                 max_batches=max_batches)
+
+    @distributed_trace
+    def remove_access_control_recursive(self, acl, **kwargs):
+        # type: (str, **Any) -> AccessControlChangeResult
+        """
+        Removes the Access Control on a path and sub-paths.
+
+        :param acl:
+            Removes POSIX access control rights on files and directories.
+            The value is a comma-separated list of access control entries. Each
+            access control entry (ACE) consists of a scope, a type, and a user or
+            group identifier in the format "[scope:][type]:[id]".
+        :type acl: str
+        :keyword func(~azure.storage.filedatalake.AccessControlChanges) progress_hook:
+            Callback where the caller can track progress of the operation
+            as well as collect paths that failed to change Access Control.
+        :keyword str continuation_token:
+            Optional continuation token that can be used to resume a previously stopped operation.
+        :keyword int batch_size:
+            Optional. If the data set size exceeds the batch size, the operation will be split into multiple
+            requests so that progress can be tracked. The batch size should be between 1 and 2000.
+            The default when unspecified is 2000.
+        :keyword int max_batches:
+            Optional. Defines the maximum number of batches that a single change Access Control operation can execute.
+            If the maximum is reached before all sub-paths are processed,
+            the continuation token can be used to resume the operation.
+            An empty value indicates that the maximum number of batches is unbounded and the operation
+            continues to the end.
+        :keyword bool continue_on_failure:
+            If set to False, the operation will terminate quickly on encountering user errors (4XX).
+            If True, the operation will ignore user errors and proceed with the operation on other sub-entities of
+            the directory.
+            A continuation token will only be returned in case of user errors when continue_on_failure is True.
+            If not set, the default value is False.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :return: A summary of the recursive operations, including the count of successes and failures,
+            as well as a continuation token in case the operation was terminated prematurely.
+        :rtype: ~azure.storage.filedatalake.AccessControlChangeResult
+        :raises ~azure.core.exceptions.AzureError:
+            The user can restart the operation using the continuation_token field of AzureError if the token is available.
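+
+        .. admonition:: Example:
+
+            .. code-block:: python
+
+                # Illustrative sketch, not an official sample; assumes `client` is an
+                # existing DataLakeDirectoryClient and 'some-object-id' is a placeholder.
+                # If the operation fails part-way, it is resumed from the token.
+                from azure.core.exceptions import AzureError
+                acl = 'default:user:some-object-id'
+                try:
+                    client.remove_access_control_recursive(acl=acl)
+                except AzureError as error:
+                    token = getattr(error, 'continuation_token', None)
+                    if token:
+                        client.remove_access_control_recursive(acl=acl, continuation_token=token)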
+        """
+        if not acl:
+            raise ValueError("The Access Control List must be set for this operation")
+
+        progress_hook = kwargs.pop('progress_hook', None)
+        max_batches = kwargs.pop('max_batches', None)
+        options = self._set_access_control_recursive_options(mode='remove', acl=acl, **kwargs)
+        return self._set_access_control_internal(options=options, progress_hook=progress_hook,
+                                                 max_batches=max_batches)
+
+    def _set_access_control_internal(self, options, progress_hook, max_batches=None):
+        try:
+            continue_on_failure = options.get('force_flag')
+            total_directories_successful = 0
+            total_files_success = 0
+            total_failure_count = 0
+            batch_count = 0
+            last_continuation_token = None
+            current_continuation_token = None
+            continue_operation = True
+            while continue_operation:
+                headers, resp = self._client.path.set_access_control_recursive(**options)
+
+                # make a running tally so that we can report the final results
+                total_directories_successful += resp.directories_successful
+                total_files_success += resp.files_successful
+                total_failure_count += resp.failure_count
+                batch_count += 1
+                current_continuation_token = headers['continuation']
+
+                if current_continuation_token is not None:
+                    last_continuation_token = current_continuation_token
+
+                if progress_hook is not None:
+                    progress_hook(AccessControlChanges(
+                        batch_counters=AccessControlChangeCounters(
+                            directories_successful=resp.directories_successful,
+                            files_successful=resp.files_successful,
+                            failure_count=resp.failure_count,
+                        ),
+                        aggregate_counters=AccessControlChangeCounters(
+                            directories_successful=total_directories_successful,
+                            files_successful=total_files_success,
+                            failure_count=total_failure_count,
+                        ),
+                        batch_failures=[AccessControlChangeFailure(
+                            name=failure.name,
+                            is_directory=failure.type == 'DIRECTORY',
+                            error_message=failure.error_message) for failure in resp.failed_entries],
+                        continuation=last_continuation_token))
+
+                # update the continuation token, if there are more operations that cannot be completed in a single call
+                max_batches_satisfied = (max_batches is not None and batch_count == max_batches)
+                continue_operation = bool(current_continuation_token) and not max_batches_satisfied
+                options['continuation'] = current_continuation_token
+
+            # currently the service stops on any failure, so we should send back the last continuation token
+            # for the user to retry the failed updates
+            # otherwise we should just return what the service gave us
+            return AccessControlChangeResult(counters=AccessControlChangeCounters(
+                directories_successful=total_directories_successful,
+                files_successful=total_files_success,
+                failure_count=total_failure_count),
+                continuation=last_continuation_token
+                if total_failure_count > 0 and not continue_on_failure else current_continuation_token)
+        except HttpResponseError as error:
+            error.continuation_token = last_continuation_token
+            process_storage_error(error)
+        except AzureError as error:
+            error.continuation_token = last_continuation_token
+            raise error
+
+    def _parse_rename_path(self, new_name: str) -> Tuple[str, str, Optional[str]]:
+        new_name = new_name.strip('/')
+        new_file_system = new_name.split('/')[0]
+        new_path = new_name[len(new_file_system):].strip('/')
+
+        new_sas = None
+        sas_split = new_path.split('?')
+        # If there is a ?, there could be a SAS token
+        if len(sas_split) > 1:
+            # Check last element for SAS by looking for sv= and sig=
+            potential_sas = sas_split[-1]
+            if re.search(r'sv=\d{4}-\d{2}-\d{2}', potential_sas) and 'sig=' in potential_sas:
+                new_sas = potential_sas
+                # Remove SAS from new path
+                new_path = new_path[:-(len(new_sas) + 1)]
+
+        if not new_sas:
+            if not self._raw_credential and new_file_system != self.file_system_name:
+                raise ValueError("please provide the sas token for the new file")
+            if not self._raw_credential and new_file_system == self.file_system_name:
+                new_sas = self._query_str.strip('?')
+
+        return new_file_system, new_path, new_sas
+
+    def _rename_path_options(self,
+                             rename_source,  # type: str
+                             content_settings=None,  # type: Optional[ContentSettings]
+                             metadata=None,  # type: Optional[Dict[str, str]]
+                             **kwargs):
+        # type: (...) -> Dict[str, Any]
+        if metadata or kwargs.pop('permissions', None) or kwargs.pop('umask', None):
+            raise ValueError("metadata, permissions, umask is not supported for this operation")
+
+        access_conditions = get_access_conditions(kwargs.pop('lease', None))
+        source_lease_id = get_lease_id(kwargs.pop('source_lease', None))
+        mod_conditions = get_mod_conditions(kwargs)
+        source_mod_conditions = get_source_mod_conditions(kwargs)
+
+        path_http_headers = None
+        if content_settings:
+            path_http_headers = get_path_http_headers(content_settings)
+
+        options = {
+            'rename_source': rename_source,
+            'path_http_headers': path_http_headers,
+            'lease_access_conditions': access_conditions,
+            'source_lease_id': source_lease_id,
+            'modified_access_conditions': mod_conditions,
+            'source_modified_access_conditions': source_mod_conditions,
+            'timeout': kwargs.pop('timeout', None),
+            'mode': 'legacy',
+            'cls': return_response_headers}
+        options.update(kwargs)
+        return options
+
+    def _rename_path(self, rename_source, **kwargs):
+        # type: (str, **Any) -> Dict[str, Any]
+        """
+        Rename a directory or file.
+
+        :param rename_source:
+            The value must have the following format: "/{filesystem}/{path}".
+        :type rename_source: str
+        :keyword ~azure.storage.filedatalake.ContentSettings content_settings:
+            ContentSettings object used to set path properties.
+        :keyword source_lease:
+            A lease ID for the source path. If specified,
+            the source path must have an active lease and the lease ID must
+            match.
+        :paramtype source_lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
+        :keyword lease:
+            Required if the file/directory has an active lease. Value can be a LeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword ~datetime.datetime source_if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime source_if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str source_etag:
+            The source ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions source_match_condition:
+            The source match condition to use upon the etag.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :returns: response dict containing information about the renamed path.
+        :rtype: dict[str, Any]
+        """
+        options = self._rename_path_options(
+            rename_source,
+            **kwargs)
+        try:
+            return self._client.path.create(**options)
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    def _get_path_properties(self, **kwargs):
+        # type: (**Any) -> Union[FileProperties, DirectoryProperties]
+        """Returns all user-defined metadata, standard HTTP properties, and
+        system properties for the file or directory. It does not return the content of the directory or file.
+
+        :keyword lease:
+            Required if the directory or file has an active lease. Value can be a DataLakeLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword ~azure.storage.filedatalake.CustomerProvidedEncryptionKey cpk:
+            Decrypts the data on the service-side with the given key.
+            Use of customer-provided keys must be done over HTTPS.
+            Required if the file/directory was created with a customer-provided key.
+        :keyword bool upn:
+            If True, the user identity values returned in the x-ms-owner, x-ms-group,
+            and x-ms-acl response headers will be transformed from Azure Active Directory Object IDs to User 
+            Principal Names in the owner, group, and acl fields of the respective property object returned.
+            If False, the values will be returned as Azure Active Directory Object IDs.
+            The default value is False. Note that group and application Object IDs are not translated
+            because they do not have unique friendly names.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :returns:
+            Information including user-defined metadata, standard HTTP properties,
+            and system properties for the file or directory.
+        :rtype: DirectoryProperties or FileProperties
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../tests/test_blob_samples_common.py
+                :start-after: [START get_blob_properties]
+                :end-before: [END get_blob_properties]
+                :language: python
+                :dedent: 8
+                :caption: Getting the properties for a file/directory.
+        """
+        upn = kwargs.pop('upn', None)
+        if upn:
+            headers = kwargs.pop('headers', {})
+            headers['x-ms-upn'] = str(upn)
+            kwargs['headers'] = headers
+        path_properties = self._blob_client.get_blob_properties(**kwargs)
+        return path_properties
+
+    def _exists(self, **kwargs):
+        # type: (**Any) -> bool
+        """
+        Returns True if a path exists and returns False otherwise.
+
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :returns: True if a path exists, False otherwise.
+        :rtype: bool
+        """
+        return self._blob_client.exists(**kwargs)
+
+    @distributed_trace
+    def set_metadata(self, metadata,  # type: Dict[str, str]
+                     **kwargs):
+        # type: (...) -> Dict[str, Union[str, datetime]]
+        """Sets one or more user-defined name-value pairs for the specified
+        file system. Each call to this operation replaces all existing metadata
+        attached to the file system. To remove all metadata from the file system,
+        call this operation with no metadata dict.
+
+        :param metadata:
+            A dict containing name-value pairs to associate with the file system as
+            metadata. Example: {'category':'test'}
+        :type metadata: dict[str, str]
+        :keyword lease:
+            If specified, set_metadata only succeeds if the
+            path's lease is active and matches this ID.
+        :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword ~azure.storage.filedatalake.CustomerProvidedEncryptionKey cpk:
+            Encrypts the data on the service-side with the given key.
+            Use of customer-provided keys must be done over HTTPS.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :returns: path-updated property dict (Etag and last modified).
+        :rtype: dict[str, str] or dict[str, ~datetime.datetime]
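+
+        .. admonition:: Example:
+
+            .. code-block:: python
+
+                # Illustrative sketch, not an official sample; assumes `client` is an
+                # existing DataLakeDirectoryClient or DataLakeFileClient.
+                client.set_metadata(metadata={'category': 'test'})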
+        """
+        return self._blob_client.set_blob_metadata(metadata=metadata, **kwargs)
+
+    @distributed_trace
+    def set_http_headers(self, content_settings: Optional["ContentSettings"] = None, **kwargs):
+        # type: (...) -> Dict[str, Any]
+        """Sets system properties on the file or directory.
+
+        If one property is set for the content_settings, all properties will be overridden.
+
+        :param ~azure.storage.filedatalake.ContentSettings content_settings:
+            ContentSettings object used to set file/directory properties.
+        :keyword lease:
+            If specified, set_http_headers only succeeds if the
+            path's lease is active and matches this ID.
+        :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :returns: file/directory-updated property dict (Etag and last modified)
+        :rtype: dict[str, Any]
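+
+        .. admonition:: Example:
+
+            .. code-block:: python
+
+                # Illustrative sketch, not an official sample; assumes `client` is an
+                # existing DataLakeFileClient.
+                from azure.storage.filedatalake import ContentSettings
+                client.set_http_headers(content_settings=ContentSettings(
+                    content_type='text/plain', cache_control='no-cache'))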
+        """
+        return self._blob_client.set_http_headers(content_settings=content_settings, **kwargs)
+
+    @distributed_trace
+    def acquire_lease(self, lease_duration=-1,  # type: Optional[int]
+                      lease_id=None,  # type: Optional[str]
+                      **kwargs):
+        # type: (...) -> DataLakeLeaseClient
+        """
+        Requests a new lease. If the file or directory does not have an active lease,
+        the DataLake service creates a lease on the file/directory and returns a new
+        lease ID.
+
+        :param int lease_duration:
+            Specifies the duration of the lease, in seconds, or negative one
+            (-1) for a lease that never expires. A non-infinite lease can be
+            between 15 and 60 seconds. A lease duration cannot be changed
+            using renew or change. Default is -1 (infinite lease).
+        :param str lease_id:
+            Proposed lease ID, in a GUID string format. The DataLake service returns
+            400 (Invalid request) if the proposed lease ID is not in the correct format.
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :returns: A DataLakeLeaseClient object that can be used in a context manager.
+        :rtype: ~azure.storage.filedatalake.DataLakeLeaseClient
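+
+        .. admonition:: Example:
+
+            .. code-block:: python
+
+                # Illustrative sketch, not an official sample; assumes `client` is an
+                # existing DataLakeDirectoryClient or DataLakeFileClient. The lease is
+                # released when the context manager exits.
+                with client.acquire_lease(lease_duration=15) as lease:
+                    client.set_metadata(metadata={'locked': 'true'}, lease=lease)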
+        """
+        lease = DataLakeLeaseClient(self, lease_id=lease_id)  # type: ignore
+        lease.acquire(lease_duration=lease_duration, **kwargs)
+        return lease
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_quick_query_helper.py b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_quick_query_helper.py
new file mode 100644
index 00000000..b4f44c36
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_quick_query_helper.py
@@ -0,0 +1,73 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+
+from typing import Union, Iterable, IO
+
+
+class DataLakeFileQueryReader(object):
+    """A streaming object to read query results.
+
+    :ivar str name:
+        The name of the blob being queried.
+    :ivar str container:
+        The name of the container where the blob is.
+    :ivar dict response_headers:
+        The response_headers of the quick query request.
+    :ivar bytes record_delimiter:
+        The delimiter used to separate lines, or records, within the data. The `records`
+        method will return these lines via a generator.
+    """
+
+    def __init__(
+        self,
+        blob_query_reader
+    ):
+        self.name = blob_query_reader.name
+        self.file_system = blob_query_reader.container
+        self.response_headers = blob_query_reader.response_headers
+        self.record_delimiter = blob_query_reader.record_delimiter
+        self._bytes_processed = 0
+        self._blob_query_reader = blob_query_reader
+
+    def __len__(self):
+        return len(self._blob_query_reader)
+
+    def readall(self):
+        # type: () -> Union[bytes, str]
+        """Return all query results.
+
+        This operation is blocking until all data is downloaded.
+        If encoding has been configured - this will be used to decode individual
+        records as they are received.
+
+        :returns: All query results.
+        :rtype: Union[bytes, str]
+        """
+        return self._blob_query_reader.readall()
+
+    def readinto(self, stream):
+        # type: (IO) -> None
+        """Download the query result to a stream.
+
+        :param IO stream:
+            The stream to download to. This can be an open file-handle,
+            or any writable stream.
+        :returns: None
+        """
+        self._blob_query_reader.readinto(stream)
+
+    def records(self):
+        # type: () -> Iterable[Union[bytes, str]]
+        """Returns a record generator for the query result.
+
+        Records will be returned line by line.
+        If encoding has been configured - this will be used to decode individual
+        records as they are received.
+
+        :returns: A record generator for the query result.
+        :rtype: Iterable[Union[bytes, str]]
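+
+        .. admonition:: Example:
+
+            .. code-block:: python
+
+                # Illustrative sketch, not an official sample; assumes `reader` is the
+                # DataLakeFileQueryReader returned by DataLakeFileClient.query_file.
+                for record in reader.records():
+                    print(record)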
+        """
+        return self._blob_query_reader.records()
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_serialize.py b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_serialize.py
new file mode 100644
index 00000000..c0866a32
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_serialize.py
@@ -0,0 +1,185 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+
+from typing import Any, Dict, Optional, Union
+
+from azure.storage.blob._serialize import _get_match_headers
+from ._shared import encode_base64
+from ._generated.models import ModifiedAccessConditions, PathHTTPHeaders, \
+    SourceModifiedAccessConditions, LeaseAccessConditions, CpkInfo
+
+
+_SUPPORTED_API_VERSIONS = [
+    '2019-02-02',
+    '2019-07-07',
+    '2019-10-10',
+    '2019-12-12',
+    '2020-02-10',
+    '2020-04-08',
+    '2020-06-12',
+    '2020-08-04',
+    '2020-10-02',
+    '2020-12-06',
+    '2021-02-12',
+    '2021-04-10',
+    '2021-06-08',
+    '2021-08-06',
+    '2021-12-02',
+    '2022-11-02',
+    '2023-01-03',
+    '2023-05-03',
+    '2023-08-03',
+    '2023-11-03',
+    '2024-05-04',
+    '2024-08-04',
+    '2024-11-04',
+    '2025-01-05',
+    '2025-05-05',
+]  # This list must be in chronological order!
+
+
+def get_api_version(kwargs):
+    # type: (Dict[str, Any]) -> str
+    api_version = kwargs.get('api_version', None)
+    if api_version and api_version not in _SUPPORTED_API_VERSIONS:
+        versions = '\n'.join(_SUPPORTED_API_VERSIONS)
+        raise ValueError(f"Unsupported API version '{api_version}'. Please select from:\n{versions}")
+    return api_version or _SUPPORTED_API_VERSIONS[-1]
+
+
+def compare_api_versions(version1: str, version2: str) -> int:
+    v1 = _SUPPORTED_API_VERSIONS.index(version1)
+    v2 = _SUPPORTED_API_VERSIONS.index(version2)
+    if v1 == v2:
+        return 0
+    if v1 < v2:
+        return -1
+    return 1
+
+
+def convert_dfs_url_to_blob_url(dfs_account_url):
+    return dfs_account_url.replace('.dfs.', '.blob.', 1)
+
+
+def convert_datetime_to_rfc1123(date):
+    weekday = ["Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"][date.weekday()]
+    month = ["Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep",
+             "Oct", "Nov", "Dec"][date.month - 1]
+    return f"{weekday}, {date.day:02} {month} {date.year:04} {date.hour:02}:{date.minute:02}:{date.second:02} GMT"
+
+
+def add_metadata_headers(metadata=None):
+    # type: (Optional[Dict[str, str]]) -> Optional[str]
+    if not metadata:
+        return None
+    headers = []
+    if metadata:
+        for key, value in metadata.items():
+            headers.append(key + '=')
+            headers.append(encode_base64(value))
+            headers.append(',')
+
+    if headers:
+        del headers[-1]
+
+    return ''.join(headers)
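+# Illustrative note (not part of the SDK): add_metadata_headers({'category': 'test'})
+# returns 'category=dGVzdA==' -- each value is base64-encoded and pairs are
+# comma-separated, matching the x-ms-properties header format.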
+
+
+def get_mod_conditions(kwargs):
+    # type: (Dict[str, Any]) -> ModifiedAccessConditions
+    if_match, if_none_match = _get_match_headers(kwargs, 'match_condition', 'etag')
+    return ModifiedAccessConditions(
+        if_modified_since=kwargs.pop('if_modified_since', None),
+        if_unmodified_since=kwargs.pop('if_unmodified_since', None),
+        if_match=if_match or kwargs.pop('if_match', None),
+        if_none_match=if_none_match or kwargs.pop('if_none_match', None)
+    )
+
+
+def get_source_mod_conditions(kwargs):
+    # type: (Dict[str, Any]) -> SourceModifiedAccessConditions
+    if_match, if_none_match = _get_match_headers(kwargs, 'source_match_condition', 'source_etag')
+    return SourceModifiedAccessConditions(
+        source_if_modified_since=kwargs.pop('source_if_modified_since', None),
+        source_if_unmodified_since=kwargs.pop('source_if_unmodified_since', None),
+        source_if_match=if_match or kwargs.pop('source_if_match', None),
+        source_if_none_match=if_none_match or kwargs.pop('source_if_none_match', None)
+    )
+
+
+def get_path_http_headers(content_settings):
+    path_headers = PathHTTPHeaders(
+        cache_control=content_settings.cache_control,
+        content_type=content_settings.content_type,
+        content_md5=bytearray(content_settings.content_md5) if content_settings.content_md5 else None,
+        content_encoding=content_settings.content_encoding,
+        content_language=content_settings.content_language,
+        content_disposition=content_settings.content_disposition
+    )
+    return path_headers
+
+
+def get_access_conditions(lease):
+    # type: (Optional[Union[BlobLeaseClient, str]]) -> Union[LeaseAccessConditions, None]
+    try:
+        lease_id = lease.id # type: ignore
+    except AttributeError:
+        lease_id = lease  # type: ignore
+    return LeaseAccessConditions(lease_id=lease_id) if lease_id else None
+
+
+def get_lease_id(lease):
+    if not lease:
+        return ""
+    try:
+        lease_id = lease.id
+    except AttributeError:
+        lease_id = lease
+    return lease_id
+
+
+def get_lease_action_properties(kwargs: Dict[str, Any]) -> Dict[str, Any]:
+    lease_action = kwargs.pop('lease_action', None)
+    lease_duration = kwargs.pop('lease_duration', None)
+    lease = kwargs.pop('lease', None)
+    try:
+        lease_id = lease.id
+    except AttributeError:
+        lease_id = lease
+
+    proposed_lease_id = None
+    access_conditions = None
+
+    # Acquiring a new lease
+    if lease_action in ['acquire', 'acquire-release']:
+        # Use provided lease id as the new lease id
+        proposed_lease_id = lease_id
+        # Assign a default lease duration if not provided
+        lease_duration = lease_duration or -1
+    else:
+        # Use lease id as access conditions
+        access_conditions = LeaseAccessConditions(lease_id=lease_id) if lease_id else None
+
+    return {
+        'lease_action': lease_action,
+        'lease_duration': lease_duration,
+        'proposed_lease_id': proposed_lease_id,
+        'lease_access_conditions': access_conditions
+    }
+
+
+def get_cpk_info(scheme, kwargs):
+    # type: (str, Dict[str, Any]) -> CpkInfo
+    cpk = kwargs.pop('cpk', None)
+    if cpk:
+        if scheme.lower() != 'https':
+            raise ValueError("Customer provided encryption key must be used over HTTPS.")
+        return CpkInfo(
+            encryption_key=cpk.key_value,
+            encryption_key_sha256=cpk.key_hash,
+            encryption_algorithm=cpk.algorithm)
+
+    return None
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_shared/__init__.py b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_shared/__init__.py
new file mode 100644
index 00000000..a8b1a27d
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_shared/__init__.py
@@ -0,0 +1,54 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+
+import base64
+import hashlib
+import hmac
+
+try:
+    from urllib.parse import quote, unquote
+except ImportError:
+    from urllib2 import quote, unquote # type: ignore
+
+
+def url_quote(url):
+    return quote(url)
+
+
+def url_unquote(url):
+    return unquote(url)
+
+
+def encode_base64(data):
+    if isinstance(data, str):
+        data = data.encode('utf-8')
+    encoded = base64.b64encode(data)
+    return encoded.decode('utf-8')
+
+
+def decode_base64_to_bytes(data):
+    if isinstance(data, str):
+        data = data.encode('utf-8')
+    return base64.b64decode(data)
+
+
+def decode_base64_to_text(data):
+    decoded_bytes = decode_base64_to_bytes(data)
+    return decoded_bytes.decode('utf-8')
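+# Illustrative note (not part of the SDK): encode_base64('storage') returns
+# 'c3RvcmFnZQ==' and decode_base64_to_text('c3RvcmFnZQ==') returns 'storage'.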
+
+
+def sign_string(key, string_to_sign, key_is_base64=True):
+    if key_is_base64:
+        key = decode_base64_to_bytes(key)
+    else:
+        if isinstance(key, str):
+            key = key.encode('utf-8')
+    if isinstance(string_to_sign, str):
+        string_to_sign = string_to_sign.encode('utf-8')
+    signed_hmac_sha256 = hmac.HMAC(key, string_to_sign, hashlib.sha256)
+    digest = signed_hmac_sha256.digest()
+    encoded_digest = encode_base64(digest)
+    return encoded_digest
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_shared/authentication.py b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_shared/authentication.py
new file mode 100644
index 00000000..b41f2391
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_shared/authentication.py
@@ -0,0 +1,245 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+
+import logging
+import re
+from typing import List, Tuple
+from urllib.parse import unquote, urlparse
+from functools import cmp_to_key
+
+try:
+    from yarl import URL
+except ImportError:
+    pass
+
+try:
+    from azure.core.pipeline.transport import AioHttpTransport  # pylint: disable=non-abstract-transport-import
+except ImportError:
+    AioHttpTransport = None
+
+from azure.core.exceptions import ClientAuthenticationError
+from azure.core.pipeline.policies import SansIOHTTPPolicy
+
+from . import sign_string
+
+logger = logging.getLogger(__name__)
+
+
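+# Weight tables used by compare() below to emulate the custom collation the
+# Storage service applies when sorting x-ms-* headers for signing: table_lv0
+# holds primary character weights indexed by ASCII code point (0x0 entries are
+# skipped at this level) and table_lv4 holds secondary tie-breaker weights.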
+table_lv0 = [
+    0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+    0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+    0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x71c, 0x0, 0x71f, 0x721, 0x723, 0x725,
+    0x0, 0x0, 0x0, 0x72d, 0x803, 0x0, 0x0, 0x733, 0x0, 0xd03, 0xd1a, 0xd1c, 0xd1e,
+    0xd20, 0xd22, 0xd24, 0xd26, 0xd28, 0xd2a, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+    0xe02, 0xe09, 0xe0a, 0xe1a, 0xe21, 0xe23, 0xe25, 0xe2c, 0xe32, 0xe35, 0xe36, 0xe48, 0xe51,
+    0xe70, 0xe7c, 0xe7e, 0xe89, 0xe8a, 0xe91, 0xe99, 0xe9f, 0xea2, 0xea4, 0xea6, 0xea7, 0xea9,
+    0x0, 0x0, 0x0, 0x743, 0x744, 0x748, 0xe02, 0xe09, 0xe0a, 0xe1a, 0xe21, 0xe23, 0xe25,
+    0xe2c, 0xe32, 0xe35, 0xe36, 0xe48, 0xe51, 0xe70, 0xe7c, 0xe7e, 0xe89, 0xe8a, 0xe91, 0xe99,
+    0xe9f, 0xea2, 0xea4, 0xea6, 0xea7, 0xea9, 0x0, 0x74c, 0x0, 0x750, 0x0,
+]
+
+table_lv4 = [
+    0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+    0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+    0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x8012, 0x0, 0x0, 0x0, 0x0, 0x0, 0x8212, 0x0, 0x0,
+    0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+    0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+    0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+    0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+    0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+]
+
+def compare(lhs: str, rhs: str) -> int:  # pylint:disable=too-many-return-statements
+    tables = [table_lv0, table_lv4]
+    curr_level, i, j, n = 0, 0, 0, len(tables)
+    lhs_len = len(lhs)
+    rhs_len = len(rhs)
+    while curr_level < n:
+        if curr_level == (n - 1) and i != j:
+            if i > j:
+                return -1
+            if i < j:
+                return 1
+            return 0
+
+        w1 = tables[curr_level][ord(lhs[i])] if i < lhs_len else 0x1
+        w2 = tables[curr_level][ord(rhs[j])] if j < rhs_len else 0x1
+
+        if w1 == 0x1 and w2 == 0x1:
+            i = 0
+            j = 0
+            curr_level += 1
+        elif w1 == w2:
+            i += 1
+            j += 1
+        elif w1 == 0:
+            i += 1
+        elif w2 == 0:
+            j += 1
+        else:
+            if w1 < w2:
+                return -1
+            if w1 > w2:
+                return 1
+            return 0
+    return 0
+
+
+# wraps a given exception with the desired exception type
+def _wrap_exception(ex, desired_type):
+    msg = ""
+    if ex.args:
+        msg = ex.args[0]
+    return desired_type(msg)
+
+# This method attempts to emulate the sorting done by the service
+def _storage_header_sort(input_headers: List[Tuple[str, str]]) -> List[Tuple[str, str]]:
+
+    # Build dict of tuples and list of keys
+    header_dict = {}
+    header_keys = []
+    for k, v in input_headers:
+        header_dict[k] = v
+        header_keys.append(k)
+
+    try:
+        header_keys = sorted(header_keys, key=cmp_to_key(compare))
+    except ValueError as exc:
+        raise ValueError("Illegal character encountered when sorting headers.") from exc
+
+    # Build list of sorted tuples
+    sorted_headers = []
+    for key in header_keys:
+        sorted_headers.append((key, header_dict.pop(key)))
+    return sorted_headers
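+# A minimal sketch of the expected shape (values are placeholders):
+# _storage_header_sort([('x-ms-version', '...'), ('x-ms-date', '...')])
+# returns the same (name, value) pairs reordered under the service's
+# collation, so the canonicalized headers below match what the service signs.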
+
+
+class AzureSigningError(ClientAuthenticationError):
+    """
+    Represents a fatal error when attempting to sign a request.
+    In general, the cause of this exception is user error. For example, the given account key is not valid.
+    Please visit https://learn.microsoft.com/azure/storage/common/storage-create-storage-account for more info.
+    """
+
+
+class SharedKeyCredentialPolicy(SansIOHTTPPolicy):
+
+    def __init__(self, account_name, account_key):
+        self.account_name = account_name
+        self.account_key = account_key
+        super(SharedKeyCredentialPolicy, self).__init__()
+
+    @staticmethod
+    def _get_headers(request, headers_to_sign):
+        headers = dict((name.lower(), value) for name, value in request.http_request.headers.items() if value)
+        if 'content-length' in headers and headers['content-length'] == '0':
+            del headers['content-length']
+        return '\n'.join(headers.get(x, '') for x in headers_to_sign) + '\n'
+
+    @staticmethod
+    def _get_verb(request):
+        return request.http_request.method + '\n'
+
+    def _get_canonicalized_resource(self, request):
+        uri_path = urlparse(request.http_request.url).path
+        try:
+            if isinstance(request.context.transport, AioHttpTransport) or \
+                    isinstance(getattr(request.context.transport, "_transport", None), AioHttpTransport) or \
+                    isinstance(getattr(getattr(request.context.transport, "_transport", None), "_transport", None),
+                               AioHttpTransport):
+                uri_path = URL(uri_path)
+                return '/' + self.account_name + str(uri_path)
+        except TypeError:
+            pass
+        return '/' + self.account_name + uri_path
+
+    @staticmethod
+    def _get_canonicalized_headers(request):
+        string_to_sign = ''
+        x_ms_headers = []
+        for name, value in request.http_request.headers.items():
+            if name.startswith('x-ms-'):
+                x_ms_headers.append((name.lower(), value))
+        x_ms_headers = _storage_header_sort(x_ms_headers)
+        for name, value in x_ms_headers:
+            if value is not None:
+                string_to_sign += ''.join([name, ':', value, '\n'])
+        return string_to_sign
+
+    @staticmethod
+    def _get_canonicalized_resource_query(request):
+        sorted_queries = list(request.http_request.query.items())
+        sorted_queries.sort()
+
+        string_to_sign = ''
+        for name, value in sorted_queries:
+            if value is not None:
+                string_to_sign += '\n' + name.lower() + ':' + unquote(value)
+
+        return string_to_sign
+
+    def _add_authorization_header(self, request, string_to_sign):
+        try:
+            signature = sign_string(self.account_key, string_to_sign)
+            auth_string = 'SharedKey ' + self.account_name + ':' + signature
+            request.http_request.headers['Authorization'] = auth_string
+        except Exception as ex:
+            # Wrap any error that occurred as signing error
+            # Doing so will clarify/locate the source of problem
+            raise _wrap_exception(ex, AzureSigningError) from ex
+
+    def on_request(self, request):
+        string_to_sign = \
+            self._get_verb(request) + \
+            self._get_headers(
+                request,
+                [
+                    'content-encoding', 'content-language', 'content-length',
+                    'content-md5', 'content-type', 'date', 'if-modified-since',
+                    'if-match', 'if-none-match', 'if-unmodified-since', 'byte_range'
+                ]
+            ) + \
+            self._get_canonicalized_headers(request) + \
+            self._get_canonicalized_resource(request) + \
+            self._get_canonicalized_resource_query(request)
+
+        self._add_authorization_header(request, string_to_sign)
+        # logger.debug("String_to_sign=%s", string_to_sign)
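+    # The assembled string-to-sign is, roughly:
+    #     VERB\n
+    #     <standard headers, one per line>\n
+    #     <canonicalized x-ms-* headers>
+    #     <canonicalized resource>
+    #     <canonicalized query parameters>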
+
+
+class StorageHttpChallenge(object):
+    def __init__(self, challenge):
+        """ Parses an HTTP WWW-Authentication Bearer challenge from the Storage service. """
+        if not challenge:
+            raise ValueError("Challenge cannot be empty")
+
+        self._parameters = {}
+        self.scheme, trimmed_challenge = challenge.strip().split(" ", 1)
+
+        # name=value pairs, either comma- or space-separated, with values
+        # possibly enclosed in quotes
+        for item in re.split('[, ]', trimmed_challenge):
+            comps = item.split("=")
+            if len(comps) == 2:
+                key = comps[0].strip(' "')
+                value = comps[1].strip(' "')
+                if key:
+                    self._parameters[key] = value
+
+        # Extract and verify required parameters
+        self.authorization_uri = self._parameters.get('authorization_uri')
+        if not self.authorization_uri:
+            raise ValueError("Authorization Uri not found")
+
+        self.resource_id = self._parameters.get('resource_id')
+        if not self.resource_id:
+            raise ValueError("Resource id not found")
+
+        uri_path = urlparse(self.authorization_uri).path.lstrip("/")
+        self.tenant_id = uri_path.split("/")[0]
+
+    def get_value(self, key):
+        return self._parameters.get(key)
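+# A minimal parsing sketch (the challenge string below is illustrative):
+#
+#     challenge = StorageHttpChallenge(
+#         'Bearer authorization_uri=https://login.microsoftonline.com/TENANT/oauth2/authorize, '
+#         'resource_id=https://storage.azure.com')
+#     challenge.tenant_id    # 'TENANT'
+#     challenge.resource_id  # 'https://storage.azure.com'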
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_shared/base_client.py b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_shared/base_client.py
new file mode 100644
index 00000000..ceb75bf0
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_shared/base_client.py
@@ -0,0 +1,458 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+import logging
+import uuid
+from typing import (
+    Any,
+    cast,
+    Dict,
+    Iterator,
+    Optional,
+    Tuple,
+    TYPE_CHECKING,
+    Union,
+)
+from urllib.parse import parse_qs, quote
+
+from azure.core.credentials import AzureSasCredential, AzureNamedKeyCredential, TokenCredential
+from azure.core.exceptions import HttpResponseError
+from azure.core.pipeline import Pipeline
+from azure.core.pipeline.transport import HttpTransport, RequestsTransport  # pylint: disable=non-abstract-transport-import, no-name-in-module
+from azure.core.pipeline.policies import (
+    AzureSasCredentialPolicy,
+    ContentDecodePolicy,
+    DistributedTracingPolicy,
+    HttpLoggingPolicy,
+    ProxyPolicy,
+    RedirectPolicy,
+    UserAgentPolicy,
+)
+
+from .authentication import SharedKeyCredentialPolicy
+from .constants import CONNECTION_TIMEOUT, DEFAULT_OAUTH_SCOPE, READ_TIMEOUT, SERVICE_HOST_BASE, STORAGE_OAUTH_SCOPE
+from .models import LocationMode, StorageConfiguration
+from .policies import (
+    ExponentialRetry,
+    QueueMessagePolicy,
+    StorageBearerTokenCredentialPolicy,
+    StorageContentValidation,
+    StorageHeadersPolicy,
+    StorageHosts,
+    StorageLoggingPolicy,
+    StorageRequestHook,
+    StorageResponseHook,
+)
+from .request_handlers import serialize_batch_body, _get_batch_request_delimiter
+from .response_handlers import PartialBatchErrorException, process_storage_error
+from .shared_access_signature import QueryStringConstants
+from .._version import VERSION
+from .._shared_access_signature import _is_credential_sastoken
+
+if TYPE_CHECKING:
+    from azure.core.credentials_async import AsyncTokenCredential
+    from azure.core.pipeline.transport import HttpRequest, HttpResponse  # pylint: disable=C4756
+
+_LOGGER = logging.getLogger(__name__)
+_SERVICE_PARAMS = {
+    "blob": {"primary": "BLOBENDPOINT", "secondary": "BLOBSECONDARYENDPOINT"},
+    "queue": {"primary": "QUEUEENDPOINT", "secondary": "QUEUESECONDARYENDPOINT"},
+    "file": {"primary": "FILEENDPOINT", "secondary": "FILESECONDARYENDPOINT"},
+    "dfs": {"primary": "BLOBENDPOINT", "secondary": "BLOBENDPOINT"},
+}
+
+
+class StorageAccountHostsMixin(object):
+    _client: Any
+    def __init__(
+        self,
+        parsed_url: Any,
+        service: str,
+        credential: Optional[Union[str, Dict[str, str], AzureNamedKeyCredential, AzureSasCredential, "AsyncTokenCredential", TokenCredential]] = None,  # pylint: disable=line-too-long
+        **kwargs: Any
+    ) -> None:
+        self._location_mode = kwargs.get("_location_mode", LocationMode.PRIMARY)
+        self._hosts = kwargs.get("_hosts")
+        self.scheme = parsed_url.scheme
+        self._is_localhost = False
+
+        if service not in ["blob", "queue", "file-share", "dfs"]:
+            raise ValueError(f"Invalid service: {service}")
+        service_name = service.split('-')[0]
+        account = parsed_url.netloc.split(f".{service_name}.core.")
+
+        self.account_name = account[0] if len(account) > 1 else None
+        if not self.account_name and (parsed_url.netloc.startswith("localhost")
+                or parsed_url.netloc.startswith("127.0.0.1")):
+            self._is_localhost = True
+            self.account_name = parsed_url.path.strip("/")
+
+        self.credential = _format_shared_key_credential(self.account_name, credential)
+        if self.scheme.lower() != "https" and hasattr(self.credential, "get_token"):
+            raise ValueError("Token credential is only supported with HTTPS.")
+
+        secondary_hostname = None
+        if hasattr(self.credential, "account_name"):
+            self.account_name = self.credential.account_name
+            secondary_hostname = f"{self.credential.account_name}-secondary.{service_name}.{SERVICE_HOST_BASE}"
+
+        if not self._hosts:
+            if len(account) > 1:
+                secondary_hostname = parsed_url.netloc.replace(account[0], account[0] + "-secondary")
+            if kwargs.get("secondary_hostname"):
+                secondary_hostname = kwargs["secondary_hostname"]
+            primary_hostname = (parsed_url.netloc + parsed_url.path).rstrip('/')
+            self._hosts = {LocationMode.PRIMARY: primary_hostname, LocationMode.SECONDARY: secondary_hostname}
+
+        self._sdk_moniker = f"storage-{service}/{VERSION}"
+        self._config, self._pipeline = self._create_pipeline(self.credential, sdk_moniker=self._sdk_moniker, **kwargs)
+
+    def __enter__(self):
+        self._client.__enter__()
+        return self
+
+    def __exit__(self, *args):
+        self._client.__exit__(*args)
+
+    def close(self):
+        """ This method is to close the sockets opened by the client.
+        It need not be used when using with a context manager.
+        """
+        self._client.close()
+
+    @property
+    def url(self):
+        """The full endpoint URL to this entity, including SAS token if used.
+
+        This could be either the primary endpoint,
+        or the secondary endpoint depending on the current :func:`location_mode`.
+        :returns: The full endpoint URL to this entity, including SAS token if used.
+        :rtype: str
+        """
+        return self._format_url(self._hosts[self._location_mode])
+
+    @property
+    def primary_endpoint(self):
+        """The full primary endpoint URL.
+
+        :rtype: str
+        """
+        return self._format_url(self._hosts[LocationMode.PRIMARY])
+
+    @property
+    def primary_hostname(self):
+        """The hostname of the primary endpoint.
+
+        :rtype: str
+        """
+        return self._hosts[LocationMode.PRIMARY]
+
+    @property
+    def secondary_endpoint(self):
+        """The full secondary endpoint URL if configured.
+
+        If not available, a ValueError will be raised. To explicitly specify a secondary hostname, use the optional
+        `secondary_hostname` keyword argument on instantiation.
+
+        :rtype: str
+        :raise ValueError:
+        """
+        if not self._hosts[LocationMode.SECONDARY]:
+            raise ValueError("No secondary host configured.")
+        return self._format_url(self._hosts[LocationMode.SECONDARY])
+
+    @property
+    def secondary_hostname(self):
+        """The hostname of the secondary endpoint.
+
+        If not available, this will be None. To explicitly specify a secondary hostname, use the optional
+        `secondary_hostname` keyword argument on instantiation.
+
+        :rtype: Optional[str]
+        """
+        return self._hosts[LocationMode.SECONDARY]
+
+    @property
+    def location_mode(self):
+        """The location mode that the client is currently using.
+
+        By default this will be "primary". Options include "primary" and "secondary".
+
+        :rtype: str
+        """
+
+        return self._location_mode
+
+    @location_mode.setter
+    def location_mode(self, value):
+        if self._hosts.get(value):
+            self._location_mode = value
+            self._client._config.url = self.url  # pylint: disable=protected-access
+        else:
+            raise ValueError(f"No host URL for location mode: {value}")
+
+    @property
+    def api_version(self):
+        """The version of the Storage API used for requests.
+
+        :rtype: str
+        """
+        return self._client._config.version  # pylint: disable=protected-access
+
+    def _format_query_string(
+        self, sas_token: Optional[str],
+        credential: Optional[Union[str, Dict[str, str], "AzureNamedKeyCredential", "AzureSasCredential", TokenCredential]],  # pylint: disable=line-too-long
+        snapshot: Optional[str] = None,
+        share_snapshot: Optional[str] = None
+    ) -> Tuple[str, Optional[Union[str, Dict[str, str], "AzureNamedKeyCredential", "AzureSasCredential", TokenCredential]]]:  # pylint: disable=line-too-long
+        query_str = "?"
+        if snapshot:
+            query_str += f"snapshot={snapshot}&"
+        if share_snapshot:
+            query_str += f"sharesnapshot={share_snapshot}&"
+        if sas_token and isinstance(credential, AzureSasCredential):
+            raise ValueError(
+                "You cannot use AzureSasCredential when the resource URI also contains a Shared Access Signature.")
+        if _is_credential_sastoken(credential):
+            credential = cast(str, credential)
+            query_str += credential.lstrip("?")
+            credential = None
+        elif sas_token:
+            query_str += sas_token
+        return query_str.rstrip("?&"), credential
+
+    def _create_pipeline(
+        self, credential: Optional[Union[str, Dict[str, str], AzureNamedKeyCredential, AzureSasCredential, TokenCredential]] = None,  # pylint: disable=line-too-long
+        **kwargs: Any
+    ) -> Tuple[StorageConfiguration, Pipeline]:
+        self._credential_policy: Any = None
+        if hasattr(credential, "get_token"):
+            if kwargs.get('audience'):
+                audience = str(kwargs.pop('audience')).rstrip('/') + DEFAULT_OAUTH_SCOPE
+            else:
+                audience = STORAGE_OAUTH_SCOPE
+            self._credential_policy = StorageBearerTokenCredentialPolicy(cast(TokenCredential, credential), audience)
+        elif isinstance(credential, SharedKeyCredentialPolicy):
+            self._credential_policy = credential
+        elif isinstance(credential, AzureSasCredential):
+            self._credential_policy = AzureSasCredentialPolicy(credential)
+        elif credential is not None:
+            raise TypeError(f"Unsupported credential: {type(credential)}")
+
+        config = kwargs.get("_configuration") or create_configuration(**kwargs)
+        if kwargs.get("_pipeline"):
+            return config, kwargs["_pipeline"]
+        transport = kwargs.get("transport")
+        kwargs.setdefault("connection_timeout", CONNECTION_TIMEOUT)
+        kwargs.setdefault("read_timeout", READ_TIMEOUT)
+        if not transport:
+            transport = RequestsTransport(**kwargs)
+        policies = [
+            QueueMessagePolicy(),
+            config.proxy_policy,
+            config.user_agent_policy,
+            StorageContentValidation(),
+            ContentDecodePolicy(response_encoding="utf-8"),
+            RedirectPolicy(**kwargs),
+            StorageHosts(hosts=self._hosts, **kwargs),
+            config.retry_policy,
+            config.headers_policy,
+            StorageRequestHook(**kwargs),
+            self._credential_policy,
+            config.logging_policy,
+            StorageResponseHook(**kwargs),
+            DistributedTracingPolicy(**kwargs),
+            HttpLoggingPolicy(**kwargs)
+        ]
+        if kwargs.get("_additional_pipeline_policies"):
+            policies = policies + kwargs.get("_additional_pipeline_policies")  # type: ignore
+        config.transport = transport  # type: ignore
+        return config, Pipeline(transport, policies=policies)
+
+    def _batch_send(
+        self,
+        *reqs: "HttpRequest",
+        **kwargs: Any
+    ) -> Iterator["HttpResponse"]:
+        """Given a series of request, do a Storage batch call.
+
+        :param HttpRequest reqs: A collection of HttpRequest objects.
+        :returns: An iterator of HttpResponse objects.
+        :rtype: Iterator[HttpResponse]
+        """
+        # Pop this kwarg here so it is not passed through to the transport as an unknown kwarg.
+        raise_on_any_failure = kwargs.pop("raise_on_any_failure", True)
+        batch_id = str(uuid.uuid1())
+
+        request = self._client._client.post(  # pylint: disable=protected-access
+            url=(
+                f'{self.scheme}://{self.primary_hostname}/'
+                f"{kwargs.pop('path', '')}?{kwargs.pop('restype', '')}"
+                f"comp=batch{kwargs.pop('sas', '')}{kwargs.pop('timeout', '')}"
+            ),
+            headers={
+                'x-ms-version': self.api_version,
+                "Content-Type": "multipart/mixed; boundary=" + _get_batch_request_delimiter(batch_id, False, False)
+            }
+        )
+
+        policies = [StorageHeadersPolicy()]
+        if self._credential_policy:
+            policies.append(self._credential_policy)
+
+        request.set_multipart_mixed(
+            *reqs,
+            policies=policies,
+            enforce_https=False
+        )
+
+        Pipeline._prepare_multipart_mixed_request(request)  # pylint: disable=protected-access
+        body = serialize_batch_body(request.multipart_mixed_info[0], batch_id)
+        request.set_bytes_body(body)
+
+        temp = request.multipart_mixed_info
+        request.multipart_mixed_info = None
+        pipeline_response = self._pipeline.run(
+            request, **kwargs
+        )
+        response = pipeline_response.http_response
+        request.multipart_mixed_info = temp
+
+        try:
+            if response.status_code not in [202]:
+                raise HttpResponseError(response=response)
+            parts = response.parts()
+            if raise_on_any_failure:
+                parts = list(parts)  # Materialize the existing iterator rather than calling parts() again.
+                if any(p for p in parts if not 200 <= p.status_code < 300):
+                    error = PartialBatchErrorException(
+                        message="There is a partial failure in the batch operation.",
+                        response=response, parts=parts
+                    )
+                    raise error
+                return iter(parts)
+            return parts  # type: ignore [no-any-return]
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+
+class TransportWrapper(HttpTransport):
+    """Wrapper class that ensures that an inner client created
+    by a `get_client` method does not close the outer transport for the parent
+    when used in a context manager.
+    """
+    def __init__(self, transport):
+        self._transport = transport
+
+    def send(self, request, **kwargs):
+        return self._transport.send(request, **kwargs)
+
+    def open(self):
+        pass
+
+    def close(self):
+        pass
+
+    def __enter__(self):
+        pass
+
+    def __exit__(self, *args):
+        pass
+
+
+def _format_shared_key_credential(
+    account_name: str,
+    credential: Optional[Union[str, Dict[str, str], AzureNamedKeyCredential, AzureSasCredential, "AsyncTokenCredential", TokenCredential]] = None  # pylint: disable=line-too-long
+) -> Any:
+    if isinstance(credential, str):
+        if not account_name:
+            raise ValueError("Unable to determine account name for shared key credential.")
+        credential = {"account_name": account_name, "account_key": credential}
+    if isinstance(credential, dict):
+        if "account_name" not in credential:
+            raise ValueError("Shared key credential missing 'account_name")
+        if "account_key" not in credential:
+            raise ValueError("Shared key credential missing 'account_key")
+        return SharedKeyCredentialPolicy(**credential)
+    if isinstance(credential, AzureNamedKeyCredential):
+        return SharedKeyCredentialPolicy(credential.named_key.name, credential.named_key.key)
+    return credential
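+# A minimal sketch (account name and key are placeholders): a bare key string
+# or an {'account_name': ..., 'account_key': ...} dict is normalized into a
+# SharedKeyCredentialPolicy; other credential types pass through unchanged.
+#
+#     policy = _format_shared_key_credential('myaccount', 'bXlrZXk=')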
+
+
+def parse_connection_str(
+    conn_str: str,
+    credential: Optional[Union[str, Dict[str, str], AzureNamedKeyCredential, AzureSasCredential, TokenCredential]],
+    service: str
+) -> Tuple[str, Optional[str], Optional[Union[str, Dict[str, str], AzureNamedKeyCredential, AzureSasCredential, TokenCredential]]]:  # pylint: disable=line-too-long
+    conn_str = conn_str.rstrip(";")
+    conn_settings_list = [s.split("=", 1) for s in conn_str.split(";")]
+    if any(len(tup) != 2 for tup in conn_settings_list):
+        raise ValueError("Connection string is either blank or malformed.")
+    conn_settings = dict((key.upper(), val) for key, val in conn_settings_list)
+    endpoints = _SERVICE_PARAMS[service]
+    primary = None
+    secondary = None
+    if not credential:
+        try:
+            credential = {"account_name": conn_settings["ACCOUNTNAME"], "account_key": conn_settings["ACCOUNTKEY"]}
+        except KeyError:
+            credential = conn_settings.get("SHAREDACCESSSIGNATURE")
+    if endpoints["primary"] in conn_settings:
+        primary = conn_settings[endpoints["primary"]]
+        if endpoints["secondary"] in conn_settings:
+            secondary = conn_settings[endpoints["secondary"]]
+    else:
+        if endpoints["secondary"] in conn_settings:
+            raise ValueError("Connection string specifies only secondary endpoint.")
+        try:
+            primary = (
+                f"{conn_settings['DEFAULTENDPOINTSPROTOCOL']}://"
+                f"{conn_settings['ACCOUNTNAME']}.{service}.{conn_settings['ENDPOINTSUFFIX']}"
+            )
+            secondary = (
+                f"{conn_settings['ACCOUNTNAME']}-secondary."
+                f"{service}.{conn_settings['ENDPOINTSUFFIX']}"
+            )
+        except KeyError:
+            pass
+
+    if not primary:
+        try:
+            primary = (
+                f"https://{conn_settings['ACCOUNTNAME']}."
+                f"{service}.{conn_settings.get('ENDPOINTSUFFIX', SERVICE_HOST_BASE)}"
+            )
+        except KeyError as exc:
+            raise ValueError("Connection string missing required connection details.") from exc
+    if service == "dfs":
+        primary = primary.replace(".blob.", ".dfs.")
+        if secondary:
+            secondary = secondary.replace(".blob.", ".dfs.")
+    return primary, secondary, credential
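+# A minimal sketch (placeholder values):
+#
+#     conn = ("DefaultEndpointsProtocol=https;AccountName=myaccount;"
+#             "AccountKey=bXlrZXk=;EndpointSuffix=core.windows.net")
+#     primary, secondary, cred = parse_connection_str(conn, None, "dfs")
+#     # primary   -> 'https://myaccount.dfs.core.windows.net'
+#     # secondary -> 'myaccount-secondary.dfs.core.windows.net'
+#     # cred      -> {'account_name': 'myaccount', 'account_key': 'bXlrZXk='}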
+
+
+def create_configuration(**kwargs: Any) -> StorageConfiguration:
+    # Backwards compatibility if someone is not passing sdk_moniker
+    if not kwargs.get("sdk_moniker"):
+        kwargs["sdk_moniker"] = f"storage-{kwargs.pop('storage_sdk')}/{VERSION}"
+    config = StorageConfiguration(**kwargs)
+    config.headers_policy = StorageHeadersPolicy(**kwargs)
+    config.user_agent_policy = UserAgentPolicy(**kwargs)
+    config.retry_policy = kwargs.get("retry_policy") or ExponentialRetry(**kwargs)
+    config.logging_policy = StorageLoggingPolicy(**kwargs)
+    config.proxy_policy = ProxyPolicy(**kwargs)
+    return config
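+# A minimal sketch: create_configuration(storage_sdk="filedatalake") yields a
+# StorageConfiguration whose sdk_moniker is "storage-filedatalake/<VERSION>"
+# and whose retry policy defaults to ExponentialRetry.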
+
+
+def parse_query(query_str: str) -> Tuple[Optional[str], Optional[str]]:
+    sas_values = QueryStringConstants.to_list()
+    parsed_query = {k: v[0] for k, v in parse_qs(query_str).items()}
+    sas_params = [f"{k}={quote(v, safe='')}" for k, v in parsed_query.items() if k in sas_values]
+    sas_token = None
+    if sas_params:
+        sas_token = "&".join(sas_params)
+
+    snapshot = parsed_query.get("snapshot") or parsed_query.get("sharesnapshot")
+    return snapshot, sas_token
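+# A minimal sketch (SAS values are placeholders):
+#
+#     snapshot, sas = parse_query("snapshot=2024-01-01&sv=2025-01-05&sig=abc%3D")
+#     # snapshot -> '2024-01-01'
+#     # sas      -> 'sv=2025-01-05&sig=abc%3D' (only recognized SAS params kept)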
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_shared/base_client_async.py b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_shared/base_client_async.py
new file mode 100644
index 00000000..6186b29d
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_shared/base_client_async.py
@@ -0,0 +1,280 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+# mypy: disable-error-code="attr-defined"
+
+import logging
+from typing import Any, cast, Dict, Optional, Tuple, TYPE_CHECKING, Union
+
+from azure.core.async_paging import AsyncList
+from azure.core.credentials import AzureNamedKeyCredential, AzureSasCredential
+from azure.core.credentials_async import AsyncTokenCredential
+from azure.core.exceptions import HttpResponseError
+from azure.core.pipeline import AsyncPipeline
+from azure.core.pipeline.policies import (
+    AsyncRedirectPolicy,
+    AzureSasCredentialPolicy,
+    ContentDecodePolicy,
+    DistributedTracingPolicy,
+    HttpLoggingPolicy,
+)
+from azure.core.pipeline.transport import AsyncHttpTransport
+
+from .authentication import SharedKeyCredentialPolicy
+from .base_client import create_configuration
+from .constants import CONNECTION_TIMEOUT, DEFAULT_OAUTH_SCOPE, READ_TIMEOUT, SERVICE_HOST_BASE, STORAGE_OAUTH_SCOPE
+from .models import StorageConfiguration
+from .policies import (
+    QueueMessagePolicy,
+    StorageContentValidation,
+    StorageHeadersPolicy,
+    StorageHosts,
+    StorageRequestHook,
+)
+from .policies_async import AsyncStorageBearerTokenCredentialPolicy, AsyncStorageResponseHook
+from .response_handlers import PartialBatchErrorException, process_storage_error
+from .._shared_access_signature import _is_credential_sastoken
+
+if TYPE_CHECKING:
+    from azure.core.pipeline.transport import HttpRequest, HttpResponse  # pylint: disable=C4756
+_LOGGER = logging.getLogger(__name__)
+
+_SERVICE_PARAMS = {
+    "blob": {"primary": "BLOBENDPOINT", "secondary": "BLOBSECONDARYENDPOINT"},
+    "queue": {"primary": "QUEUEENDPOINT", "secondary": "QUEUESECONDARYENDPOINT"},
+    "file": {"primary": "FILEENDPOINT", "secondary": "FILESECONDARYENDPOINT"},
+    "dfs": {"primary": "BLOBENDPOINT", "secondary": "BLOBENDPOINT"},
+}
+
+
+class AsyncStorageAccountHostsMixin(object):
+
+    def __enter__(self):
+        raise TypeError("Async client only supports 'async with'.")
+
+    def __exit__(self, *args):
+        pass
+
+    async def __aenter__(self):
+        await self._client.__aenter__()
+        return self
+
+    async def __aexit__(self, *args):
+        await self._client.__aexit__(*args)
+
+    async def close(self):
+        """ This method is to close the sockets opened by the client.
+        It need not be used when using with a context manager.
+        """
+        await self._client.close()
+
+    def _format_query_string(
+        self, sas_token: Optional[str],
+        credential: Optional[Union[str, Dict[str, str], "AzureNamedKeyCredential", "AzureSasCredential", AsyncTokenCredential]],  # pylint: disable=line-too-long
+        snapshot: Optional[str] = None,
+        share_snapshot: Optional[str] = None
+    ) -> Tuple[str, Optional[Union[str, Dict[str, str], "AzureNamedKeyCredential", "AzureSasCredential", AsyncTokenCredential]]]:  # pylint: disable=line-too-long
+        query_str = "?"
+        if snapshot:
+            query_str += f"snapshot={snapshot}&"
+        if share_snapshot:
+            query_str += f"sharesnapshot={share_snapshot}&"
+        if sas_token and isinstance(credential, AzureSasCredential):
+            raise ValueError(
+                "You cannot use AzureSasCredential when the resource URI also contains a Shared Access Signature.")
+        if _is_credential_sastoken(credential):
+            query_str += credential.lstrip("?")  # type: ignore [union-attr]
+            credential = None
+        elif sas_token:
+            query_str += sas_token
+        return query_str.rstrip("?&"), credential
+
+    def _create_pipeline(
+        self, credential: Optional[Union[str, Dict[str, str], AzureNamedKeyCredential, AzureSasCredential, AsyncTokenCredential]] = None, # pylint: disable=line-too-long
+        **kwargs: Any
+    ) -> Tuple[StorageConfiguration, AsyncPipeline]:
+        self._credential_policy: Optional[
+            Union[AsyncStorageBearerTokenCredentialPolicy,
+            SharedKeyCredentialPolicy,
+            AzureSasCredentialPolicy]] = None
+        if hasattr(credential, 'get_token'):
+            if kwargs.get('audience'):
+                audience = str(kwargs.pop('audience')).rstrip('/') + DEFAULT_OAUTH_SCOPE
+            else:
+                audience = STORAGE_OAUTH_SCOPE
+            self._credential_policy = AsyncStorageBearerTokenCredentialPolicy(
+                                        cast(AsyncTokenCredential, credential), audience)
+        elif isinstance(credential, SharedKeyCredentialPolicy):
+            self._credential_policy = credential
+        elif isinstance(credential, AzureSasCredential):
+            self._credential_policy = AzureSasCredentialPolicy(credential)
+        elif credential is not None:
+            raise TypeError(f"Unsupported credential: {type(credential)}")
+        config = kwargs.get('_configuration') or create_configuration(**kwargs)
+        if kwargs.get('_pipeline'):
+            return config, kwargs['_pipeline']
+        transport = kwargs.get('transport')
+        kwargs.setdefault("connection_timeout", CONNECTION_TIMEOUT)
+        kwargs.setdefault("read_timeout", READ_TIMEOUT)
+        if not transport:
+            try:
+                from azure.core.pipeline.transport import AioHttpTransport  # pylint: disable=non-abstract-transport-import
+            except ImportError as exc:
+                raise ImportError("Unable to create async transport. Please check aiohttp is installed.") from exc
+            transport = AioHttpTransport(**kwargs)
+        hosts = self._hosts
+        policies = [
+            QueueMessagePolicy(),
+            config.proxy_policy,
+            config.user_agent_policy,
+            StorageContentValidation(),
+            ContentDecodePolicy(response_encoding="utf-8"),
+            AsyncRedirectPolicy(**kwargs),
+            StorageHosts(hosts=hosts, **kwargs),
+            config.retry_policy,
+            config.headers_policy,
+            StorageRequestHook(**kwargs),
+            self._credential_policy,
+            config.logging_policy,
+            AsyncStorageResponseHook(**kwargs),
+            DistributedTracingPolicy(**kwargs),
+            HttpLoggingPolicy(**kwargs),
+        ]
+        if kwargs.get("_additional_pipeline_policies"):
+            policies = policies + kwargs.get("_additional_pipeline_policies")  # type: ignore
+        config.transport = transport  # type: ignore
+        return config, AsyncPipeline(transport, policies=policies)  # type: ignore
+
+    async def _batch_send(
+        self,
+        *reqs: "HttpRequest",
+        **kwargs: Any
+    ) -> AsyncList["HttpResponse"]:
+        """Given a series of request, do a Storage batch call.
+
+        :param HttpRequest reqs: A collection of HttpRequest objects.
+        :returns: An AsyncList of HttpResponse objects.
+        :rtype: AsyncList[HttpResponse]
+        """
+        # Pop this kwarg here so it is not passed through to the transport as an unknown kwarg.
+        raise_on_any_failure = kwargs.pop("raise_on_any_failure", True)
+        request = self._client._client.post(  # pylint: disable=protected-access
+            url=(
+                f'{self.scheme}://{self.primary_hostname}/'
+                f"{kwargs.pop('path', '')}?{kwargs.pop('restype', '')}"
+                f"comp=batch{kwargs.pop('sas', '')}{kwargs.pop('timeout', '')}"
+            ),
+            headers={
+                'x-ms-version': self.api_version
+            }
+        )
+
+        policies = [StorageHeadersPolicy()]
+        if self._credential_policy:
+            policies.append(self._credential_policy)  # type: ignore
+
+        request.set_multipart_mixed(
+            *reqs,
+            policies=policies,
+            enforce_https=False
+        )
+
+        pipeline_response = await self._pipeline.run(
+            request, **kwargs
+        )
+        response = pipeline_response.http_response
+
+        try:
+            if response.status_code not in [202]:
+                raise HttpResponseError(response=response)
+            parts = response.parts()  # Returns an AsyncIterator
+            if raise_on_any_failure:
+                parts_list = []
+                async for part in parts:
+                    parts_list.append(part)
+                if any(p for p in parts_list if not 200 <= p.status_code < 300):
+                    error = PartialBatchErrorException(
+                        message="There is a partial failure in the batch operation.",
+                        response=response, parts=parts_list
+                    )
+                    raise error
+                return AsyncList(parts_list)
+            return parts  # type: ignore [no-any-return]
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+
+def parse_connection_str(
+    conn_str: str,
+    credential: Optional[Union[str, Dict[str, str], AzureNamedKeyCredential, AzureSasCredential, AsyncTokenCredential]],
+    service: str
+) -> Tuple[str, Optional[str], Optional[Union[str, Dict[str, str], AzureNamedKeyCredential, AzureSasCredential, AsyncTokenCredential]]]: # pylint: disable=line-too-long
+    conn_str = conn_str.rstrip(";")
+    conn_settings_list = [s.split("=", 1) for s in conn_str.split(";")]
+    if any(len(tup) != 2 for tup in conn_settings_list):
+        raise ValueError("Connection string is either blank or malformed.")
+    conn_settings = dict((key.upper(), val) for key, val in conn_settings_list)
+    endpoints = _SERVICE_PARAMS[service]
+    primary = None
+    secondary = None
+    if not credential:
+        try:
+            credential = {"account_name": conn_settings["ACCOUNTNAME"], "account_key": conn_settings["ACCOUNTKEY"]}
+        except KeyError:
+            credential = conn_settings.get("SHAREDACCESSSIGNATURE")
+    if endpoints["primary"] in conn_settings:
+        primary = conn_settings[endpoints["primary"]]
+        if endpoints["secondary"] in conn_settings:
+            secondary = conn_settings[endpoints["secondary"]]
+    else:
+        if endpoints["secondary"] in conn_settings:
+            raise ValueError("Connection string specifies only secondary endpoint.")
+        try:
+            primary = (
+                f"{conn_settings['DEFAULTENDPOINTSPROTOCOL']}://"
+                f"{conn_settings['ACCOUNTNAME']}.{service}.{conn_settings['ENDPOINTSUFFIX']}"
+            )
+            secondary = (
+                f"{conn_settings['ACCOUNTNAME']}-secondary."
+                f"{service}.{conn_settings['ENDPOINTSUFFIX']}"
+            )
+        except KeyError:
+            pass
+
+    if not primary:
+        try:
+            primary = (
+                f"https://{conn_settings['ACCOUNTNAME']}."
+                f"{service}.{conn_settings.get('ENDPOINTSUFFIX', SERVICE_HOST_BASE)}"
+            )
+        except KeyError as exc:
+            raise ValueError("Connection string missing required connection details.") from exc
+    if service == "dfs":
+        primary = primary.replace(".blob.", ".dfs.")
+        if secondary:
+            secondary = secondary.replace(".blob.", ".dfs.")
+    return primary, secondary, credential
+
+
+class AsyncTransportWrapper(AsyncHttpTransport):
+    """Wrapper class that ensures that an inner client created
+    by a `get_client` method does not close the outer transport for the parent
+    when used in a context manager.
+    """
+    def __init__(self, async_transport):
+        self._transport = async_transport
+
+    async def send(self, request, **kwargs):
+        return await self._transport.send(request, **kwargs)
+
+    async def open(self):
+        pass
+
+    async def close(self):
+        pass
+
+    async def __aenter__(self):
+        pass
+
+    async def __aexit__(self, *args):
+        pass
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_shared/constants.py b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_shared/constants.py
new file mode 100644
index 00000000..0b4b029a
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_shared/constants.py
@@ -0,0 +1,19 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+
+from .._serialize import _SUPPORTED_API_VERSIONS
+
+
+X_MS_VERSION = _SUPPORTED_API_VERSIONS[-1]
+
+# Default socket timeouts, in seconds
+CONNECTION_TIMEOUT = 20
+READ_TIMEOUT = 60
+
+DEFAULT_OAUTH_SCOPE = "/.default"
+STORAGE_OAUTH_SCOPE = "https://storage.azure.com/.default"
+
+SERVICE_HOST_BASE = 'core.windows.net'
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_shared/models.py b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_shared/models.py
new file mode 100644
index 00000000..183d6f64
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_shared/models.py
@@ -0,0 +1,585 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+# pylint: disable=too-many-instance-attributes
+from enum import Enum
+from typing import Optional
+
+from azure.core import CaseInsensitiveEnumMeta
+from azure.core.configuration import Configuration
+from azure.core.pipeline.policies import UserAgentPolicy
+
+
+def get_enum_value(value):
+    if value is None or value in ["None", ""]:
+        return None
+    try:
+        return value.value
+    except AttributeError:
+        return value
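+# A minimal sketch: get_enum_value(StorageErrorCode.BLOB_NOT_FOUND) returns
+# 'BlobNotFound'; plain strings pass through unchanged, and None, 'None' and
+# '' all normalize to None.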
+
+
+class StorageErrorCode(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+
+    # Generic storage values
+    ACCOUNT_ALREADY_EXISTS = "AccountAlreadyExists"
+    ACCOUNT_BEING_CREATED = "AccountBeingCreated"
+    ACCOUNT_IS_DISABLED = "AccountIsDisabled"
+    AUTHENTICATION_FAILED = "AuthenticationFailed"
+    AUTHORIZATION_FAILURE = "AuthorizationFailure"
+    NO_AUTHENTICATION_INFORMATION = "NoAuthenticationInformation"
+    CONDITION_HEADERS_NOT_SUPPORTED = "ConditionHeadersNotSupported"
+    CONDITION_NOT_MET = "ConditionNotMet"
+    EMPTY_METADATA_KEY = "EmptyMetadataKey"
+    INSUFFICIENT_ACCOUNT_PERMISSIONS = "InsufficientAccountPermissions"
+    INTERNAL_ERROR = "InternalError"
+    INVALID_AUTHENTICATION_INFO = "InvalidAuthenticationInfo"
+    INVALID_HEADER_VALUE = "InvalidHeaderValue"
+    INVALID_HTTP_VERB = "InvalidHttpVerb"
+    INVALID_INPUT = "InvalidInput"
+    INVALID_MD5 = "InvalidMd5"
+    INVALID_METADATA = "InvalidMetadata"
+    INVALID_QUERY_PARAMETER_VALUE = "InvalidQueryParameterValue"
+    INVALID_RANGE = "InvalidRange"
+    INVALID_RESOURCE_NAME = "InvalidResourceName"
+    INVALID_URI = "InvalidUri"
+    INVALID_XML_DOCUMENT = "InvalidXmlDocument"
+    INVALID_XML_NODE_VALUE = "InvalidXmlNodeValue"
+    MD5_MISMATCH = "Md5Mismatch"
+    METADATA_TOO_LARGE = "MetadataTooLarge"
+    MISSING_CONTENT_LENGTH_HEADER = "MissingContentLengthHeader"
+    MISSING_REQUIRED_QUERY_PARAMETER = "MissingRequiredQueryParameter"
+    MISSING_REQUIRED_HEADER = "MissingRequiredHeader"
+    MISSING_REQUIRED_XML_NODE = "MissingRequiredXmlNode"
+    MULTIPLE_CONDITION_HEADERS_NOT_SUPPORTED = "MultipleConditionHeadersNotSupported"
+    OPERATION_TIMED_OUT = "OperationTimedOut"
+    OUT_OF_RANGE_INPUT = "OutOfRangeInput"
+    OUT_OF_RANGE_QUERY_PARAMETER_VALUE = "OutOfRangeQueryParameterValue"
+    REQUEST_BODY_TOO_LARGE = "RequestBodyTooLarge"
+    RESOURCE_TYPE_MISMATCH = "ResourceTypeMismatch"
+    REQUEST_URL_FAILED_TO_PARSE = "RequestUrlFailedToParse"
+    RESOURCE_ALREADY_EXISTS = "ResourceAlreadyExists"
+    RESOURCE_NOT_FOUND = "ResourceNotFound"
+    SERVER_BUSY = "ServerBusy"
+    UNSUPPORTED_HEADER = "UnsupportedHeader"
+    UNSUPPORTED_XML_NODE = "UnsupportedXmlNode"
+    UNSUPPORTED_QUERY_PARAMETER = "UnsupportedQueryParameter"
+    UNSUPPORTED_HTTP_VERB = "UnsupportedHttpVerb"
+
+    # Blob values
+    APPEND_POSITION_CONDITION_NOT_MET = "AppendPositionConditionNotMet"
+    BLOB_ACCESS_TIER_NOT_SUPPORTED_FOR_ACCOUNT_TYPE = "BlobAccessTierNotSupportedForAccountType"
+    BLOB_ALREADY_EXISTS = "BlobAlreadyExists"
+    BLOB_NOT_FOUND = "BlobNotFound"
+    BLOB_OVERWRITTEN = "BlobOverwritten"
+    BLOB_TIER_INADEQUATE_FOR_CONTENT_LENGTH = "BlobTierInadequateForContentLength"
+    BLOCK_COUNT_EXCEEDS_LIMIT = "BlockCountExceedsLimit"
+    BLOCK_LIST_TOO_LONG = "BlockListTooLong"
+    CANNOT_CHANGE_TO_LOWER_TIER = "CannotChangeToLowerTier"
+    CANNOT_VERIFY_COPY_SOURCE = "CannotVerifyCopySource"
+    CONTAINER_ALREADY_EXISTS = "ContainerAlreadyExists"
+    CONTAINER_BEING_DELETED = "ContainerBeingDeleted"
+    CONTAINER_DISABLED = "ContainerDisabled"
+    CONTAINER_NOT_FOUND = "ContainerNotFound"
+    CONTENT_LENGTH_LARGER_THAN_TIER_LIMIT = "ContentLengthLargerThanTierLimit"
+    COPY_ACROSS_ACCOUNTS_NOT_SUPPORTED = "CopyAcrossAccountsNotSupported"
+    COPY_ID_MISMATCH = "CopyIdMismatch"
+    FEATURE_VERSION_MISMATCH = "FeatureVersionMismatch"
+    INCREMENTAL_COPY_BLOB_MISMATCH = "IncrementalCopyBlobMismatch"
+    INCREMENTAL_COPY_OF_EARLIER_VERSION_SNAPSHOT_NOT_ALLOWED = "IncrementalCopyOfEarlierVersionSnapshotNotAllowed"
+    #: Deprecated: Please use INCREMENTAL_COPY_OF_EARLIER_VERSION_SNAPSHOT_NOT_ALLOWED instead.
+    INCREMENTAL_COPY_OF_ERALIER_VERSION_SNAPSHOT_NOT_ALLOWED = "IncrementalCopyOfEarlierVersionSnapshotNotAllowed"
+    INCREMENTAL_COPY_SOURCE_MUST_BE_SNAPSHOT = "IncrementalCopySourceMustBeSnapshot"
+    INFINITE_LEASE_DURATION_REQUIRED = "InfiniteLeaseDurationRequired"
+    INVALID_BLOB_OR_BLOCK = "InvalidBlobOrBlock"
+    INVALID_BLOB_TIER = "InvalidBlobTier"
+    INVALID_BLOB_TYPE = "InvalidBlobType"
+    INVALID_BLOCK_ID = "InvalidBlockId"
+    INVALID_BLOCK_LIST = "InvalidBlockList"
+    INVALID_OPERATION = "InvalidOperation"
+    INVALID_PAGE_RANGE = "InvalidPageRange"
+    INVALID_SOURCE_BLOB_TYPE = "InvalidSourceBlobType"
+    INVALID_SOURCE_BLOB_URL = "InvalidSourceBlobUrl"
+    INVALID_VERSION_FOR_PAGE_BLOB_OPERATION = "InvalidVersionForPageBlobOperation"
+    LEASE_ALREADY_PRESENT = "LeaseAlreadyPresent"
+    LEASE_ALREADY_BROKEN = "LeaseAlreadyBroken"
+    LEASE_ID_MISMATCH_WITH_BLOB_OPERATION = "LeaseIdMismatchWithBlobOperation"
+    LEASE_ID_MISMATCH_WITH_CONTAINER_OPERATION = "LeaseIdMismatchWithContainerOperation"
+    LEASE_ID_MISMATCH_WITH_LEASE_OPERATION = "LeaseIdMismatchWithLeaseOperation"
+    LEASE_ID_MISSING = "LeaseIdMissing"
+    LEASE_IS_BREAKING_AND_CANNOT_BE_ACQUIRED = "LeaseIsBreakingAndCannotBeAcquired"
+    LEASE_IS_BREAKING_AND_CANNOT_BE_CHANGED = "LeaseIsBreakingAndCannotBeChanged"
+    LEASE_IS_BROKEN_AND_CANNOT_BE_RENEWED = "LeaseIsBrokenAndCannotBeRenewed"
+    LEASE_LOST = "LeaseLost"
+    LEASE_NOT_PRESENT_WITH_BLOB_OPERATION = "LeaseNotPresentWithBlobOperation"
+    LEASE_NOT_PRESENT_WITH_CONTAINER_OPERATION = "LeaseNotPresentWithContainerOperation"
+    LEASE_NOT_PRESENT_WITH_LEASE_OPERATION = "LeaseNotPresentWithLeaseOperation"
+    MAX_BLOB_SIZE_CONDITION_NOT_MET = "MaxBlobSizeConditionNotMet"
+    NO_PENDING_COPY_OPERATION = "NoPendingCopyOperation"
+    OPERATION_NOT_ALLOWED_ON_INCREMENTAL_COPY_BLOB = "OperationNotAllowedOnIncrementalCopyBlob"
+    PENDING_COPY_OPERATION = "PendingCopyOperation"
+    PREVIOUS_SNAPSHOT_CANNOT_BE_NEWER = "PreviousSnapshotCannotBeNewer"
+    PREVIOUS_SNAPSHOT_NOT_FOUND = "PreviousSnapshotNotFound"
+    PREVIOUS_SNAPSHOT_OPERATION_NOT_SUPPORTED = "PreviousSnapshotOperationNotSupported"
+    SEQUENCE_NUMBER_CONDITION_NOT_MET = "SequenceNumberConditionNotMet"
+    SEQUENCE_NUMBER_INCREMENT_TOO_LARGE = "SequenceNumberIncrementTooLarge"
+    SNAPSHOT_COUNT_EXCEEDED = "SnapshotCountExceeded"
+    SNAPSHOT_OPERATION_RATE_EXCEEDED = "SnapshotOperationRateExceeded"
+    #: Deprecated: Please use SNAPSHOT_OPERATION_RATE_EXCEEDED instead.
+    SNAPHOT_OPERATION_RATE_EXCEEDED = "SnapshotOperationRateExceeded"
+    SNAPSHOTS_PRESENT = "SnapshotsPresent"
+    SOURCE_CONDITION_NOT_MET = "SourceConditionNotMet"
+    SYSTEM_IN_USE = "SystemInUse"
+    TARGET_CONDITION_NOT_MET = "TargetConditionNotMet"
+    UNAUTHORIZED_BLOB_OVERWRITE = "UnauthorizedBlobOverwrite"
+    BLOB_BEING_REHYDRATED = "BlobBeingRehydrated"
+    BLOB_ARCHIVED = "BlobArchived"
+    BLOB_NOT_ARCHIVED = "BlobNotArchived"
+
+    # Queue values
+    INVALID_MARKER = "InvalidMarker"
+    MESSAGE_NOT_FOUND = "MessageNotFound"
+    MESSAGE_TOO_LARGE = "MessageTooLarge"
+    POP_RECEIPT_MISMATCH = "PopReceiptMismatch"
+    QUEUE_ALREADY_EXISTS = "QueueAlreadyExists"
+    QUEUE_BEING_DELETED = "QueueBeingDeleted"
+    QUEUE_DISABLED = "QueueDisabled"
+    QUEUE_NOT_EMPTY = "QueueNotEmpty"
+    QUEUE_NOT_FOUND = "QueueNotFound"
+
+    # File values
+    CANNOT_DELETE_FILE_OR_DIRECTORY = "CannotDeleteFileOrDirectory"
+    CLIENT_CACHE_FLUSH_DELAY = "ClientCacheFlushDelay"
+    DELETE_PENDING = "DeletePending"
+    DIRECTORY_NOT_EMPTY = "DirectoryNotEmpty"
+    FILE_LOCK_CONFLICT = "FileLockConflict"
+    FILE_SHARE_PROVISIONED_BANDWIDTH_DOWNGRADE_NOT_ALLOWED = "FileShareProvisionedBandwidthDowngradeNotAllowed"
+    FILE_SHARE_PROVISIONED_IOPS_DOWNGRADE_NOT_ALLOWED = "FileShareProvisionedIopsDowngradeNotAllowed"
+    INVALID_FILE_OR_DIRECTORY_PATH_NAME = "InvalidFileOrDirectoryPathName"
+    PARENT_NOT_FOUND = "ParentNotFound"
+    READ_ONLY_ATTRIBUTE = "ReadOnlyAttribute"
+    SHARE_ALREADY_EXISTS = "ShareAlreadyExists"
+    SHARE_BEING_DELETED = "ShareBeingDeleted"
+    SHARE_DISABLED = "ShareDisabled"
+    SHARE_NOT_FOUND = "ShareNotFound"
+    SHARING_VIOLATION = "SharingViolation"
+    SHARE_SNAPSHOT_IN_PROGRESS = "ShareSnapshotInProgress"
+    SHARE_SNAPSHOT_COUNT_EXCEEDED = "ShareSnapshotCountExceeded"
+    SHARE_SNAPSHOT_OPERATION_NOT_SUPPORTED = "ShareSnapshotOperationNotSupported"
+    SHARE_HAS_SNAPSHOTS = "ShareHasSnapshots"
+    CONTAINER_QUOTA_DOWNGRADE_NOT_ALLOWED = "ContainerQuotaDowngradeNotAllowed"
+
+    # DataLake values
+    CONTENT_LENGTH_MUST_BE_ZERO = 'ContentLengthMustBeZero'
+    PATH_ALREADY_EXISTS = 'PathAlreadyExists'
+    INVALID_FLUSH_POSITION = 'InvalidFlushPosition'
+    INVALID_PROPERTY_NAME = 'InvalidPropertyName'
+    INVALID_SOURCE_URI = 'InvalidSourceUri'
+    UNSUPPORTED_REST_VERSION = 'UnsupportedRestVersion'
+    FILE_SYSTEM_NOT_FOUND = 'FilesystemNotFound'
+    PATH_NOT_FOUND = 'PathNotFound'
+    RENAME_DESTINATION_PARENT_PATH_NOT_FOUND = 'RenameDestinationParentPathNotFound'
+    SOURCE_PATH_NOT_FOUND = 'SourcePathNotFound'
+    DESTINATION_PATH_IS_BEING_DELETED = 'DestinationPathIsBeingDeleted'
+    FILE_SYSTEM_ALREADY_EXISTS = 'FilesystemAlreadyExists'
+    FILE_SYSTEM_BEING_DELETED = 'FilesystemBeingDeleted'
+    INVALID_DESTINATION_PATH = 'InvalidDestinationPath'
+    INVALID_RENAME_SOURCE_PATH = 'InvalidRenameSourcePath'
+    INVALID_SOURCE_OR_DESTINATION_RESOURCE_TYPE = 'InvalidSourceOrDestinationResourceType'
+    LEASE_IS_ALREADY_BROKEN = 'LeaseIsAlreadyBroken'
+    LEASE_NAME_MISMATCH = 'LeaseNameMismatch'
+    PATH_CONFLICT = 'PathConflict'
+    SOURCE_PATH_IS_BEING_DELETED = 'SourcePathIsBeingDeleted'
+
+
+class DictMixin(object):
+
+    def __setitem__(self, key, item):
+        self.__dict__[key] = item
+
+    def __getitem__(self, key):
+        return self.__dict__[key]
+
+    def __repr__(self):
+        return str(self)
+
+    def __len__(self):
+        return len(self.keys())
+
+    def __delitem__(self, key):
+        self.__dict__[key] = None
+
+    # Compare objects by comparing all attributes.
+    def __eq__(self, other):
+        if isinstance(other, self.__class__):
+            return self.__dict__ == other.__dict__
+        return False
+
+    # Compare objects by comparing all attributes.
+    def __ne__(self, other):
+        return not self.__eq__(other)
+
+    def __str__(self):
+        return str({k: v for k, v in self.__dict__.items() if not k.startswith('_')})
+
+    def __contains__(self, key):
+        return key in self.__dict__
+
+    def has_key(self, k):
+        return k in self.__dict__
+
+    def update(self, *args, **kwargs):
+        return self.__dict__.update(*args, **kwargs)
+
+    def keys(self):
+        return [k for k in self.__dict__ if not k.startswith('_')]
+
+    def values(self):
+        return [v for k, v in self.__dict__.items() if not k.startswith('_')]
+
+    def items(self):
+        return [(k, v) for k, v in self.__dict__.items() if not k.startswith('_')]
+
+    def get(self, key, default=None):
+        if key in self.__dict__:
+            return self.__dict__[key]
+        return default
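+# A minimal sketch: for any DictMixin subclass instance m, m['name'] reads
+# m.name, keys()/values()/items() skip underscore-prefixed attributes, and
+# `del m['name']` resets the attribute to None rather than removing it.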
+
+
+class LocationMode(object):
+    """
+    Specifies the location the request should be sent to. This mode only applies
+    for RA-GRS accounts which allow secondary read access. All other account types
+    must use PRIMARY.
+    """
+
+    PRIMARY = 'primary'  #: Requests should be sent to the primary location.
+    SECONDARY = 'secondary'  #: Requests should be sent to the secondary location, if possible.
+
+
+class ResourceTypes(object):
+    """
+    Specifies the resource types that are accessible with the account SAS.
+
+    :param bool service:
+        Access to service-level APIs (e.g., Get/Set Service Properties,
+        Get Service Stats, List Containers/Queues/Shares)
+    :param bool container:
+        Access to container-level APIs (e.g., Create/Delete Container,
+        Create/Delete Queue, Create/Delete Share,
+        List Blobs/Files and Directories)
+    :param bool object:
+        Access to object-level APIs for blobs, queue messages, and
+        files (e.g. Put Blob, Query Entity, Get Messages, Create File, etc.)
+    """
+
+    service: bool = False
+    container: bool = False
+    object: bool = False
+    _str: str
+
+    def __init__(
+        self,
+        service: bool = False,
+        container: bool = False,
+        object: bool = False  # pylint: disable=redefined-builtin
+    ) -> None:
+        self.service = service
+        self.container = container
+        self.object = object
+        self._str = (('s' if self.service else '') +
+                ('c' if self.container else '') +
+                ('o' if self.object else ''))
+
+    def __str__(self):
+        return self._str
+
+    @classmethod
+    def from_string(cls, string):
+        """Create a ResourceTypes from a string.
+
+        To specify service, container, or object you need only
+        include the first letter of the word in the string. E.g. for service and
+        container, you would provide the string "sc".
+
+        :param str string: Specify service, container, or object
+            in the string with the first letter of each word.
+        :return: A ResourceTypes object
+        :rtype: ~azure.storage.blob.ResourceTypes
+        """
+        res_service = 's' in string
+        res_container = 'c' in string
+        res_object = 'o' in string
+
+        parsed = cls(res_service, res_container, res_object)
+        parsed._str = string
+        return parsed
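+# A minimal sketch: ResourceTypes.from_string('sc') enables service and
+# container (object stays False), and str() round-trips to 'sc'.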
+
+
+class AccountSasPermissions(object):
+    """
+    Permissions class to be used with the generate_account_sas
+    function and for the AccessPolicies used with set_*_acl. There are two types of
+    SAS which may be used to grant resource access. One is to grant access to a
+    specific resource (resource-specific). Another is to grant access to the
+    entire service for a specific account and allow certain operations based on
+    perms found here.
+
+    :param bool read:
+        Valid for all signed resources types (Service, Container, and Object).
+        Permits read permissions to the specified resource type.
+    :param bool write:
+        Valid for all signed resources types (Service, Container, and Object).
+        Permits write permissions to the specified resource type.
+    :param bool delete:
+        Valid for Container and Object resource types, except for queue messages.
+    :param bool delete_previous_version:
+        Delete the previous blob version for the versioning enabled storage account.
+    :param bool list:
+        Valid for Service and Container resource types only.
+    :param bool add:
+        Valid for the following Object resource types only: queue messages, and append blobs.
+    :param bool create:
+        Valid for the following Object resource types only: blobs and files.
+        Users can create new blobs or files, but may not overwrite existing
+        blobs or files.
+    :param bool update:
+        Valid for the following Object resource types only: queue messages.
+    :param bool process:
+        Valid for the following Object resource type only: queue messages.
+    :keyword bool tag:
+        To enable set or get tags on the blobs in the container.
+    :keyword bool filter_by_tags:
+        To enable get blobs by tags, this should be used together with list permission.
+    :keyword bool set_immutability_policy:
+        To enable operations related to set/delete immutability policy.
+        To get immutability policy, you just need read permission.
+    :keyword bool permanent_delete:
+        To permit permanent deletion of the blob.
+        Valid for the Blob Object resource type only.
+    """
+
+    read: bool = False
+    write: bool = False
+    delete: bool = False
+    delete_previous_version: bool = False
+    list: bool = False
+    add: bool = False
+    create: bool = False
+    update: bool = False
+    process: bool = False
+    tag: bool = False
+    filter_by_tags: bool = False
+    set_immutability_policy: bool = False
+    permanent_delete: bool = False
+
+    def __init__(
+        self,
+        read: bool = False,
+        write: bool = False,
+        delete: bool = False,
+        list: bool = False,  # pylint: disable=redefined-builtin
+        add: bool = False,
+        create: bool = False,
+        update: bool = False,
+        process: bool = False,
+        delete_previous_version: bool = False,
+        **kwargs
+    ) -> None:
+        self.read = read
+        self.write = write
+        self.delete = delete
+        self.delete_previous_version = delete_previous_version
+        self.permanent_delete = kwargs.pop('permanent_delete', False)
+        self.list = list
+        self.add = add
+        self.create = create
+        self.update = update
+        self.process = process
+        self.tag = kwargs.pop('tag', False)
+        self.filter_by_tags = kwargs.pop('filter_by_tags', False)
+        self.set_immutability_policy = kwargs.pop('set_immutability_policy', False)
+        self._str = (('r' if self.read else '') +
+                     ('w' if self.write else '') +
+                     ('d' if self.delete else '') +
+                     ('x' if self.delete_previous_version else '') +
+                     ('y' if self.permanent_delete else '') +
+                     ('l' if self.list else '') +
+                     ('a' if self.add else '') +
+                     ('c' if self.create else '') +
+                     ('u' if self.update else '') +
+                     ('p' if self.process else '') +
+                     ('f' if self.filter_by_tags else '') +
+                     ('t' if self.tag else '') +
+                     ('i' if self.set_immutability_policy else '')
+                     )
+
+    def __str__(self):
+        return self._str
+
+    @classmethod
+    def from_string(cls, permission):
+        """Create AccountSasPermissions from a string.
+
+        To specify read, write, delete, etc. permissions you need only to
+        include the first letter of the word in the string. E.g. for read and write
+        permissions you would provide a string "rw".
+
+        :param str permission: Specify permissions in
+            the string with the first letter of the word.
+        :return: An AccountSasPermissions object
+        :rtype: ~azure.storage.filedatalake.AccountSasPermissions
+        """
+        p_read = 'r' in permission
+        p_write = 'w' in permission
+        p_delete = 'd' in permission
+        p_delete_previous_version = 'x' in permission
+        p_permanent_delete = 'y' in permission
+        p_list = 'l' in permission
+        p_add = 'a' in permission
+        p_create = 'c' in permission
+        p_update = 'u' in permission
+        p_process = 'p' in permission
+        p_tag = 't' in permission
+        p_filter_by_tags = 'f' in permission
+        p_set_immutability_policy = 'i' in permission
+        parsed = cls(read=p_read, write=p_write, delete=p_delete, delete_previous_version=p_delete_previous_version,
+                     list=p_list, add=p_add, create=p_create, update=p_update, process=p_process, tag=p_tag,
+                     filter_by_tags=p_filter_by_tags, set_immutability_policy=p_set_immutability_policy,
+                     permanent_delete=p_permanent_delete)
+
+        return parsed
+
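+# Example (illustrative sketch, not part of the upstream module): the
+# permission string round-trips through from_string(), with letters
+# normalized to the canonical order used by __init__.
+#
+#     perms = AccountSasPermissions.from_string("rwl")
+#     assert perms.read and perms.write and perms.list
+#     assert str(perms) == "rwl"
+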
+
+class Services(object):
+    """Specifies the services accessible with the account SAS.
+
+    :keyword bool blob:
+        Access for the `~azure.storage.blob.BlobServiceClient`. Default is False.
+    :keyword bool queue:
+        Access for the `~azure.storage.queue.QueueServiceClient`. Default is False.
+    :keyword bool fileshare:
+        Access for the `~azure.storage.fileshare.ShareServiceClient`. Default is False.
+    """
+
+    def __init__(
+        self, *,
+        blob: bool = False,
+        queue: bool = False,
+        fileshare: bool = False
+    ) -> None:
+        self.blob = blob
+        self.queue = queue
+        self.fileshare = fileshare
+        self._str = (('b' if self.blob else '') +
+                ('q' if self.queue else '') +
+                ('f' if self.fileshare else ''))
+
+    def __str__(self):
+        return self._str
+
+    @classmethod
+    def from_string(cls, string):
+        """Create Services from a string.
+
+        To specify blob, queue, or fileshare you need only to
+        include the first letter of the word in the string. E.g. for blob and queue
+        you would provide a string "bq".
+
+        :param str string: Specify blob, queue, or fileshare in
+            the string with the first letter of the word.
+        :return: A Services object
+        :rtype: ~azure.storage.filedatalake.Services
+        """
+        res_blob = 'b' in string
+        res_queue = 'q' in string
+        res_file = 'f' in string
+
+        parsed = cls(blob=res_blob, queue=res_queue, fileshare=res_file)
+        parsed._str = string
+        return parsed
+
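+# Example (illustrative sketch): "bq" grants blob and queue access. Note that
+# Services.from_string() preserves the input string verbatim via parsed._str.
+#
+#     svc = Services.from_string("bq")
+#     assert svc.blob and svc.queue and not svc.fileshare
+#     assert str(svc) == "bq"
+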
+
+class UserDelegationKey(object):
+    """
+    Represents a user delegation key, provided to the user by Azure Storage
+    based on their Azure Active Directory access token.
+
+    The fields are saved as simple strings since the user does not have to interact with this object;
+    to generate an identity SAS, the user can simply pass it to the right API.
+    """
+
+    signed_oid: Optional[str] = None
+    """Object ID of this token."""
+    signed_tid: Optional[str] = None
+    """Tenant ID of the tenant that issued this token."""
+    signed_start: Optional[str] = None
+    """The datetime this token becomes valid."""
+    signed_expiry: Optional[str] = None
+    """The datetime this token expires."""
+    signed_service: Optional[str] = None
+    """What service this key is valid for."""
+    signed_version: Optional[str] = None
+    """The version identifier of the REST service that created this token."""
+    value: Optional[str] = None
+    """The user delegation key."""
+
+    def __init__(self):
+        self.signed_oid = None
+        self.signed_tid = None
+        self.signed_start = None
+        self.signed_expiry = None
+        self.signed_service = None
+        self.signed_version = None
+        self.value = None
+
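+# Example (illustrative sketch, assuming a DataLakeServiceClient authenticated
+# with an Azure AD token credential):
+#
+#     from datetime import datetime, timedelta, timezone
+#     now = datetime.now(timezone.utc)
+#     key = service_client.get_user_delegation_key(now, now + timedelta(hours=1))
+#     # `key` is a UserDelegationKey; pass it as the credential when generating
+#     # a user delegation SAS.
+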
+
+class StorageConfiguration(Configuration):
+    """
+    Specifies the configurable values used in Azure Storage.
+
+    :param int max_single_put_size: If the blob size is less than or equal max_single_put_size, then the blob will be
+        uploaded with only one http PUT request. If the blob size is larger than max_single_put_size,
+        the blob will be uploaded in chunks. Defaults to 64*1024*1024, or 64MB.
+    :param int copy_polling_interval: The interval in seconds for polling copy operations.
+    :param int max_block_size: The maximum chunk size for uploading a block blob in chunks.
+        Defaults to 4*1024*1024, or 4MB.
+    :param int min_large_block_upload_threshold: The minimum chunk size required to use the memory efficient
+        algorithm when uploading a block blob.
+    :param bool use_byte_buffer: Use a byte buffer for block blob uploads. Defaults to False.
+    :param int max_page_size: The maximum chunk size for uploading a page blob. Defaults to 4*1024*1024, or 4MB.
+    :param int min_large_chunk_upload_threshold: The max size for a single put operation.
+    :param int max_single_get_size: The maximum size for a blob to be downloaded in a single call,
+        the exceeded part will be downloaded in chunks (could be parallel). Defaults to 32*1024*1024, or 32MB.
+    :param int max_chunk_get_size: The maximum chunk size used for downloading a blob. Defaults to 4*1024*1024,
+        or 4MB.
+    :param int max_range_size: The max range size for file upload.
+
+    """
+
+    max_single_put_size: int
+    copy_polling_interval: int
+    max_block_size: int
+    min_large_block_upload_threshold: int
+    use_byte_buffer: bool
+    max_page_size: int
+    min_large_chunk_upload_threshold: int
+    max_single_get_size: int
+    max_chunk_get_size: int
+    max_range_size: int
+    user_agent_policy: UserAgentPolicy
+
+    def __init__(self, **kwargs):
+        super(StorageConfiguration, self).__init__(**kwargs)
+        self.max_single_put_size = kwargs.pop('max_single_put_size', 64 * 1024 * 1024)
+        self.copy_polling_interval = 15
+        self.max_block_size = kwargs.pop('max_block_size', 4 * 1024 * 1024)
+        self.min_large_block_upload_threshold = kwargs.get('min_large_block_upload_threshold', 4 * 1024 * 1024 + 1)
+        self.use_byte_buffer = kwargs.pop('use_byte_buffer', False)
+        self.max_page_size = kwargs.pop('max_page_size', 4 * 1024 * 1024)
+        self.min_large_chunk_upload_threshold = kwargs.pop('min_large_chunk_upload_threshold', 100 * 1024 * 1024 + 1)
+        self.max_single_get_size = kwargs.pop('max_single_get_size', 32 * 1024 * 1024)
+        self.max_chunk_get_size = kwargs.pop('max_chunk_get_size', 4 * 1024 * 1024)
+        self.max_range_size = kwargs.pop('max_range_size', 4 * 1024 * 1024)
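+
+
+# Example (illustrative sketch): values not passed as keyword arguments fall
+# back to the defaults above.
+#
+#     config = StorageConfiguration(max_block_size=8 * 1024 * 1024)
+#     assert config.max_block_size == 8 * 1024 * 1024
+#     assert config.max_single_put_size == 64 * 1024 * 1024  # default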
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_shared/parser.py b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_shared/parser.py
new file mode 100644
index 00000000..112c1984
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_shared/parser.py
@@ -0,0 +1,53 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+
+from datetime import datetime, timezone
+from typing import Optional
+
+EPOCH_AS_FILETIME = 116444736000000000  # January 1, 1970 as MS filetime
+HUNDREDS_OF_NANOSECONDS = 10000000
+
+
+def _to_utc_datetime(value: datetime) -> str:
+    return value.strftime('%Y-%m-%dT%H:%M:%SZ')
+
+
+def _rfc_1123_to_datetime(rfc_1123: str) -> Optional[datetime]:
+    """Converts an RFC 1123 date string to a UTC datetime.
+
+    :param str rfc_1123: The time and date in RFC 1123 format.
+    :returns: The time and date in UTC datetime format.
+    :rtype: datetime
+    """
+    if not rfc_1123:
+        return None
+
+    return datetime.strptime(rfc_1123, "%a, %d %b %Y %H:%M:%S %Z")
+
+
+def _filetime_to_datetime(filetime: str) -> Optional[datetime]:
+    """Converts an MS filetime string to a UTC datetime. "0" indicates None.
+    If parsing MS Filetime fails, tries RFC 1123 as backup.
+
+    :param str filetime: The time and date in MS filetime format.
+    :returns: The time and date in UTC datetime format.
+    :rtype: datetime
+    """
+    if not filetime:
+        return None
+
+    # Try to convert to MS Filetime
+    try:
+        temp_filetime = int(filetime)
+        if temp_filetime == 0:
+            return None
+
+        return datetime.fromtimestamp((temp_filetime - EPOCH_AS_FILETIME) / HUNDREDS_OF_NANOSECONDS, tz=timezone.utc)
+    except ValueError:
+        pass
+
+    # Try RFC 1123 as backup
+    return _rfc_1123_to_datetime(filetime)
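+
+
+# Worked example (illustrative): one day after the Unix epoch expressed as an
+# MS filetime is 116444736000000000 + 86400 * 10**7 = 116445600000000000, so:
+#
+#     assert _filetime_to_datetime("116445600000000000") == \
+#         datetime(1970, 1, 2, tzinfo=timezone.utc)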
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_shared/policies.py b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_shared/policies.py
new file mode 100644
index 00000000..ee75cd5a
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_shared/policies.py
@@ -0,0 +1,694 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+
+import base64
+import hashlib
+import logging
+import random
+import re
+import uuid
+from io import SEEK_SET, UnsupportedOperation
+from time import time
+from typing import Any, Dict, Optional, TYPE_CHECKING
+from urllib.parse import (
+    parse_qsl,
+    urlencode,
+    urlparse,
+    urlunparse,
+)
+from wsgiref.handlers import format_date_time
+
+from azure.core.exceptions import AzureError, ServiceRequestError, ServiceResponseError
+from azure.core.pipeline.policies import (
+    BearerTokenCredentialPolicy,
+    HeadersPolicy,
+    HTTPPolicy,
+    NetworkTraceLoggingPolicy,
+    RequestHistory,
+    SansIOHTTPPolicy
+)
+
+from .authentication import AzureSigningError, StorageHttpChallenge
+from .constants import DEFAULT_OAUTH_SCOPE
+from .models import LocationMode
+
+if TYPE_CHECKING:
+    from azure.core.credentials import TokenCredential
+    from azure.core.pipeline.transport import (  # pylint: disable=non-abstract-transport-import
+        PipelineRequest,
+        PipelineResponse
+    )
+
+
+_LOGGER = logging.getLogger(__name__)
+
+
+def encode_base64(data):
+    if isinstance(data, str):
+        data = data.encode('utf-8')
+    encoded = base64.b64encode(data)
+    return encoded.decode('utf-8')
+
+
+# Are we out of retries?
+def is_exhausted(settings):
+    retry_counts = (settings['total'], settings['connect'], settings['read'], settings['status'])
+    retry_counts = list(filter(None, retry_counts))
+    if not retry_counts:
+        return False
+    return min(retry_counts) < 0
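+
+# Worked example (illustrative): retries are exhausted once any tracked counter
+# drops below zero; counters that are None (or 0, being falsy) are filtered out.
+#
+#     assert is_exhausted({'total': -1, 'connect': 2, 'read': 2, 'status': 2})
+#     assert not is_exhausted({'total': None, 'connect': None, 'read': None, 'status': None})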
+
+
+def retry_hook(settings, **kwargs):
+    if settings['hook']:
+        settings['hook'](retry_count=settings['count'] - 1, location_mode=settings['mode'], **kwargs)
+
+
+# Is this method/status code retryable? (Based on allowlists and control
+# variables such as the number of total retries to allow, whether to
+# respect the Retry-After header, whether this header is present, and
+# whether the returned status code is on the list of status codes to
+# be retried upon on the presence of the aforementioned header)
+def is_retry(response, mode):
+    status = response.http_response.status_code
+    if 300 <= status < 500:
+        # An exception occurred, but in most cases it was expected. Examples could
+        # include a 409 Conflict or 412 Precondition Failed.
+        if status == 404 and mode == LocationMode.SECONDARY:
+            # Response code 404 should be retried if secondary was used.
+            return True
+        if status == 408:
+            # Response code 408 is a timeout and should be retried.
+            return True
+        return False
+    if status >= 500:
+        # Response codes above 500 with the exception of 501 Not Implemented and
+        # 505 Version Not Supported indicate a server issue and should be retried.
+        if status in [501, 505]:
+            return False
+        return True
+    return False
+
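+# Note (illustrative summary): a 5xx from the service is retryable except for
+# 501/505; a 404 is retryable only when the secondary endpoint was used, since
+# the data may simply not have replicated there yet; 408 timeouts are retried.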
+
+def is_checksum_retry(response):
+    # retry if invalid content md5
+    if response.context.get('validate_content', False) and response.http_response.headers.get('content-md5'):
+        computed_md5 = response.http_request.headers.get('content-md5', None) or \
+                            encode_base64(StorageContentValidation.get_content_md5(response.http_response.body()))
+        if response.http_response.headers['content-md5'] != computed_md5:
+            return True
+    return False
+
+
+def urljoin(base_url, stub_url):
+    parsed = urlparse(base_url)
+    parsed = parsed._replace(path=parsed.path + '/' + stub_url)
+    return parsed.geturl()
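+
+# Example (illustrative sketch): the stub is appended to the URL path while the
+# query string is preserved.
+#
+#     assert urljoin("https://acct.queue.core.windows.net/q1?a=b", "msg") == \
+#         "https://acct.queue.core.windows.net/q1/msg?a=b"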
+
+
+class QueueMessagePolicy(SansIOHTTPPolicy):
+
+    def on_request(self, request):
+        message_id = request.context.options.pop('queue_message_id', None)
+        if message_id:
+            request.http_request.url = urljoin(
+                request.http_request.url,
+                message_id)
+
+
+class StorageHeadersPolicy(HeadersPolicy):
+    request_id_header_name = 'x-ms-client-request-id'
+
+    def on_request(self, request: "PipelineRequest") -> None:
+        super(StorageHeadersPolicy, self).on_request(request)
+        current_time = format_date_time(time())
+        request.http_request.headers['x-ms-date'] = current_time
+
+        custom_id = request.context.options.pop('client_request_id', None)
+        request.http_request.headers['x-ms-client-request-id'] = custom_id or str(uuid.uuid1())
+
+    # def on_response(self, request, response):
+    #     # raise exception if the echoed client request id from the service is not identical to the one we sent
+    #     if self.request_id_header_name in response.http_response.headers:
+
+    #         client_request_id = request.http_request.headers.get(self.request_id_header_name)
+
+    #         if response.http_response.headers[self.request_id_header_name] != client_request_id:
+    #             raise AzureError(
+    #                 "Echoed client request ID: {} does not match sent client request ID: {}.  "
+    #                 "Service request ID: {}".format(
+    #                     response.http_response.headers[self.request_id_header_name], client_request_id,
+    #                     response.http_response.headers['x-ms-request-id']),
+    #                 response=response.http_response
+    #             )
+
+
+class StorageHosts(SansIOHTTPPolicy):
+
+    def __init__(self, hosts=None, **kwargs):  # pylint: disable=unused-argument
+        self.hosts = hosts
+        super(StorageHosts, self).__init__()
+
+    def on_request(self, request: "PipelineRequest") -> None:
+        request.context.options['hosts'] = self.hosts
+        parsed_url = urlparse(request.http_request.url)
+
+        # Detect what location mode we're currently requesting with
+        location_mode = LocationMode.PRIMARY
+        for key, value in self.hosts.items():
+            if parsed_url.netloc == value:
+                location_mode = key
+
+        # See if a specific location mode has been specified, and if so, redirect
+        use_location = request.context.options.pop('use_location', None)
+        if use_location:
+            # Lock retries to the specific location
+            request.context.options['retry_to_secondary'] = False
+            if use_location not in self.hosts:
+                raise ValueError(f"Attempting to use undefined host location {use_location}")
+            if use_location != location_mode:
+                # Update request URL to use the specified location
+                updated = parsed_url._replace(netloc=self.hosts[use_location])
+                request.http_request.url = updated.geturl()
+                location_mode = use_location
+
+        request.context.options['location_mode'] = location_mode
+
+
+class StorageLoggingPolicy(NetworkTraceLoggingPolicy):
+    """A policy that logs HTTP request and response to the DEBUG logger.
+
+    This accepts both global configuration and per-request overrides via the "logging_enable" option.
+    """
+
+    def __init__(self, logging_enable: bool = False, **kwargs) -> None:
+        self.logging_body = kwargs.pop("logging_body", False)
+        super(StorageLoggingPolicy, self).__init__(logging_enable=logging_enable, **kwargs)
+
+    def on_request(self, request: "PipelineRequest") -> None:
+        http_request = request.http_request
+        options = request.context.options
+        self.logging_body = self.logging_body or options.pop("logging_body", False)
+        if options.pop("logging_enable", self.enable_http_logger):
+            request.context["logging_enable"] = True
+            if not _LOGGER.isEnabledFor(logging.DEBUG):
+                return
+
+            try:
+                log_url = http_request.url
+                query_params = http_request.query
+                if 'sig' in query_params:
+                    log_url = log_url.replace(query_params['sig'], "*****")
+                _LOGGER.debug("Request URL: %r", log_url)
+                _LOGGER.debug("Request method: %r", http_request.method)
+                _LOGGER.debug("Request headers:")
+                for header, value in http_request.headers.items():
+                    if header.lower() == 'authorization':
+                        value = '*****'
+                    elif header.lower() == 'x-ms-copy-source' and 'sig' in value:
+                        # take the url apart and scrub away the signed signature
+                        scheme, netloc, path, params, query, fragment = urlparse(value)
+                        parsed_qs = dict(parse_qsl(query))
+                        parsed_qs['sig'] = '*****'
+
+                        # the SAS needs to be put back together
+                        value = urlunparse((scheme, netloc, path, params, urlencode(parsed_qs), fragment))
+
+                    _LOGGER.debug("    %r: %r", header, value)
+                _LOGGER.debug("Request body:")
+
+                if self.logging_body:
+                    _LOGGER.debug(str(http_request.body))
+                else:
+                    # We don't want to log the binary data of a file upload.
+                    _LOGGER.debug("Hidden body, please use logging_body to show body")
+            except Exception as err:  # pylint: disable=broad-except
+                _LOGGER.debug("Failed to log request: %r", err)
+
+    def on_response(self, request: "PipelineRequest", response: "PipelineResponse") -> None:
+        if response.context.pop("logging_enable", self.enable_http_logger):
+            if not _LOGGER.isEnabledFor(logging.DEBUG):
+                return
+
+            try:
+                _LOGGER.debug("Response status: %r", response.http_response.status_code)
+                _LOGGER.debug("Response headers:")
+                for res_header, value in response.http_response.headers.items():
+                    _LOGGER.debug("    %r: %r", res_header, value)
+
+                # We don't want to log binary data if the response is a file.
+                _LOGGER.debug("Response content:")
+                pattern = re.compile(r'attachment; ?filename=["\w.]+', re.IGNORECASE)
+                header = response.http_response.headers.get('content-disposition')
+                resp_content_type = response.http_response.headers.get("content-type", "")
+
+                if header and pattern.match(header):
+                    filename = header.partition('=')[2]
+                    _LOGGER.debug("File attachments: %s", filename)
+                elif resp_content_type.endswith("octet-stream"):
+                    _LOGGER.debug("Body contains binary data.")
+                elif resp_content_type.startswith("image"):
+                    _LOGGER.debug("Body contains image data.")
+
+                if self.logging_body and resp_content_type.startswith("text"):
+                    _LOGGER.debug(response.http_response.text())
+                elif self.logging_body:
+                    try:
+                        _LOGGER.debug(response.http_response.body())
+                    except ValueError:
+                        _LOGGER.debug("Body is streamable")
+
+            except Exception as err:  # pylint: disable=broad-except
+                _LOGGER.debug("Failed to log response: %s", repr(err))
+
+
+class StorageRequestHook(SansIOHTTPPolicy):
+
+    def __init__(self, **kwargs):
+        self._request_callback = kwargs.get('raw_request_hook')
+        super(StorageRequestHook, self).__init__()
+
+    def on_request(self, request: "PipelineRequest") -> None:
+        request_callback = request.context.options.pop('raw_request_hook', self._request_callback)
+        if request_callback:
+            request_callback(request)
+
+
+class StorageResponseHook(HTTPPolicy):
+
+    def __init__(self, **kwargs):
+        self._response_callback = kwargs.get('raw_response_hook')
+        super(StorageResponseHook, self).__init__()
+
+    def send(self, request: "PipelineRequest") -> "PipelineResponse":
+        # Values could be 0
+        data_stream_total = request.context.get('data_stream_total')
+        if data_stream_total is None:
+            data_stream_total = request.context.options.pop('data_stream_total', None)
+        download_stream_current = request.context.get('download_stream_current')
+        if download_stream_current is None:
+            download_stream_current = request.context.options.pop('download_stream_current', None)
+        upload_stream_current = request.context.get('upload_stream_current')
+        if upload_stream_current is None:
+            upload_stream_current = request.context.options.pop('upload_stream_current', None)
+
+        response_callback = request.context.get('response_callback') or \
+            request.context.options.pop('raw_response_hook', self._response_callback)
+
+        response = self.next.send(request)
+
+        will_retry = is_retry(response, request.context.options.get('mode')) or is_checksum_retry(response)
+        # Auth error could come from Bearer challenge, in which case this request will be made again
+        is_auth_error = response.http_response.status_code == 401
+        should_update_counts = not (will_retry or is_auth_error)
+
+        if should_update_counts and download_stream_current is not None:
+            download_stream_current += int(response.http_response.headers.get('Content-Length', 0))
+            if data_stream_total is None:
+                content_range = response.http_response.headers.get('Content-Range')
+                if content_range:
+                    data_stream_total = int(content_range.split(' ', 1)[1].split('/', 1)[1])
+                else:
+                    data_stream_total = download_stream_current
+        elif should_update_counts and upload_stream_current is not None:
+            upload_stream_current += int(response.http_request.headers.get('Content-Length', 0))
+        for pipeline_obj in [request, response]:
+            if hasattr(pipeline_obj, 'context'):
+                pipeline_obj.context['data_stream_total'] = data_stream_total
+                pipeline_obj.context['download_stream_current'] = download_stream_current
+                pipeline_obj.context['upload_stream_current'] = upload_stream_current
+        if response_callback:
+            response_callback(response)
+            request.context['response_callback'] = response_callback
+        return response
+
+
+class StorageContentValidation(SansIOHTTPPolicy):
+    """A simple policy that sends the given headers
+    with the request.
+
+    This will overwrite any headers already defined in the request.
+    """
+    header_name = 'Content-MD5'
+
+    def __init__(self, **kwargs: Any) -> None:  # pylint: disable=unused-argument
+        super(StorageContentValidation, self).__init__()
+
+    @staticmethod
+    def get_content_md5(data):
+        # Since HTTP does not differentiate between no content and empty content,
+        # we have to perform a None check.
+        data = data or b""
+        md5 = hashlib.md5() # nosec
+        if isinstance(data, bytes):
+            md5.update(data)
+        elif hasattr(data, 'read'):
+            pos = 0
+            try:
+                pos = data.tell()
+            except:  # pylint: disable=bare-except
+                pass
+            for chunk in iter(lambda: data.read(4096), b""):
+                md5.update(chunk)
+            try:
+                data.seek(pos, SEEK_SET)
+            except (AttributeError, IOError) as exc:
+                raise ValueError("Data should be bytes or a seekable file-like object.") from exc
+        else:
+            raise ValueError("Data should be bytes or a seekable file-like object.")
+
+        return md5.digest()
+
+    def on_request(self, request: "PipelineRequest") -> None:
+        validate_content = request.context.options.pop('validate_content', False)
+        if validate_content and request.http_request.method != 'GET':
+            computed_md5 = encode_base64(StorageContentValidation.get_content_md5(request.http_request.data))
+            request.http_request.headers[self.header_name] = computed_md5
+            request.context['validate_content_md5'] = computed_md5
+        request.context['validate_content'] = validate_content
+
+    def on_response(self, request: "PipelineRequest", response: "PipelineResponse") -> None:
+        if response.context.get('validate_content', False) and response.http_response.headers.get('content-md5'):
+            computed_md5 = request.context.get('validate_content_md5') or \
+                encode_base64(StorageContentValidation.get_content_md5(response.http_response.body()))
+            if response.http_response.headers['content-md5'] != computed_md5:
+                raise AzureError((
+                    f"MD5 mismatch. Expected value is '{response.http_response.headers['content-md5']}', "
+                    f"computed value is '{computed_md5}'."),
+                    response=response.http_response
+                )
+
+
+class StorageRetryPolicy(HTTPPolicy):
+    """
+    The base class for Exponential and Linear retries containing shared code.
+    """
+
+    total_retries: int
+    """The max number of retries."""
+    connect_retries: int
+    """The max number of connect retries."""
+    read_retries: int
+    """The max number of read retries."""
+    status_retries: int
+    """The max number of status retries."""
+    retry_to_secondary: bool
+    """Whether the secondary endpoint should be retried."""
+
+    def __init__(self, **kwargs: Any) -> None:
+        self.total_retries = kwargs.pop('retry_total', 10)
+        self.connect_retries = kwargs.pop('retry_connect', 3)
+        self.read_retries = kwargs.pop('retry_read', 3)
+        self.status_retries = kwargs.pop('retry_status', 3)
+        self.retry_to_secondary = kwargs.pop('retry_to_secondary', False)
+        super(StorageRetryPolicy, self).__init__()
+
+    def _set_next_host_location(self, settings: Dict[str, Any], request: "PipelineRequest") -> None:
+        """
+        A function which sets the next host location on the request, if applicable.
+
+        :param Dict[str, Any] settings: The configurable values pertaining to the next host location.
+        :param PipelineRequest request: A pipeline request object.
+        """
+        if settings['hosts'] and all(settings['hosts'].values()):
+            url = urlparse(request.url)
+            # If there's more than one possible location, retry to the alternative
+            if settings['mode'] == LocationMode.PRIMARY:
+                settings['mode'] = LocationMode.SECONDARY
+            else:
+                settings['mode'] = LocationMode.PRIMARY
+            updated = url._replace(netloc=settings['hosts'].get(settings['mode']))
+            request.url = updated.geturl()
+
+    def configure_retries(self, request: "PipelineRequest") -> Dict[str, Any]:
+        body_position = None
+        if hasattr(request.http_request.body, 'read'):
+            try:
+                body_position = request.http_request.body.tell()
+            except (AttributeError, UnsupportedOperation):
+                # if body position cannot be obtained, then retries will not work
+                pass
+        options = request.context.options
+        return {
+            'total': options.pop("retry_total", self.total_retries),
+            'connect': options.pop("retry_connect", self.connect_retries),
+            'read': options.pop("retry_read", self.read_retries),
+            'status': options.pop("retry_status", self.status_retries),
+            'retry_secondary': options.pop("retry_to_secondary", self.retry_to_secondary),
+            'mode': options.pop("location_mode", LocationMode.PRIMARY),
+            'hosts': options.pop("hosts", None),
+            'hook': options.pop("retry_hook", None),
+            'body_position': body_position,
+            'count': 0,
+            'history': []
+        }
+
+    def get_backoff_time(self, settings: Dict[str, Any]) -> float:  # pylint: disable=unused-argument
+        """ Formula for computing the current backoff.
+        Should be overridden by the child class.
+
+        :param Dict[str, Any] settings: The configurable values pertaining to the backoff time.
+        :returns: The backoff time.
+        :rtype: float
+        """
+        return 0
+
+    def sleep(self, settings, transport):
+        backoff = self.get_backoff_time(settings)
+        if not backoff or backoff < 0:
+            return
+        transport.sleep(backoff)
+
+    def increment(
+        self, settings: Dict[str, Any],
+        request: "PipelineRequest",
+        response: Optional["PipelineResponse"] = None,
+        error: Optional[AzureError] = None
+    ) -> bool:
+        """Increment the retry counters.
+
+        :param Dict[str, Any] settings: The configurable values pertaining to the increment operation.
+        :param PipelineRequest request: A pipeline request object.
+        :param Optional[PipelineResponse] response: A pipeline response object.
+        :param Optional[AzureError] error: An error encountered during the request, or
+            None if the response was received successfully.
+        :returns: Whether the retry attempts are exhausted.
+        :rtype: bool
+        """
+        settings['total'] -= 1
+
+        if error and isinstance(error, ServiceRequestError):
+            # Errors when we're fairly sure that the server did not receive the
+            # request, so it should be safe to retry.
+            settings['connect'] -= 1
+            settings['history'].append(RequestHistory(request, error=error))
+
+        elif error and isinstance(error, ServiceResponseError):
+            # Errors that occur after the request has been started, so we should
+            # assume that the server began processing it.
+            settings['read'] -= 1
+            settings['history'].append(RequestHistory(request, error=error))
+
+        else:
+            # Incrementing because of a server error like a 500 in
+            # status_forcelist when the given method is in the allowlist.
+            if response:
+                settings['status'] -= 1
+                settings['history'].append(RequestHistory(request, http_response=response))
+
+        if not is_exhausted(settings):
+            if request.method not in ['PUT'] and settings['retry_secondary']:
+                self._set_next_host_location(settings, request)
+
+            # rewind the request body if it is a stream
+            if request.body and hasattr(request.body, 'read'):
+                # if no position was saved, the body cannot be rewound, so retry will not work
+                if settings['body_position'] is None:
+                    return False
+                try:
+                    # attempt to rewind the body to the initial position
+                    request.body.seek(settings['body_position'], SEEK_SET)
+                except (UnsupportedOperation, ValueError):
+                    # if body is not seekable, then retry would not work
+                    return False
+            settings['count'] += 1
+            return True
+        return False
+
+    def send(self, request):
+        retries_remaining = True
+        response = None
+        retry_settings = self.configure_retries(request)
+        while retries_remaining:
+            try:
+                response = self.next.send(request)
+                if is_retry(response, retry_settings['mode']) or is_checksum_retry(response):
+                    retries_remaining = self.increment(
+                        retry_settings,
+                        request=request.http_request,
+                        response=response.http_response)
+                    if retries_remaining:
+                        retry_hook(
+                            retry_settings,
+                            request=request.http_request,
+                            response=response.http_response,
+                            error=None)
+                        self.sleep(retry_settings, request.context.transport)
+                        continue
+                break
+            except AzureError as err:
+                if isinstance(err, AzureSigningError):
+                    raise
+                retries_remaining = self.increment(
+                    retry_settings, request=request.http_request, error=err)
+                if retries_remaining:
+                    retry_hook(
+                        retry_settings,
+                        request=request.http_request,
+                        response=None,
+                        error=err)
+                    self.sleep(retry_settings, request.context.transport)
+                    continue
+                raise err
+        if retry_settings['history']:
+            response.context['history'] = retry_settings['history']
+        response.http_response.location_mode = retry_settings['mode']
+        return response
+
+
+class ExponentialRetry(StorageRetryPolicy):
+    """Exponential retry."""
+
+    initial_backoff: int
+    """The initial backoff interval, in seconds, for the first retry."""
+    increment_base: int
+    """The base, in seconds, to increment the initial_backoff by after the
+    first retry."""
+    random_jitter_range: int
+    """A number in seconds which indicates a range to jitter/randomize for the back-off interval."""
+
+    def __init__(
+        self, initial_backoff: int = 15,
+        increment_base: int = 3,
+        retry_total: int = 3,
+        retry_to_secondary: bool = False,
+        random_jitter_range: int = 3,
+        **kwargs: Any
+    ) -> None:
+        """
+        Constructs an Exponential retry object. The initial_backoff is used for
+        the first retry. Subsequent retries are retried after initial_backoff +
+        increment_base^retry_count seconds.
+
+        :param int initial_backoff:
+            The initial backoff interval, in seconds, for the first retry.
+        :param int increment_base:
+            The base, in seconds, to increment the initial_backoff by after the
+            first retry.
+        :param int retry_total:
+            The maximum number of retry attempts.
+        :param bool retry_to_secondary:
+            Whether the request should be retried to secondary, if able. This should
+            only be enabled if RA-GRS accounts are used and potentially stale data
+            can be handled.
+        :param int random_jitter_range:
+            A number in seconds which indicates a range to jitter/randomize for the back-off interval.
+            For example, a random_jitter_range of 3 results in a back-off interval x that varies between x-3 and x+3.
+        """
+        self.initial_backoff = initial_backoff
+        self.increment_base = increment_base
+        self.random_jitter_range = random_jitter_range
+        super(ExponentialRetry, self).__init__(
+            retry_total=retry_total, retry_to_secondary=retry_to_secondary, **kwargs)
+
+    def get_backoff_time(self, settings: Dict[str, Any]) -> float:
+        """
+        Calculates how long to sleep before retrying.
+
+        :param Dict[str, Any] settings: The configurable values pertaining to the backoff time.
+        :returns:
+            A float indicating how long to wait before retrying the request,
+            or None to indicate no retry should be performed.
+        :rtype: float
+        """
+        random_generator = random.Random()
+        backoff = self.initial_backoff + (0 if settings['count'] == 0 else pow(self.increment_base, settings['count']))
+        random_range_start = backoff - self.random_jitter_range if backoff > self.random_jitter_range else 0
+        random_range_end = backoff + self.random_jitter_range
+        return random_generator.uniform(random_range_start, random_range_end)
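+
+# Worked example (illustrative): with the defaults (initial_backoff=15,
+# increment_base=3, random_jitter_range=3) the pre-jitter backoff is 15s for
+# the first retry, 15 + 3**1 = 18s for the second and 15 + 3**2 = 24s for the
+# third; jitter then draws uniformly from a +/-3s window around each value.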
+
+
+class LinearRetry(StorageRetryPolicy):
+    """Linear retry."""
+
+    backoff: int
+    """The backoff interval, in seconds, between retries."""
+    random_jitter_range: int
+    """A number in seconds which indicates a range to jitter/randomize for the back-off interval."""
+
+    def __init__(
+        self, backoff: int = 15,
+        retry_total: int = 3,
+        retry_to_secondary: bool = False,
+        random_jitter_range: int = 3,
+        **kwargs: Any
+    ) -> None:
+        """
+        Constructs a Linear retry object.
+
+        :param int backoff:
+            The backoff interval, in seconds, between retries.
+        :param int retry_total:
+            The maximum number of retry attempts.
+        :param bool retry_to_secondary:
+            Whether the request should be retried to secondary, if able. This should
+            only be enabled if RA-GRS accounts are used and potentially stale data
+            can be handled.
+        :param int random_jitter_range:
+            A number in seconds which indicates a range to jitter/randomize for the back-off interval.
+            For example, a random_jitter_range of 3 results in a back-off interval x that varies between x-3 and x+3.
+        """
+        self.backoff = backoff
+        self.random_jitter_range = random_jitter_range
+        super(LinearRetry, self).__init__(
+            retry_total=retry_total, retry_to_secondary=retry_to_secondary, **kwargs)
+
+    def get_backoff_time(self, settings: Dict[str, Any]) -> float:
+        """
+        Calculates how long to sleep before retrying.
+
+        :param Dict[str, Any] settings: The configurable values pertaining to the backoff time.
+        :returns:
+            A float indicating how long to wait before retrying the request,
+            or None to indicate no retry should be performed.
+        :rtype: float
+        """
+        random_generator = random.Random()
+        # the backoff interval normally does not change, however there is the possibility
+        # that it was modified by accessing the property directly after initializing the object
+        random_range_start = self.backoff - self.random_jitter_range \
+            if self.backoff > self.random_jitter_range else 0
+        random_range_end = self.backoff + self.random_jitter_range
+        return random_generator.uniform(random_range_start, random_range_end)
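+
+# Worked example (illustrative): with backoff=15 and random_jitter_range=3,
+# every retry sleeps for a uniform random duration in [12, 18] seconds.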
+
+
+class StorageBearerTokenCredentialPolicy(BearerTokenCredentialPolicy):
+    """ Custom Bearer token credential policy for following Storage Bearer challenges """
+
+    def __init__(self, credential: "TokenCredential", audience: str, **kwargs: Any) -> None:
+        super(StorageBearerTokenCredentialPolicy, self).__init__(credential, audience, **kwargs)
+
+    def on_challenge(self, request: "PipelineRequest", response: "PipelineResponse") -> bool:
+        try:
+            auth_header = response.http_response.headers.get("WWW-Authenticate")
+            challenge = StorageHttpChallenge(auth_header)
+        except ValueError:
+            return False
+
+        scope = challenge.resource_id + DEFAULT_OAUTH_SCOPE
+        self.authorize_request(request, scope, tenant_id=challenge.tenant_id)
+
+        return True
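+
+
+# Example (illustrative; the exact header shape is an assumption of this
+# sketch): a challenge such as
+#
+#     WWW-Authenticate: Bearer authorization_uri=https://login.microsoftonline.com/<tenant>/oauth2/authorize resource_id=https://storage.azure.com
+#
+# yields the scope "https://storage.azure.com" + DEFAULT_OAUTH_SCOPE and a
+# re-authorization against <tenant>.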
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_shared/policies_async.py b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_shared/policies_async.py
new file mode 100644
index 00000000..1c030a82
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_shared/policies_async.py
@@ -0,0 +1,296 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+# pylint: disable=invalid-overridden-method
+
+import asyncio
+import logging
+import random
+from typing import Any, Dict, TYPE_CHECKING
+
+from azure.core.exceptions import AzureError, StreamClosedError, StreamConsumedError
+from azure.core.pipeline.policies import AsyncBearerTokenCredentialPolicy, AsyncHTTPPolicy
+
+from .authentication import AzureSigningError, StorageHttpChallenge
+from .constants import DEFAULT_OAUTH_SCOPE
+from .policies import encode_base64, is_retry, StorageContentValidation, StorageRetryPolicy
+
+if TYPE_CHECKING:
+    from azure.core.credentials_async import AsyncTokenCredential
+    from azure.core.pipeline.transport import (  # pylint: disable=non-abstract-transport-import
+        PipelineRequest,
+        PipelineResponse
+    )
+
+
+_LOGGER = logging.getLogger(__name__)
+
+
+async def retry_hook(settings, **kwargs):
+    if settings['hook']:
+        if asyncio.iscoroutinefunction(settings['hook']):
+            await settings['hook'](
+                retry_count=settings['count'] - 1,
+                location_mode=settings['mode'],
+                **kwargs)
+        else:
+            settings['hook'](
+                retry_count=settings['count'] - 1,
+                location_mode=settings['mode'],
+                **kwargs)
+
+
+async def is_checksum_retry(response):
+    # retry if invalid content md5
+    if response.context.get('validate_content', False) and response.http_response.headers.get('content-md5'):
+        try:
+            await response.http_response.load_body()  # Load the body in memory and close the socket
+        except (StreamClosedError, StreamConsumedError):
+            pass
+        computed_md5 = response.http_request.headers.get('content-md5', None) or \
+                            encode_base64(StorageContentValidation.get_content_md5(response.http_response.body()))
+        if response.http_response.headers['content-md5'] != computed_md5:
+            return True
+    return False
+
+
+class AsyncStorageResponseHook(AsyncHTTPPolicy):
+
+    def __init__(self, **kwargs):
+        self._response_callback = kwargs.get('raw_response_hook')
+        super(AsyncStorageResponseHook, self).__init__()
+
+    async def send(self, request: "PipelineRequest") -> "PipelineResponse":
+        # Values could be 0
+        data_stream_total = request.context.get('data_stream_total')
+        if data_stream_total is None:
+            data_stream_total = request.context.options.pop('data_stream_total', None)
+        download_stream_current = request.context.get('download_stream_current')
+        if download_stream_current is None:
+            download_stream_current = request.context.options.pop('download_stream_current', None)
+        upload_stream_current = request.context.get('upload_stream_current')
+        if upload_stream_current is None:
+            upload_stream_current = request.context.options.pop('upload_stream_current', None)
+
+        response_callback = request.context.get('response_callback') or \
+            request.context.options.pop('raw_response_hook', self._response_callback)
+
+        response = await self.next.send(request)
+
+        will_retry = is_retry(response, request.context.options.get('mode')) or await is_checksum_retry(response)
+        # Auth error could come from Bearer challenge, in which case this request will be made again
+        is_auth_error = response.http_response.status_code == 401
+        should_update_counts = not (will_retry or is_auth_error)
+
+        if should_update_counts and download_stream_current is not None:
+            download_stream_current += int(response.http_response.headers.get('Content-Length', 0))
+            if data_stream_total is None:
+                content_range = response.http_response.headers.get('Content-Range')
+                if content_range:
+                    data_stream_total = int(content_range.split(' ', 1)[1].split('/', 1)[1])
+                else:
+                    data_stream_total = download_stream_current
+        elif should_update_counts and upload_stream_current is not None:
+            upload_stream_current += int(response.http_request.headers.get('Content-Length', 0))
+        for pipeline_obj in [request, response]:
+            if hasattr(pipeline_obj, 'context'):
+                pipeline_obj.context['data_stream_total'] = data_stream_total
+                pipeline_obj.context['download_stream_current'] = download_stream_current
+                pipeline_obj.context['upload_stream_current'] = upload_stream_current
+        if response_callback:
+            if asyncio.iscoroutinefunction(response_callback):
+                await response_callback(response) # type: ignore
+            else:
+                response_callback(response)
+            request.context['response_callback'] = response_callback
+        return response
+
+
+class AsyncStorageRetryPolicy(StorageRetryPolicy):
+    """
+    The base class for Exponential and Linear retries containing shared code.
+    """
+
+    async def sleep(self, settings, transport):
+        backoff = self.get_backoff_time(settings)
+        if not backoff or backoff < 0:
+            return
+        await transport.sleep(backoff)
+
+    async def send(self, request):
+        retries_remaining = True
+        response = None
+        retry_settings = self.configure_retries(request)
+        while retries_remaining:
+            try:
+                response = await self.next.send(request)
+                if is_retry(response, retry_settings['mode']) or await is_checksum_retry(response):
+                    retries_remaining = self.increment(
+                        retry_settings,
+                        request=request.http_request,
+                        response=response.http_response)
+                    if retries_remaining:
+                        await retry_hook(
+                            retry_settings,
+                            request=request.http_request,
+                            response=response.http_response,
+                            error=None)
+                        await self.sleep(retry_settings, request.context.transport)
+                        continue
+                break
+            except AzureError as err:
+                if isinstance(err, AzureSigningError):
+                    raise
+                retries_remaining = self.increment(
+                    retry_settings, request=request.http_request, error=err)
+                if retries_remaining:
+                    await retry_hook(
+                        retry_settings,
+                        request=request.http_request,
+                        response=None,
+                        error=err)
+                    await self.sleep(retry_settings, request.context.transport)
+                    continue
+                raise err
+        if retry_settings['history']:
+            response.context['history'] = retry_settings['history']
+        response.http_response.location_mode = retry_settings['mode']
+        return response
+
+
+class ExponentialRetry(AsyncStorageRetryPolicy):
+    """Exponential retry."""
+
+    initial_backoff: int
+    """The initial backoff interval, in seconds, for the first retry."""
+    increment_base: int
+    """The base, in seconds, to increment the initial_backoff by after the
+    first retry."""
+    random_jitter_range: int
+    """A number in seconds which indicates a range to jitter/randomize for the back-off interval."""
+
+    def __init__(
+        self,
+        initial_backoff: int = 15,
+        increment_base: int = 3,
+        retry_total: int = 3,
+        retry_to_secondary: bool = False,
+        random_jitter_range: int = 3, **kwargs
+    ) -> None:
+        """
+        Constructs an Exponential retry object. The initial_backoff is used for
+        the first retry. Subsequent retries are retried after initial_backoff +
+        increment_base^retry_count seconds. For example, by default the first retry
+        occurs after 15 seconds, the second after (15+3^1) = 18 seconds, and the
+        third after (15+3^2) = 24 seconds.
+
+        :param int initial_backoff:
+            The initial backoff interval, in seconds, for the first retry.
+        :param int increment_base:
+            The base, in seconds, to increment the initial_backoff by after the
+            first retry.
+        :param int retry_total:
+            The maximum number of retry attempts.
+        :param bool retry_to_secondary:
+            Whether the request should be retried to secondary, if able. This should
+            only be enabled if RA-GRS accounts are used and potentially stale data
+            can be handled.
+        :param int random_jitter_range:
+            A number in seconds which indicates a range to jitter/randomize for the back-off interval.
+            For example, a random_jitter_range of 3 results in a back-off interval x that varies between x-3 and x+3.
+        """
+        self.initial_backoff = initial_backoff
+        self.increment_base = increment_base
+        self.random_jitter_range = random_jitter_range
+        super(ExponentialRetry, self).__init__(
+            retry_total=retry_total, retry_to_secondary=retry_to_secondary, **kwargs)
+
+    def get_backoff_time(self, settings: Dict[str, Any]) -> float:
+        """
+        Calculates how long to sleep before retrying.
+
+        :param Dict[str, Any] settings: The configurable values pertaining to the backoff time.
+        :return:
+            A float indicating how long to wait before retrying the request,
+            or None to indicate no retry should be performed.
+        :rtype: float
+        """
+        random_generator = random.Random()
+        backoff = self.initial_backoff + (0 if settings['count'] == 0 else pow(self.increment_base, settings['count']))
+        random_range_start = backoff - self.random_jitter_range if backoff > self.random_jitter_range else 0
+        random_range_end = backoff + self.random_jitter_range
+        return random_generator.uniform(random_range_start, random_range_end)
+
+
+class LinearRetry(AsyncStorageRetryPolicy):
+    """Linear retry."""
+
+    backoff: int
+    """The backoff interval, in seconds, between retries."""
+    random_jitter_range: int
+    """A number in seconds which indicates a range to jitter/randomize for the back-off interval."""
+
+    def __init__(
+        self, backoff: int = 15,
+        retry_total: int = 3,
+        retry_to_secondary: bool = False,
+        random_jitter_range: int = 3,
+        **kwargs: Any
+    ) -> None:
+        """
+        Constructs a Linear retry object.
+
+        :param int backoff:
+            The backoff interval, in seconds, between retries.
+        :param int retry_total:
+            The maximum number of retry attempts.
+        :param bool retry_to_secondary:
+            Whether the request should be retried to secondary, if able. This should
+            only be enabled if RA-GRS accounts are used and potentially stale data
+            can be handled.
+        :param int random_jitter_range:
+            A number in seconds which indicates a range to jitter/randomize for the back-off interval.
+            For example, a random_jitter_range of 3 results in a back-off interval x that varies between x-3 and x+3.
+        """
+        self.backoff = backoff
+        self.random_jitter_range = random_jitter_range
+        super(LinearRetry, self).__init__(
+            retry_total=retry_total, retry_to_secondary=retry_to_secondary, **kwargs)
+
+    def get_backoff_time(self, settings: Dict[str, Any]) -> float:
+        """
+        Calculates how long to sleep before retrying.
+
+        :param Dict[str, Any] settings: The configurable values pertaining to the backoff time.
+        :return:
+            A float indicating how long to wait before retrying the request,
+            or None to indicate no retry should be performed.
+        :rtype: float
+        """
+        random_generator = random.Random()
+        # the backoff interval normally does not change, however there is the possibility
+        # that it was modified by accessing the property directly after initializing the object
+        random_range_start = self.backoff - self.random_jitter_range \
+            if self.backoff > self.random_jitter_range else 0
+        random_range_end = self.backoff + self.random_jitter_range
+        return random_generator.uniform(random_range_start, random_range_end)
+
+
+class AsyncStorageBearerTokenCredentialPolicy(AsyncBearerTokenCredentialPolicy):
+    """ Custom Bearer token credential policy for following Storage Bearer challenges """
+
+    def __init__(self, credential: "AsyncTokenCredential", audience: str, **kwargs: Any) -> None:
+        super(AsyncStorageBearerTokenCredentialPolicy, self).__init__(credential, audience, **kwargs)
+
+    async def on_challenge(self, request: "PipelineRequest", response: "PipelineResponse") -> bool:
+        try:
+            auth_header = response.http_response.headers.get("WWW-Authenticate")
+            challenge = StorageHttpChallenge(auth_header)
+        except ValueError:
+            return False
+
+        scope = challenge.resource_id + DEFAULT_OAUTH_SCOPE
+        await self.authorize_request(request, scope, tenant_id=challenge.tenant_id)
+
+        return True
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_shared/request_handlers.py b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_shared/request_handlers.py
new file mode 100644
index 00000000..54927cc7
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_shared/request_handlers.py
@@ -0,0 +1,270 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+
+import logging
+import stat
+from io import (SEEK_END, SEEK_SET, UnsupportedOperation)
+from os import fstat
+from typing import Dict, Optional
+
+import isodate
+
+
+_LOGGER = logging.getLogger(__name__)
+
+_REQUEST_DELIMITER_PREFIX = "batch_"
+_HTTP1_1_IDENTIFIER = "HTTP/1.1"
+_HTTP_LINE_ENDING = "\r\n"
+
+
+def serialize_iso(attr):
+    """Serialize Datetime object into ISO-8601 formatted string.
+
+    :param Datetime attr: Object to be serialized.
+    :rtype: str
+    :raises: ValueError if format invalid.
+    """
+    if not attr:
+        return None
+    if isinstance(attr, str):
+        attr = isodate.parse_datetime(attr)
+    try:
+        utc = attr.utctimetuple()
+        if utc.tm_year > 9999 or utc.tm_year < 1:
+            raise OverflowError("Hit max or min date")
+
+        date = f"{utc.tm_year:04}-{utc.tm_mon:02}-{utc.tm_mday:02}T{utc.tm_hour:02}:{utc.tm_min:02}:{utc.tm_sec:02}"
+        return date + 'Z'
+    except (ValueError, OverflowError) as err:
+        raise ValueError("Unable to serialize datetime object.") from err
+    except AttributeError as err:
+        raise TypeError("ISO-8601 object must be valid datetime object.") from err
+
+def get_length(data):
+    length = None
+    # Check if object implements the __len__ method, covers most input cases such as bytearray.
+    try:
+        length = len(data)
+    except:  # pylint: disable=bare-except
+        pass
+
+    if not length:
+        # Check if the stream is a file-like stream object.
+        # If so, calculate the size using the file descriptor.
+        try:
+            fileno = data.fileno()
+        except (AttributeError, UnsupportedOperation):
+            pass
+        else:
+            try:
+                mode = fstat(fileno).st_mode
+                if stat.S_ISREG(mode) or stat.S_ISLNK(mode):
+                    # st_size is only meaningful for regular files or symlinks;
+                    # other types, e.g. sockets, may return misleading sizes like 0.
+                    return fstat(fileno).st_size
+            except OSError:
+                # Not a valid fileno; possibly requests returned a socket
+                # number instead of a file descriptor.
+                pass
+
+        # If the stream is seekable and tell() is implemented, calculate the stream size.
+        try:
+            current_position = data.tell()
+            data.seek(0, SEEK_END)
+            length = data.tell() - current_position
+            data.seek(current_position, SEEK_SET)
+        except (AttributeError, OSError, UnsupportedOperation):
+            pass
+
+    return length
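+
+# A hedged usage sketch: bytes-like objects report len() directly, while
+# seekable streams without a usable fileno fall back to the seek/tell probe.
+#   >>> get_length(b"hello")
+#   5
+#   >>> from io import BytesIO
+#   >>> get_length(BytesIO(b"hello world"))
+#   11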
+
+
+def read_length(data):
+    try:
+        if hasattr(data, 'read'):
+            read_data = b''
+            for chunk in iter(lambda: data.read(4096), b""):
+                read_data += chunk
+            return len(read_data), read_data
+        if hasattr(data, '__iter__'):
+            read_data = b''
+            for chunk in data:
+                read_data += chunk
+            return len(read_data), read_data
+    except:  # pylint: disable=bare-except
+        pass
+    raise ValueError("Unable to calculate content length, please specify.")
+
+
+def validate_and_format_range_headers(
+        start_range, end_range, start_range_required=True,
+        end_range_required=True, check_content_md5=False, align_to_page=False):
+    # If end range is provided, start range must be provided
+    if (start_range_required or end_range is not None) and start_range is None:
+        raise ValueError("start_range value cannot be None.")
+    if end_range_required and end_range is None:
+        raise ValueError("end_range value cannot be None.")
+
+    # Page ranges must be 512-byte aligned
+    if align_to_page:
+        if start_range is not None and start_range % 512 != 0:
+            raise ValueError(f"Invalid page blob start_range: {start_range}. "
+                             "The size must be aligned to a 512-byte boundary.")
+        if end_range is not None and end_range % 512 != 511:
+            raise ValueError(f"Invalid page blob end_range: {end_range}. "
+                             "The size must be aligned to a 512-byte boundary.")
+
+    # Format based on whether end_range is present
+    range_header = None
+    if end_range is not None:
+        range_header = f'bytes={start_range}-{end_range}'
+    elif start_range is not None:
+        range_header = f"bytes={start_range}-"
+
+    # Content MD5 can only be provided for a complete range less than 4MB in size
+    range_validation = None
+    if check_content_md5:
+        if start_range is None or end_range is None:
+            raise ValueError("Both start and end range required for MD5 content validation.")
+        if end_range - start_range > 4 * 1024 * 1024:
+            raise ValueError("Getting content MD5 for a range greater than 4MB is not supported.")
+        range_validation = 'true'
+
+    return range_header, range_validation
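+
+# A hedged usage sketch: the second element is the content-MD5 validation
+# flag, set only when MD5 is requested for a complete range under 4MB.
+#   >>> validate_and_format_range_headers(0, 511)
+#   ('bytes=0-511', None)
+#   >>> validate_and_format_range_headers(0, 1023, check_content_md5=True)
+#   ('bytes=0-1023', 'true')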
+
+
+def add_metadata_headers(metadata=None):
+    # type: (Optional[Dict[str, str]]) -> Dict[str, str]
+    headers = {}
+    if metadata:
+        for key, value in metadata.items():
+            headers[f'x-ms-meta-{key.strip()}'] = value.strip() if value else value
+    return headers
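+
+# A hedged usage sketch: keys and values are whitespace-stripped and keys are
+# prefixed into the x-ms-meta-* header namespace.
+#   >>> add_metadata_headers({'project': 'demo', 'owner': ' data-team '})
+#   {'x-ms-meta-project': 'demo', 'x-ms-meta-owner': 'data-team'}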
+
+
+def serialize_batch_body(requests, batch_id):
+    """
+    --<delimiter>
+    <subrequest>
+    --<delimiter>
+    <subrequest>    (repeated as needed)
+    --<delimiter>--
+
+    Serializes the requests in this batch to a single HTTP mixed/multipart body.
+
+    :param List[~azure.core.pipeline.transport.HttpRequest] requests:
+        A list of sub-requests for the batch request.
+    :param str batch_id:
+        The batch id to be embedded in the batch sub-request delimiter.
+    :returns: The body bytes for this batch.
+    :rtype: bytes
+    """
+
+    if requests is None or len(requests) == 0:
+        raise ValueError('Please provide sub-request(s) for this batch request')
+
+    delimiter_bytes = (_get_batch_request_delimiter(batch_id, True, False) + _HTTP_LINE_ENDING).encode('utf-8')
+    newline_bytes = _HTTP_LINE_ENDING.encode('utf-8')
+    batch_body = []
+
+    content_index = 0
+    for request in requests:
+        request.headers.update({
+            "Content-ID": str(content_index),
+            "Content-Length": str(0)
+        })
+        batch_body.append(delimiter_bytes)
+        batch_body.append(_make_body_from_sub_request(request))
+        batch_body.append(newline_bytes)
+        content_index += 1
+
+    batch_body.append(_get_batch_request_delimiter(batch_id, True, True).encode('utf-8'))
+    # final line of body MUST have \r\n at the end, or it will not be properly read by the service
+    batch_body.append(newline_bytes)
+
+    return b"".join(batch_body)
+
+
+def _get_batch_request_delimiter(batch_id, is_prepend_dashes=False, is_append_dashes=False):
+    """
+    Gets the delimiter used for this batch request's mixed/multipart HTTP format.
+
+    :param str batch_id:
+        Randomly generated id
+    :param bool is_prepend_dashes:
+        Whether to include the starting dashes. Used in the body, but not when defining the delimiter.
+    :param bool is_append_dashes:
+        Whether to include the ending dashes. Used in the body on the closing delimiter only.
+    :returns: The delimiter, WITHOUT a trailing newline.
+    :rtype: str
+    """
+
+    prepend_dashes = '--' if is_prepend_dashes else ''
+    append_dashes = '--' if is_append_dashes else ''
+
+    return prepend_dashes + _REQUEST_DELIMITER_PREFIX + batch_id + append_dashes
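+
+# A hedged usage sketch: body delimiters prepend dashes; the closing delimiter
+# appends them as well.
+#   >>> _get_batch_request_delimiter('abc123', True, False)
+#   '--batch_abc123'
+#   >>> _get_batch_request_delimiter('abc123', True, True)
+#   '--batch_abc123--'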
+
+
+def _make_body_from_sub_request(sub_request):
+    """
+     Content-Type: application/http
+     Content-ID: <sequential int ID>
+     Content-Transfer-Encoding: <value> (if present)
+
+     <verb> <path><query> HTTP/<version>
+     <header key>: <header value> (repeated as necessary)
+     Content-Length: <value>
+     (newline if content length > 0)
+     <body> (if content length > 0)
+
+     Serializes an http request.
+
+     :param ~azure.core.pipeline.transport.HttpRequest sub_request:
+        Request to serialize.
+     :returns: The serialized sub-request in bytes
+     :rtype: bytes
+     """
+
+    # put the sub-request's headers into a list for efficient str concatenation
+    sub_request_body = []
+
+    # get headers for ease of manipulation; remove headers as they are used
+    headers = sub_request.headers
+
+    # append opening headers
+    sub_request_body.append("Content-Type: application/http")
+    sub_request_body.append(_HTTP_LINE_ENDING)
+
+    sub_request_body.append("Content-ID: ")
+    sub_request_body.append(headers.pop("Content-ID", ""))
+    sub_request_body.append(_HTTP_LINE_ENDING)
+
+    sub_request_body.append("Content-Transfer-Encoding: binary")
+    sub_request_body.append(_HTTP_LINE_ENDING)
+
+    # append blank line
+    sub_request_body.append(_HTTP_LINE_ENDING)
+
+    # append HTTP verb and path and query and HTTP version
+    sub_request_body.append(sub_request.method)
+    sub_request_body.append(' ')
+    sub_request_body.append(sub_request.url)
+    sub_request_body.append(' ')
+    sub_request_body.append(_HTTP1_1_IDENTIFIER)
+    sub_request_body.append(_HTTP_LINE_ENDING)
+
+    # append remaining headers (this also emits Content-Length, which was set on the sub-request)
+    for header_name, header_value in headers.items():
+        if header_value is not None:
+            sub_request_body.append(header_name)
+            sub_request_body.append(": ")
+            sub_request_body.append(header_value)
+            sub_request_body.append(_HTTP_LINE_ENDING)
+
+    # append blank line
+    sub_request_body.append(_HTTP_LINE_ENDING)
+
+    return ''.join(sub_request_body).encode()
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_shared/response_handlers.py b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_shared/response_handlers.py
new file mode 100644
index 00000000..af9a2fcd
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_shared/response_handlers.py
@@ -0,0 +1,200 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+import logging
+from typing import NoReturn
+from xml.etree.ElementTree import Element
+
+from azure.core.exceptions import (
+    ClientAuthenticationError,
+    DecodeError,
+    HttpResponseError,
+    ResourceExistsError,
+    ResourceModifiedError,
+    ResourceNotFoundError,
+)
+from azure.core.pipeline.policies import ContentDecodePolicy
+
+from .authentication import AzureSigningError
+from .models import get_enum_value, StorageErrorCode, UserDelegationKey
+from .parser import _to_utc_datetime
+
+
+_LOGGER = logging.getLogger(__name__)
+
+
+class PartialBatchErrorException(HttpResponseError):
+    """There is a partial failure in batch operations.
+
+    :param str message: The message of the exception.
+    :param response: Server response to be deserialized.
+    :param list parts: A list of the parts in multipart response.
+    """
+
+    def __init__(self, message, response, parts):
+        self.parts = parts
+        super(PartialBatchErrorException, self).__init__(message=message, response=response)
+
+
+# Parses the blob length from the content range header: bytes 1-3/65537
+def parse_length_from_content_range(content_range):
+    if content_range is None:
+        return None
+
+    # First, split in space and take the second half: '1-3/65537'
+    # Next, split on slash and take the second half: '65537'
+    # Finally, convert to an int: 65537
+    return int(content_range.split(' ', 1)[1].split('/', 1)[1])
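+
+# A hedged usage sketch:
+#   >>> parse_length_from_content_range('bytes 1-3/65537')
+#   65537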
+
+
+def normalize_headers(headers):
+    normalized = {}
+    for key, value in headers.items():
+        if key.startswith('x-ms-'):
+            key = key[5:]
+        normalized[key.lower().replace('-', '_')] = get_enum_value(value)
+    return normalized
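+
+# A hedged usage sketch: the x-ms- prefix is dropped and dashes become
+# underscores.
+#   >>> normalize_headers({'x-ms-meta-key': 'v', 'ETag': '"0x1"'})
+#   {'meta_key': 'v', 'etag': '"0x1"'}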
+
+
+def deserialize_metadata(response, obj, headers):  # pylint: disable=unused-argument
+    try:
+        raw_metadata = {k: v for k, v in response.http_response.headers.items() if k.lower().startswith('x-ms-meta-')}
+    except AttributeError:
+        raw_metadata = {k: v for k, v in response.headers.items() if k.lower().startswith('x-ms-meta-')}
+    return {k[10:]: v for k, v in raw_metadata.items()}
+
+
+def return_response_headers(response, deserialized, response_headers):  # pylint: disable=unused-argument
+    return normalize_headers(response_headers)
+
+
+def return_headers_and_deserialized(response, deserialized, response_headers):  # pylint: disable=unused-argument
+    return normalize_headers(response_headers), deserialized
+
+
+def return_context_and_deserialized(response, deserialized, response_headers):  # pylint: disable=unused-argument
+    return response.http_response.location_mode, deserialized
+
+
+def return_raw_deserialized(response, *_):
+    return response.http_response.location_mode, response.context[ContentDecodePolicy.CONTEXT_NAME]
+
+
+def process_storage_error(storage_error) -> NoReturn: # type: ignore [misc] # pylint:disable=too-many-statements, too-many-branches
+    raise_error = HttpResponseError
+    serialized = False
+    if isinstance(storage_error, AzureSigningError):
+        storage_error.message = storage_error.message + \
+            '. This is likely due to an invalid shared key. Please check your shared key and try again.'
+    if not storage_error.response or storage_error.response.status_code in [200, 204]:
+        raise storage_error
+    # If it is one of these types, it has already been serialized by the generated layer.
+    if isinstance(storage_error, (PartialBatchErrorException,
+                                  ClientAuthenticationError, ResourceNotFoundError, ResourceExistsError)):
+        serialized = True
+    error_code = storage_error.response.headers.get('x-ms-error-code')
+    error_message = storage_error.message
+    additional_data = {}
+    error_dict = {}
+    try:
+        error_body = ContentDecodePolicy.deserialize_from_http_generics(storage_error.response)
+        try:
+            if error_body is None or len(error_body) == 0:
+                error_body = storage_error.response.reason
+        except AttributeError:
+            error_body = ''
+        # If it is an XML response
+        if isinstance(error_body, Element):
+            error_dict = {
+                child.tag.lower(): child.text
+                for child in error_body
+            }
+        # If it is a JSON response
+        elif isinstance(error_body, dict):
+            error_dict = error_body.get('error', {})
+        elif not error_code:
+            _LOGGER.warning(
+                'Unexpected return type %s from ContentDecodePolicy.deserialize_from_http_generics.', type(error_body))
+            error_dict = {'message': str(error_body)}
+
+        # If we extracted from a Json or XML response
+        # There is a chance error_dict is just a string
+        if error_dict and isinstance(error_dict, dict):
+            error_code = error_dict.get('code')
+            error_message = error_dict.get('message')
+            additional_data = {k: v for k, v in error_dict.items() if k not in {'code', 'message'}}
+    except DecodeError:
+        pass
+
+    try:
+        # This check would be unnecessary if we had already serialized the error
+        if error_code and not serialized:
+            error_code = StorageErrorCode(error_code)
+            if error_code in [StorageErrorCode.condition_not_met,
+                              StorageErrorCode.blob_overwritten]:
+                raise_error = ResourceModifiedError
+            if error_code in [StorageErrorCode.invalid_authentication_info,
+                              StorageErrorCode.authentication_failed]:
+                raise_error = ClientAuthenticationError
+            if error_code in [StorageErrorCode.resource_not_found,
+                              StorageErrorCode.cannot_verify_copy_source,
+                              StorageErrorCode.blob_not_found,
+                              StorageErrorCode.queue_not_found,
+                              StorageErrorCode.container_not_found,
+                              StorageErrorCode.parent_not_found,
+                              StorageErrorCode.share_not_found]:
+                raise_error = ResourceNotFoundError
+            if error_code in [StorageErrorCode.account_already_exists,
+                              StorageErrorCode.account_being_created,
+                              StorageErrorCode.resource_already_exists,
+                              StorageErrorCode.resource_type_mismatch,
+                              StorageErrorCode.blob_already_exists,
+                              StorageErrorCode.queue_already_exists,
+                              StorageErrorCode.container_already_exists,
+                              StorageErrorCode.container_being_deleted,
+                              StorageErrorCode.queue_being_deleted,
+                              StorageErrorCode.share_already_exists,
+                              StorageErrorCode.share_being_deleted]:
+                raise_error = ResourceExistsError
+    except ValueError:
+        # Got an unknown error code
+        pass
+
+    # Error message should include all the error properties
+    try:
+        error_message += f"\nErrorCode:{error_code.value}"
+    except AttributeError:
+        error_message += f"\nErrorCode:{error_code}"
+    for name, info in additional_data.items():
+        error_message += f"\n{name}:{info}"
+
+    # No need to create an instance if it has already been serialized by the generated layer
+    if serialized:
+        storage_error.message = error_message
+        error = storage_error
+    else:
+        error = raise_error(message=error_message, response=storage_error.response)
+    # Ensure these properties are stored in the error instance as well (not just the error message)
+    error.error_code = error_code
+    error.additional_info = additional_data
+    # error.args is what's surfaced on the traceback - show error message in all cases
+    error.args = (error.message,)
+    try:
+        # `from None` prevents us from double printing the exception (suppresses generated layer error context)
+        exec("raise error from None")   # pylint: disable=exec-used # nosec
+    except SyntaxError as exc:
+        raise error from exc
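+
+# A hedged usage sketch (do_request is illustrative, not part of this module):
+# callers wrap generated-layer calls and let this helper re-raise failures as
+# typed azure-core exceptions.
+#   >>> try:
+#   ...     do_request()
+#   ... except HttpResponseError as error:
+#   ...     process_storage_error(error)  # may re-raise as ResourceNotFoundError, etc.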
+
+
+def parse_to_internal_user_delegation_key(service_user_delegation_key):
+    internal_user_delegation_key = UserDelegationKey()
+    internal_user_delegation_key.signed_oid = service_user_delegation_key.signed_oid
+    internal_user_delegation_key.signed_tid = service_user_delegation_key.signed_tid
+    internal_user_delegation_key.signed_start = _to_utc_datetime(service_user_delegation_key.signed_start)
+    internal_user_delegation_key.signed_expiry = _to_utc_datetime(service_user_delegation_key.signed_expiry)
+    internal_user_delegation_key.signed_service = service_user_delegation_key.signed_service
+    internal_user_delegation_key.signed_version = service_user_delegation_key.signed_version
+    internal_user_delegation_key.value = service_user_delegation_key.value
+    return internal_user_delegation_key
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_shared/shared_access_signature.py b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_shared/shared_access_signature.py
new file mode 100644
index 00000000..df29222b
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_shared/shared_access_signature.py
@@ -0,0 +1,252 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+# pylint: disable=docstring-keyword-should-match-keyword-only
+
+from datetime import date
+
+from .parser import _to_utc_datetime
+from .constants import X_MS_VERSION
+from . import sign_string, url_quote
+
+# cspell:ignoreRegExp rsc.
+# cspell:ignoreRegExp s..?id
+class QueryStringConstants(object):
+    SIGNED_SIGNATURE = 'sig'
+    SIGNED_PERMISSION = 'sp'
+    SIGNED_START = 'st'
+    SIGNED_EXPIRY = 'se'
+    SIGNED_RESOURCE = 'sr'
+    SIGNED_IDENTIFIER = 'si'
+    SIGNED_IP = 'sip'
+    SIGNED_PROTOCOL = 'spr'
+    SIGNED_VERSION = 'sv'
+    SIGNED_CACHE_CONTROL = 'rscc'
+    SIGNED_CONTENT_DISPOSITION = 'rscd'
+    SIGNED_CONTENT_ENCODING = 'rsce'
+    SIGNED_CONTENT_LANGUAGE = 'rscl'
+    SIGNED_CONTENT_TYPE = 'rsct'
+    START_PK = 'spk'
+    START_RK = 'srk'
+    END_PK = 'epk'
+    END_RK = 'erk'
+    SIGNED_RESOURCE_TYPES = 'srt'
+    SIGNED_SERVICES = 'ss'
+    SIGNED_OID = 'skoid'
+    SIGNED_TID = 'sktid'
+    SIGNED_KEY_START = 'skt'
+    SIGNED_KEY_EXPIRY = 'ske'
+    SIGNED_KEY_SERVICE = 'sks'
+    SIGNED_KEY_VERSION = 'skv'
+    SIGNED_ENCRYPTION_SCOPE = 'ses'
+
+    # for ADLS
+    SIGNED_AUTHORIZED_OID = 'saoid'
+    SIGNED_UNAUTHORIZED_OID = 'suoid'
+    SIGNED_CORRELATION_ID = 'scid'
+    SIGNED_DIRECTORY_DEPTH = 'sdd'
+
+    @staticmethod
+    def to_list():
+        return [
+            QueryStringConstants.SIGNED_SIGNATURE,
+            QueryStringConstants.SIGNED_PERMISSION,
+            QueryStringConstants.SIGNED_START,
+            QueryStringConstants.SIGNED_EXPIRY,
+            QueryStringConstants.SIGNED_RESOURCE,
+            QueryStringConstants.SIGNED_IDENTIFIER,
+            QueryStringConstants.SIGNED_IP,
+            QueryStringConstants.SIGNED_PROTOCOL,
+            QueryStringConstants.SIGNED_VERSION,
+            QueryStringConstants.SIGNED_CACHE_CONTROL,
+            QueryStringConstants.SIGNED_CONTENT_DISPOSITION,
+            QueryStringConstants.SIGNED_CONTENT_ENCODING,
+            QueryStringConstants.SIGNED_CONTENT_LANGUAGE,
+            QueryStringConstants.SIGNED_CONTENT_TYPE,
+            QueryStringConstants.START_PK,
+            QueryStringConstants.START_RK,
+            QueryStringConstants.END_PK,
+            QueryStringConstants.END_RK,
+            QueryStringConstants.SIGNED_RESOURCE_TYPES,
+            QueryStringConstants.SIGNED_SERVICES,
+            QueryStringConstants.SIGNED_OID,
+            QueryStringConstants.SIGNED_TID,
+            QueryStringConstants.SIGNED_KEY_START,
+            QueryStringConstants.SIGNED_KEY_EXPIRY,
+            QueryStringConstants.SIGNED_KEY_SERVICE,
+            QueryStringConstants.SIGNED_KEY_VERSION,
+            QueryStringConstants.SIGNED_ENCRYPTION_SCOPE,
+            # for ADLS
+            QueryStringConstants.SIGNED_AUTHORIZED_OID,
+            QueryStringConstants.SIGNED_UNAUTHORIZED_OID,
+            QueryStringConstants.SIGNED_CORRELATION_ID,
+            QueryStringConstants.SIGNED_DIRECTORY_DEPTH,
+        ]
+
+
+class SharedAccessSignature(object):
+    '''
+    Provides a factory for creating account access
+    signature tokens with an account name and account key. Users can either
+    use the factory or can construct the appropriate service and use the
+    generate_*_shared_access_signature method directly.
+    '''
+
+    def __init__(self, account_name, account_key, x_ms_version=X_MS_VERSION):
+        '''
+        :param str account_name:
+            The storage account name used to generate the shared access signatures.
+        :param str account_key:
+            The access key used to generate the shared access signatures.
+        :param str x_ms_version:
+            The service version used to generate the shared access signatures.
+        '''
+        self.account_name = account_name
+        self.account_key = account_key
+        self.x_ms_version = x_ms_version
+
+    def generate_account(
+        self, services,
+        resource_types,
+        permission,
+        expiry,
+        start=None,
+        ip=None,
+        protocol=None,
+        sts_hook=None,
+        **kwargs
+    ) -> str:
+        '''
+        Generates a shared access signature for the account.
+        Use the returned signature with the sas_token parameter of the service
+        or to create a new account object.
+
+        :param Any services: The specified services associated with the shared access signature.
+        :param ResourceTypes resource_types:
+            Specifies the resource types that are accessible with the account
+            SAS. You can combine values to provide access to more than one
+            resource type.
+        :param AccountSasPermissions permission:
+            The permissions associated with the shared access signature. The
+            user is restricted to operations allowed by the permissions.
+            Required unless an id is given referencing a stored access policy
+            which contains this field. This field must be omitted if it has been
+            specified in an associated stored access policy. You can combine
+            values to provide more than one permission.
+        :param expiry:
+            The time at which the shared access signature becomes invalid.
+            Required unless an id is given referencing a stored access policy
+            which contains this field. This field must be omitted if it has
+            been specified in an associated stored access policy. Azure will always
+            convert values to UTC. If a date is passed in without timezone info, it
+            is assumed to be UTC.
+        :type expiry: datetime or str
+        :param start:
+            The time at which the shared access signature becomes valid. If
+            omitted, start time for this call is assumed to be the time when the
+            storage service receives the request. The provided datetime will always
+            be interpreted as UTC.
+        :type start: datetime or str
+        :param str ip:
+            Specifies an IP address or a range of IP addresses from which to accept requests.
+            If the IP address from which the request originates does not match the IP address
+            or address range specified on the SAS token, the request is not authenticated.
+            For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS
+            restricts the request to those IP addresses.
+        :param str protocol:
+            Specifies the protocol permitted for a request made. The default value
+            is https,http. See :class:`~azure.storage.common.models.Protocol` for possible values.
+        :keyword str encryption_scope:
+            Optional. If specified, this is the encryption scope to use when sending requests
+            authorized with this SAS URI.
+        :param sts_hook:
+            For debugging purposes only. If provided, the hook is called with the string to sign
+            that was used to generate the SAS.
+        :type sts_hook: Optional[Callable[[str], None]]
+        :returns: The generated SAS token for the account.
+        :rtype: str
+        '''
+        sas = _SharedAccessHelper()
+        sas.add_base(permission, expiry, start, ip, protocol, self.x_ms_version)
+        sas.add_account(services, resource_types)
+        sas.add_encryption_scope(**kwargs)
+        sas.add_account_signature(self.account_name, self.account_key)
+
+        if sts_hook is not None:
+            sts_hook(sas.string_to_sign)
+
+        return sas.get_token()
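+
+    # A hedged usage sketch (the account key is a dummy base64 value):
+    #   >>> from datetime import datetime
+    #   >>> sas = SharedAccessSignature('myaccount', 'ZHVtbXlrZXk=')
+    #   >>> token = sas.generate_account(
+    #   ...     services='b', resource_types='sco', permission='rl',
+    #   ...     expiry=datetime(2030, 1, 1))
+    #   >>> 'sig=' in token and 'se=2030-01-01' in token
+    #   True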
+
+
+class _SharedAccessHelper(object):
+    def __init__(self):
+        self.query_dict = {}
+        self.string_to_sign = ""
+
+    def _add_query(self, name, val):
+        if val:
+            self.query_dict[name] = str(val)
+
+    def add_encryption_scope(self, **kwargs):
+        self._add_query(QueryStringConstants.SIGNED_ENCRYPTION_SCOPE, kwargs.pop('encryption_scope', None))
+
+    def add_base(self, permission, expiry, start, ip, protocol, x_ms_version):
+        if isinstance(start, date):
+            start = _to_utc_datetime(start)
+
+        if isinstance(expiry, date):
+            expiry = _to_utc_datetime(expiry)
+
+        self._add_query(QueryStringConstants.SIGNED_START, start)
+        self._add_query(QueryStringConstants.SIGNED_EXPIRY, expiry)
+        self._add_query(QueryStringConstants.SIGNED_PERMISSION, permission)
+        self._add_query(QueryStringConstants.SIGNED_IP, ip)
+        self._add_query(QueryStringConstants.SIGNED_PROTOCOL, protocol)
+        self._add_query(QueryStringConstants.SIGNED_VERSION, x_ms_version)
+
+    def add_resource(self, resource):
+        self._add_query(QueryStringConstants.SIGNED_RESOURCE, resource)
+
+    def add_id(self, policy_id):
+        self._add_query(QueryStringConstants.SIGNED_IDENTIFIER, policy_id)
+
+    def add_account(self, services, resource_types):
+        self._add_query(QueryStringConstants.SIGNED_SERVICES, services)
+        self._add_query(QueryStringConstants.SIGNED_RESOURCE_TYPES, resource_types)
+
+    def add_override_response_headers(self, cache_control,
+                                      content_disposition,
+                                      content_encoding,
+                                      content_language,
+                                      content_type):
+        self._add_query(QueryStringConstants.SIGNED_CACHE_CONTROL, cache_control)
+        self._add_query(QueryStringConstants.SIGNED_CONTENT_DISPOSITION, content_disposition)
+        self._add_query(QueryStringConstants.SIGNED_CONTENT_ENCODING, content_encoding)
+        self._add_query(QueryStringConstants.SIGNED_CONTENT_LANGUAGE, content_language)
+        self._add_query(QueryStringConstants.SIGNED_CONTENT_TYPE, content_type)
+
+    def add_account_signature(self, account_name, account_key):
+        def get_value_to_append(query):
+            return_value = self.query_dict.get(query) or ''
+            return return_value + '\n'
+
+        string_to_sign = \
+            (account_name + '\n' +
+             get_value_to_append(QueryStringConstants.SIGNED_PERMISSION) +
+             get_value_to_append(QueryStringConstants.SIGNED_SERVICES) +
+             get_value_to_append(QueryStringConstants.SIGNED_RESOURCE_TYPES) +
+             get_value_to_append(QueryStringConstants.SIGNED_START) +
+             get_value_to_append(QueryStringConstants.SIGNED_EXPIRY) +
+             get_value_to_append(QueryStringConstants.SIGNED_IP) +
+             get_value_to_append(QueryStringConstants.SIGNED_PROTOCOL) +
+             get_value_to_append(QueryStringConstants.SIGNED_VERSION) +
+             get_value_to_append(QueryStringConstants.SIGNED_ENCRYPTION_SCOPE))
+
+        self._add_query(QueryStringConstants.SIGNED_SIGNATURE,
+                        sign_string(account_key, string_to_sign))
+        self.string_to_sign = string_to_sign
+
+    def get_token(self) -> str:
+        return '&'.join([f'{n}={url_quote(v)}' for n, v in self.query_dict.items() if v is not None])
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_shared/uploads.py b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_shared/uploads.py
new file mode 100644
index 00000000..b31cfb32
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_shared/uploads.py
@@ -0,0 +1,604 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+
+from concurrent import futures
+from io import BytesIO, IOBase, SEEK_CUR, SEEK_END, SEEK_SET, UnsupportedOperation
+from itertools import islice
+from math import ceil
+from threading import Lock
+
+from azure.core.tracing.common import with_current_context
+
+from . import encode_base64, url_quote
+from .request_handlers import get_length
+from .response_handlers import return_response_headers
+
+
+_LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE = 4 * 1024 * 1024
+_ERROR_VALUE_SHOULD_BE_SEEKABLE_STREAM = "{0} should be a seekable file-like/io.IOBase type stream object."
+
+
+def _parallel_uploads(executor, uploader, pending, running):
+    range_ids = []
+    while True:
+        # Wait for some upload to finish before adding a new one
+        done, running = futures.wait(running, return_when=futures.FIRST_COMPLETED)
+        range_ids.extend([chunk.result() for chunk in done])
+        try:
+            for _ in range(0, len(done)):
+                next_chunk = next(pending)
+                running.add(executor.submit(with_current_context(uploader), next_chunk))
+        except StopIteration:
+            break
+
+    # Wait for the remaining uploads to finish
+    done, _running = futures.wait(running)
+    range_ids.extend([chunk.result() for chunk in done])
+    return range_ids
+
+
+def upload_data_chunks(
+        service=None,
+        uploader_class=None,
+        total_size=None,
+        chunk_size=None,
+        max_concurrency=None,
+        stream=None,
+        validate_content=None,
+        progress_hook=None,
+        **kwargs):
+
+    parallel = max_concurrency > 1
+    if parallel and 'modified_access_conditions' in kwargs:
+        # Access conditions do not work with parallelism
+        kwargs['modified_access_conditions'] = None
+
+    uploader = uploader_class(
+        service=service,
+        total_size=total_size,
+        chunk_size=chunk_size,
+        stream=stream,
+        parallel=parallel,
+        validate_content=validate_content,
+        progress_hook=progress_hook,
+        **kwargs)
+    if parallel:
+        with futures.ThreadPoolExecutor(max_concurrency) as executor:
+            upload_tasks = uploader.get_chunk_streams()
+            running_futures = [
+                executor.submit(with_current_context(uploader.process_chunk), u)
+                for u in islice(upload_tasks, 0, max_concurrency)
+            ]
+            range_ids = _parallel_uploads(executor, uploader.process_chunk, upload_tasks, running_futures)
+    else:
+        range_ids = [uploader.process_chunk(result) for result in uploader.get_chunk_streams()]
+    if any(range_ids):
+        return [r[1] for r in sorted(range_ids, key=lambda r: r[0])]
+    return uploader.response_headers
+
+
+def upload_substream_blocks(
+        service=None,
+        uploader_class=None,
+        total_size=None,
+        chunk_size=None,
+        max_concurrency=None,
+        stream=None,
+        progress_hook=None,
+        **kwargs):
+    parallel = max_concurrency > 1
+    if parallel and 'modified_access_conditions' in kwargs:
+        # Access conditions do not work with parallelism
+        kwargs['modified_access_conditions'] = None
+    uploader = uploader_class(
+        service=service,
+        total_size=total_size,
+        chunk_size=chunk_size,
+        stream=stream,
+        parallel=parallel,
+        progress_hook=progress_hook,
+        **kwargs)
+
+    if parallel:
+        with futures.ThreadPoolExecutor(max_concurrency) as executor:
+            upload_tasks = uploader.get_substream_blocks()
+            running_futures = [
+                executor.submit(with_current_context(uploader.process_substream_block), u)
+                for u in islice(upload_tasks, 0, max_concurrency)
+            ]
+            range_ids = _parallel_uploads(executor, uploader.process_substream_block, upload_tasks, running_futures)
+    else:
+        range_ids = [uploader.process_substream_block(b) for b in uploader.get_substream_blocks()]
+    if any(range_ids):
+        return sorted(range_ids)
+    return []
+
+
+class _ChunkUploader(object):  # pylint: disable=too-many-instance-attributes
+
+    def __init__(
+            self, service,
+            total_size,
+            chunk_size,
+            stream,
+            parallel,
+            encryptor=None,
+            padder=None,
+            progress_hook=None,
+            **kwargs):
+        self.service = service
+        self.total_size = total_size
+        self.chunk_size = chunk_size
+        self.stream = stream
+        self.parallel = parallel
+
+        # Stream management
+        self.stream_lock = Lock() if parallel else None
+
+        # Progress feedback
+        self.progress_total = 0
+        self.progress_lock = Lock() if parallel else None
+        self.progress_hook = progress_hook
+
+        # Encryption
+        self.encryptor = encryptor
+        self.padder = padder
+        self.response_headers = None
+        self.etag = None
+        self.last_modified = None
+        self.request_options = kwargs
+
+    def get_chunk_streams(self):
+        index = 0
+        while True:
+            data = b""
+            read_size = self.chunk_size
+
+            # Buffer until we either reach the end of the stream or get a whole chunk.
+            while True:
+                if self.total_size:
+                    read_size = min(self.chunk_size - len(data), self.total_size - (index + len(data)))
+                temp = self.stream.read(read_size)
+                if not isinstance(temp, bytes):
+                    raise TypeError("Blob data should be of type bytes.")
+                data += temp or b""
+
+                # We have read an empty string and so are at the end
+                # of the buffer or we have read a full chunk.
+                if temp == b"" or len(data) == self.chunk_size:
+                    break
+
+            if len(data) == self.chunk_size:
+                if self.padder:
+                    data = self.padder.update(data)
+                if self.encryptor:
+                    data = self.encryptor.update(data)
+                yield index, data
+            else:
+                if self.padder:
+                    data = self.padder.update(data) + self.padder.finalize()
+                if self.encryptor:
+                    data = self.encryptor.update(data) + self.encryptor.finalize()
+                if data:
+                    yield index, data
+                break
+            index += len(data)
+
+    def process_chunk(self, chunk_data):
+        chunk_bytes = chunk_data[1]
+        chunk_offset = chunk_data[0]
+        return self._upload_chunk_with_progress(chunk_offset, chunk_bytes)
+
+    def _update_progress(self, length):
+        if self.progress_lock is not None:
+            with self.progress_lock:
+                self.progress_total += length
+        else:
+            self.progress_total += length
+
+        if self.progress_hook:
+            self.progress_hook(self.progress_total, self.total_size)
+
+    def _upload_chunk(self, chunk_offset, chunk_data):
+        raise NotImplementedError("Must be implemented by child class.")
+
+    def _upload_chunk_with_progress(self, chunk_offset, chunk_data):
+        range_id = self._upload_chunk(chunk_offset, chunk_data)
+        self._update_progress(len(chunk_data))
+        return range_id
+
+    def get_substream_blocks(self):
+        assert self.chunk_size is not None
+        lock = self.stream_lock
+        blob_length = self.total_size
+
+        if blob_length is None:
+            blob_length = get_length(self.stream)
+            if blob_length is None:
+                raise ValueError("Unable to determine content length of upload data.")
+
+        blocks = int(ceil(blob_length / (self.chunk_size * 1.0)))
+        last_block_size = self.chunk_size if blob_length % self.chunk_size == 0 else blob_length % self.chunk_size
+
+        for i in range(blocks):
+            index = i * self.chunk_size
+            length = last_block_size if i == blocks - 1 else self.chunk_size
+            yield index, SubStream(self.stream, index, length, lock)
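+
+    # A hedged arithmetic sketch: a 10 MiB stream with a 4 MiB chunk size
+    # yields three blocks of 4 MiB, 4 MiB and 2 MiB, since
+    # ceil(10485760 / 4194304) == 3 and 10485760 % 4194304 == 2097152.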
+
+    def process_substream_block(self, block_data):
+        return self._upload_substream_block_with_progress(block_data[0], block_data[1])
+
+    def _upload_substream_block(self, index, block_stream):
+        raise NotImplementedError("Must be implemented by child class.")
+
+    def _upload_substream_block_with_progress(self, index, block_stream):
+        range_id = self._upload_substream_block(index, block_stream)
+        self._update_progress(len(block_stream))
+        return range_id
+
+    def set_response_properties(self, resp):
+        self.etag = resp.etag
+        self.last_modified = resp.last_modified
+
+
+class BlockBlobChunkUploader(_ChunkUploader):
+
+    def __init__(self, *args, **kwargs):
+        kwargs.pop("modified_access_conditions", None)
+        super(BlockBlobChunkUploader, self).__init__(*args, **kwargs)
+        self.current_length = None
+
+    def _upload_chunk(self, chunk_offset, chunk_data):
+        # TODO: This is incorrect, but works with recording.
+        index = f'{chunk_offset:032d}'
+        block_id = encode_base64(url_quote(encode_base64(index)))
+        self.service.stage_block(
+            block_id,
+            len(chunk_data),
+            chunk_data,
+            data_stream_total=self.total_size,
+            upload_stream_current=self.progress_total,
+            **self.request_options
+        )
+        return index, block_id
+
+    def _upload_substream_block(self, index, block_stream):
+        try:
+            block_id = f'BlockId{(index//self.chunk_size):05}'
+            self.service.stage_block(
+                block_id,
+                len(block_stream),
+                block_stream,
+                data_stream_total=self.total_size,
+                upload_stream_current=self.progress_total,
+                **self.request_options
+            )
+        finally:
+            block_stream.close()
+        return block_id
+
+
+class PageBlobChunkUploader(_ChunkUploader):
+
+    def _is_chunk_empty(self, chunk_data):
+        # Scan until a non-zero byte is encountered; if the end is reached
+        # without finding one, chunk_data is all zeros.
+        return not any(bytearray(chunk_data))
+
+    def _upload_chunk(self, chunk_offset, chunk_data):
+        # avoid uploading the empty pages
+        if not self._is_chunk_empty(chunk_data):
+            chunk_end = chunk_offset + len(chunk_data) - 1
+            content_range = f"bytes={chunk_offset}-{chunk_end}"
+            computed_md5 = None
+            self.response_headers = self.service.upload_pages(
+                body=chunk_data,
+                content_length=len(chunk_data),
+                transactional_content_md5=computed_md5,
+                range=content_range,
+                cls=return_response_headers,
+                data_stream_total=self.total_size,
+                upload_stream_current=self.progress_total,
+                **self.request_options
+            )
+
+            if not self.parallel and self.request_options.get('modified_access_conditions'):
+                self.request_options['modified_access_conditions'].if_match = self.response_headers['etag']
+
+    def _upload_substream_block(self, index, block_stream):
+        pass
+
+
+class AppendBlobChunkUploader(_ChunkUploader):
+
+    def __init__(self, *args, **kwargs):
+        super(AppendBlobChunkUploader, self).__init__(*args, **kwargs)
+        self.current_length = None
+
+    def _upload_chunk(self, chunk_offset, chunk_data):
+        if self.current_length is None:
+            self.response_headers = self.service.append_block(
+                body=chunk_data,
+                content_length=len(chunk_data),
+                cls=return_response_headers,
+                data_stream_total=self.total_size,
+                upload_stream_current=self.progress_total,
+                **self.request_options
+            )
+            self.current_length = int(self.response_headers["blob_append_offset"])
+        else:
+            self.request_options['append_position_access_conditions'].append_position = \
+                self.current_length + chunk_offset
+            self.response_headers = self.service.append_block(
+                body=chunk_data,
+                content_length=len(chunk_data),
+                cls=return_response_headers,
+                data_stream_total=self.total_size,
+                upload_stream_current=self.progress_total,
+                **self.request_options
+            )
+
+    def _upload_substream_block(self, index, block_stream):
+        pass
+
+
+class DataLakeFileChunkUploader(_ChunkUploader):
+
+    def _upload_chunk(self, chunk_offset, chunk_data):
+        # avoid uploading the empty pages
+        self.response_headers = self.service.append_data(
+            body=chunk_data,
+            position=chunk_offset,
+            content_length=len(chunk_data),
+            cls=return_response_headers,
+            data_stream_total=self.total_size,
+            upload_stream_current=self.progress_total,
+            **self.request_options
+        )
+
+        if not self.parallel and self.request_options.get('modified_access_conditions'):
+            self.request_options['modified_access_conditions'].if_match = self.response_headers['etag']
+
+    def _upload_substream_block(self, index, block_stream):
+        try:
+            self.service.append_data(
+                body=block_stream,
+                position=index,
+                content_length=len(block_stream),
+                cls=return_response_headers,
+                data_stream_total=self.total_size,
+                upload_stream_current=self.progress_total,
+                **self.request_options
+            )
+        finally:
+            block_stream.close()
+
+
+class FileChunkUploader(_ChunkUploader):
+
+    def _upload_chunk(self, chunk_offset, chunk_data):
+        length = len(chunk_data)
+        chunk_end = chunk_offset + length - 1
+        response = self.service.upload_range(
+            chunk_data,
+            chunk_offset,
+            length,
+            data_stream_total=self.total_size,
+            upload_stream_current=self.progress_total,
+            **self.request_options
+        )
+        return f'bytes={chunk_offset}-{chunk_end}', response
+
+    # TODO: Implement this method.
+    def _upload_substream_block(self, index, block_stream):
+        pass
+
+
+class SubStream(IOBase):
+
+    def __init__(self, wrapped_stream, stream_begin_index, length, lockObj):
+        # Python 2.7: file-like objects created with open() typically support seek(), but are not
+        # derivations of io.IOBase and thus do not implement seekable().
+        # Python > 3.0: file-like objects created with open() are derived from io.IOBase.
+        try:
+            # only the main thread runs this, so there's no need to grab the lock
+            wrapped_stream.seek(0, SEEK_CUR)
+        except Exception as exc:
+            raise ValueError("Wrapped stream must support seek().") from exc
+
+        self._lock = lockObj
+        self._wrapped_stream = wrapped_stream
+        self._position = 0
+        self._stream_begin_index = stream_begin_index
+        self._length = length
+        self._buffer = BytesIO()
+
+        # We must avoid buffering more than necessary while also not using up
+        # too much memory, so the max buffer size is capped at 4MB.
+        self._max_buffer_size = (
+            length if length < _LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE else _LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE
+        )
+        self._current_buffer_start = 0
+        self._current_buffer_size = 0
+        super(SubStream, self).__init__()
+
+    def __len__(self):
+        return self._length
+
+    def close(self):
+        if self._buffer:
+            self._buffer.close()
+        self._wrapped_stream = None
+        IOBase.close(self)
+
+    def fileno(self):
+        return self._wrapped_stream.fileno()
+
+    def flush(self):
+        pass
+
+    def read(self, size=None):
+        if self.closed:  # pylint: disable=using-constant-test
+            raise ValueError("Stream is closed.")
+
+        if size is None:
+            size = self._length - self._position
+
+        # adjust if out of bounds
+        if size + self._position >= self._length:
+            size = self._length - self._position
+
+        # return fast
+        if size == 0 or self._buffer.closed:
+            return b""
+
+        # attempt first read from the read buffer and update position
+        read_buffer = self._buffer.read(size)
+        bytes_read = len(read_buffer)
+        bytes_remaining = size - bytes_read
+        self._position += bytes_read
+
+        # repopulate the read buffer from the underlying stream to fulfill the request
+        # ensure the seek and read operations are done atomically (only if a lock is provided)
+        if bytes_remaining > 0:
+            with self._buffer:
+                # either read in the max buffer size specified on the class
+                # or read in just enough data for the current block/sub stream
+                current_max_buffer_size = min(self._max_buffer_size, self._length - self._position)
+
+                # lock is only defined if max_concurrency > 1 (parallel uploads)
+                if self._lock:
+                    with self._lock:
+                        # reposition the underlying stream to match the start of the data to read
+                        absolute_position = self._stream_begin_index + self._position
+                        self._wrapped_stream.seek(absolute_position, SEEK_SET)
+                        # If we can't seek to the right location, our read will be corrupted so fail fast.
+                        if self._wrapped_stream.tell() != absolute_position:
+                            raise IOError("Stream failed to seek to the desired location.")
+                        buffer_from_stream = self._wrapped_stream.read(current_max_buffer_size)
+                else:
+                    absolute_position = self._stream_begin_index + self._position
+                    # It's possible that there was a connection problem during the data
+                    # transfer, so on retry we don't want to read from the current position
+                    # of the wrapped stream; instead, we seek to where we want to read from.
+                    if self._wrapped_stream.tell() != absolute_position:
+                        self._wrapped_stream.seek(absolute_position, SEEK_SET)
+
+                    buffer_from_stream = self._wrapped_stream.read(current_max_buffer_size)
+
+            if buffer_from_stream:
+                # update the buffer with new data from the wrapped stream
+                # we need to note down the start position and size of the buffer, in case seek is performed later
+                self._buffer = BytesIO(buffer_from_stream)
+                self._current_buffer_start = self._position
+                self._current_buffer_size = len(buffer_from_stream)
+
+                # read the remaining bytes from the new buffer and update position
+                second_read_buffer = self._buffer.read(bytes_remaining)
+                read_buffer += second_read_buffer
+                self._position += len(second_read_buffer)
+
+        return read_buffer
+
+    def readable(self):
+        return True
+
+    def readinto(self, b):
+        raise UnsupportedOperation
+
+    def seek(self, offset, whence=0):
+        if whence is SEEK_SET:
+            start_index = 0
+        elif whence is SEEK_CUR:
+            start_index = self._position
+        elif whence is SEEK_END:
+            start_index = self._length
+            offset = -offset
+        else:
+            raise ValueError("Invalid argument for the 'whence' parameter.")
+
+        pos = start_index + offset
+
+        if pos > self._length:
+            pos = self._length
+        elif pos < 0:
+            pos = 0
+
+        # check if buffer is still valid
+        # if not, drop buffer
+        if pos < self._current_buffer_start or pos >= self._current_buffer_start + self._current_buffer_size:
+            self._buffer.close()
+            self._buffer = BytesIO()
+        else:  # if yes seek to correct position
+            delta = pos - self._current_buffer_start
+            self._buffer.seek(delta, SEEK_SET)
+
+        self._position = pos
+        return pos
+
+    def seekable(self):
+        return True
+
+    def tell(self):
+        return self._position
+
+    def write(self):
+        raise UnsupportedOperation
+
+    def writelines(self):
+        raise UnsupportedOperation
+
+    def writable(self):
+        return False
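+
+# A hedged usage sketch: each SubStream exposes a fixed window of the wrapped
+# stream so parallel uploaders can read disjoint ranges independently.
+#   >>> from io import BytesIO
+#   >>> from threading import Lock
+#   >>> sub = SubStream(BytesIO(b"0123456789"), 4, 3, Lock())
+#   >>> sub.read()
+#   b'456'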
+
+
+class IterStreamer(object):
+    """
+    File-like streaming iterator.
+    """
+
+    def __init__(self, generator, encoding="UTF-8"):
+        self.generator = generator
+        self.iterator = iter(generator)
+        self.leftover = b""
+        self.encoding = encoding
+
+    def __len__(self):
+        return self.generator.__len__()
+
+    def __iter__(self):
+        return self.iterator
+
+    def seekable(self):
+        return False
+
+    def __next__(self):
+        return next(self.iterator)
+
+    def tell(self, *args, **kwargs):
+        raise UnsupportedOperation("Data generator does not support tell.")
+
+    def seek(self, *args, **kwargs):
+        raise UnsupportedOperation("Data generator is not seekable.")
+
+    def read(self, size):
+        data = self.leftover
+        count = len(self.leftover)
+        try:
+            while count < size:
+                chunk = self.__next__()
+                if isinstance(chunk, str):
+                    chunk = chunk.encode(self.encoding)
+                data += chunk
+                count += len(chunk)
+        # This means count < size and what's leftover will be returned in this call.
+        except StopIteration:
+            self.leftover = b""
+
+        if count >= size:
+            self.leftover = data[size:]
+
+        return data[:size]
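+
+# A hedged usage sketch: leftover bytes from an oversized chunk are carried
+# into the next read() call.
+#   >>> s = IterStreamer([b"hello ", b"world"])
+#   >>> s.read(8)
+#   b'hello wo'
+#   >>> s.read(8)
+#   b'rld'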
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_shared/uploads_async.py b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_shared/uploads_async.py
new file mode 100644
index 00000000..3e102ec5
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_shared/uploads_async.py
@@ -0,0 +1,460 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+
+import asyncio
+import inspect
+import threading
+from asyncio import Lock
+from io import UnsupportedOperation
+from itertools import islice
+from math import ceil
+from typing import AsyncGenerator, Union
+
+from . import encode_base64, url_quote
+from .request_handlers import get_length
+from .response_handlers import return_response_headers
+from .uploads import SubStream, IterStreamer  # pylint: disable=unused-import
+
+
+async def _async_parallel_uploads(uploader, pending, running):
+    range_ids = []
+    while True:
+        # Wait for some upload to finish before adding a new one
+        done, running = await asyncio.wait(running, return_when=asyncio.FIRST_COMPLETED)
+        range_ids.extend([chunk.result() for chunk in done])
+        try:
+            for _ in range(0, len(done)):
+                next_chunk = await pending.__anext__()
+                running.add(asyncio.ensure_future(uploader(next_chunk)))
+        except StopAsyncIteration:
+            break
+
+    # Wait for the remaining uploads to finish
+    if running:
+        done, _running = await asyncio.wait(running)
+        range_ids.extend([chunk.result() for chunk in done])
+    return range_ids
+
+
+async def _parallel_uploads(uploader, pending, running):
+    range_ids = []
+    while True:
+        # Wait for some upload to finish before adding a new one
+        done, running = await asyncio.wait(running, return_when=asyncio.FIRST_COMPLETED)
+        range_ids.extend([chunk.result() for chunk in done])
+        try:
+            for _ in range(0, len(done)):
+                next_chunk = next(pending)
+                running.add(asyncio.ensure_future(uploader(next_chunk)))
+        except StopIteration:
+            break
+
+    # Wait for the remaining uploads to finish
+    if running:
+        done, _running = await asyncio.wait(running)
+        range_ids.extend([chunk.result() for chunk in done])
+    return range_ids
+
+
+async def upload_data_chunks(
+        service=None,
+        uploader_class=None,
+        total_size=None,
+        chunk_size=None,
+        max_concurrency=None,
+        stream=None,
+        progress_hook=None,
+        **kwargs):
+
+    parallel = max_concurrency > 1
+    if parallel and 'modified_access_conditions' in kwargs:
+        # Access conditions do not work with parallelism
+        kwargs['modified_access_conditions'] = None
+
+    uploader = uploader_class(
+        service=service,
+        total_size=total_size,
+        chunk_size=chunk_size,
+        stream=stream,
+        parallel=parallel,
+        progress_hook=progress_hook,
+        **kwargs)
+
+    if parallel:
+        upload_tasks = uploader.get_chunk_streams()
+        running_futures = []
+        for _ in range(max_concurrency):
+            try:
+                chunk = await upload_tasks.__anext__()
+                running_futures.append(asyncio.ensure_future(uploader.process_chunk(chunk)))
+            except StopAsyncIteration:
+                break
+
+        range_ids = await _async_parallel_uploads(uploader.process_chunk, upload_tasks, running_futures)
+    else:
+        range_ids = []
+        async for chunk in uploader.get_chunk_streams():
+            range_ids.append(await uploader.process_chunk(chunk))
+
+    if any(range_ids):
+        return [r[1] for r in sorted(range_ids, key=lambda r: r[0])]
+    return uploader.response_headers
+
+
+async def upload_substream_blocks(
+        service=None,
+        uploader_class=None,
+        total_size=None,
+        chunk_size=None,
+        max_concurrency=None,
+        stream=None,
+        progress_hook=None,
+        **kwargs):
+    parallel = max_concurrency > 1
+    if parallel and 'modified_access_conditions' in kwargs:
+        # Access conditions do not work with parallelism
+        kwargs['modified_access_conditions'] = None
+    uploader = uploader_class(
+        service=service,
+        total_size=total_size,
+        chunk_size=chunk_size,
+        stream=stream,
+        parallel=parallel,
+        progress_hook=progress_hook,
+        **kwargs)
+
+    if parallel:
+        upload_tasks = uploader.get_substream_blocks()
+        running_futures = [
+            asyncio.ensure_future(uploader.process_substream_block(u))
+            for u in islice(upload_tasks, 0, max_concurrency)
+        ]
+        range_ids = await _parallel_uploads(uploader.process_substream_block, upload_tasks, running_futures)
+    else:
+        range_ids = []
+        for block in uploader.get_substream_blocks():
+            range_ids.append(await uploader.process_substream_block(block))
+    if any(range_ids):
+        return sorted(range_ids)
+    return
+
+
+class _ChunkUploader(object):  # pylint: disable=too-many-instance-attributes
+
+    def __init__(
+            self, service,
+            total_size,
+            chunk_size,
+            stream,
+            parallel,
+            encryptor=None,
+            padder=None,
+            progress_hook=None,
+            **kwargs):
+        self.service = service
+        self.total_size = total_size
+        self.chunk_size = chunk_size
+        self.stream = stream
+        self.parallel = parallel
+
+        # Stream management
+        self.stream_lock = threading.Lock() if parallel else None
+
+        # Progress feedback
+        self.progress_total = 0
+        self.progress_lock = Lock() if parallel else None
+        self.progress_hook = progress_hook
+
+        # Encryption
+        self.encryptor = encryptor
+        self.padder = padder
+        self.response_headers = None
+        self.etag = None
+        self.last_modified = None
+        self.request_options = kwargs
+
+    async def get_chunk_streams(self):
+        index = 0
+        while True:
+            data = b''
+            read_size = self.chunk_size
+
+            # Buffer until we either reach the end of the stream or get a whole chunk.
+            while True:
+                if self.total_size:
+                    read_size = min(self.chunk_size - len(data), self.total_size - (index + len(data)))
+                temp = self.stream.read(read_size)
+                if inspect.isawaitable(temp):
+                    temp = await temp
+                if not isinstance(temp, bytes):
+                    raise TypeError('Blob data should be of type bytes.')
+                data += temp or b""
+
+                # We have read an empty string and so are at the end
+                # of the buffer or we have read a full chunk.
+                if temp == b'' or len(data) == self.chunk_size:
+                    break
+
+            if len(data) == self.chunk_size:
+                if self.padder:
+                    data = self.padder.update(data)
+                if self.encryptor:
+                    data = self.encryptor.update(data)
+                yield index, data
+            else:
+                if self.padder:
+                    data = self.padder.update(data) + self.padder.finalize()
+                if self.encryptor:
+                    data = self.encryptor.update(data) + self.encryptor.finalize()
+                if data:
+                    yield index, data
+                break
+            index += len(data)
+
+    async def process_chunk(self, chunk_data):
+        chunk_bytes = chunk_data[1]
+        chunk_offset = chunk_data[0]
+        return await self._upload_chunk_with_progress(chunk_offset, chunk_bytes)
+
+    async def _update_progress(self, length):
+        if self.progress_lock is not None:
+            async with self.progress_lock:
+                self.progress_total += length
+        else:
+            self.progress_total += length
+
+        if self.progress_hook:
+            await self.progress_hook(self.progress_total, self.total_size)
+
+    async def _upload_chunk(self, chunk_offset, chunk_data):
+        raise NotImplementedError("Must be implemented by child class.")
+
+    async def _upload_chunk_with_progress(self, chunk_offset, chunk_data):
+        range_id = await self._upload_chunk(chunk_offset, chunk_data)
+        await self._update_progress(len(chunk_data))
+        return range_id
+
+    def get_substream_blocks(self):
+        assert self.chunk_size is not None
+        lock = self.stream_lock
+        blob_length = self.total_size
+
+        if blob_length is None:
+            blob_length = get_length(self.stream)
+            if blob_length is None:
+                raise ValueError("Unable to determine content length of upload data.")
+
+        blocks = int(ceil(blob_length / (self.chunk_size * 1.0)))
+        last_block_size = self.chunk_size if blob_length % self.chunk_size == 0 else blob_length % self.chunk_size
+
+        for i in range(blocks):
+            index = i * self.chunk_size
+            length = last_block_size if i == blocks - 1 else self.chunk_size
+            yield index, SubStream(self.stream, index, length, lock)
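get_substream_blocks() splits the stream into ceil(total / chunk_size) blocks, where only the final block may be short. A hedged sketch of the same arithmetic (block_layout is an illustrative helper, not SDK API):

    from math import ceil

    def block_layout(total, chunk):
        blocks = int(ceil(total / chunk))
        last = chunk if total % chunk == 0 else total % chunk
        return [(i * chunk, last if i == blocks - 1 else chunk) for i in range(blocks)]

    # A 10-byte stream with 4-byte chunks yields offset/length pairs (0,4), (4,4), (8,2).
    assert block_layout(10, 4) == [(0, 4), (4, 4), (8, 2)]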
+
+    async def process_substream_block(self, block_data):
+        return await self._upload_substream_block_with_progress(block_data[0], block_data[1])
+
+    async def _upload_substream_block(self, index, block_stream):
+        raise NotImplementedError("Must be implemented by child class.")
+
+    async def _upload_substream_block_with_progress(self, index, block_stream):
+        range_id = await self._upload_substream_block(index, block_stream)
+        await self._update_progress(len(block_stream))
+        return range_id
+
+    def set_response_properties(self, resp):
+        self.etag = resp.etag
+        self.last_modified = resp.last_modified
+
+
+class BlockBlobChunkUploader(_ChunkUploader):
+
+    def __init__(self, *args, **kwargs):
+        kwargs.pop('modified_access_conditions', None)
+        super(BlockBlobChunkUploader, self).__init__(*args, **kwargs)
+        self.current_length = None
+
+    async def _upload_chunk(self, chunk_offset, chunk_data):
+        # TODO: This is incorrect, but works with recording.
+        index = f'{chunk_offset:032d}'
+        block_id = encode_base64(url_quote(encode_base64(index)))
+        await self.service.stage_block(
+            block_id,
+            len(chunk_data),
+            body=chunk_data,
+            data_stream_total=self.total_size,
+            upload_stream_current=self.progress_total,
+            **self.request_options)
+        return index, block_id
+
+    async def _upload_substream_block(self, index, block_stream):
+        try:
+            block_id = f'BlockId{(index//self.chunk_size):05}'
+            await self.service.stage_block(
+                block_id,
+                len(block_stream),
+                block_stream,
+                data_stream_total=self.total_size,
+                upload_stream_current=self.progress_total,
+                **self.request_options)
+        finally:
+            block_stream.close()
+        return block_id
+
+
+class PageBlobChunkUploader(_ChunkUploader):
+
+    def _is_chunk_empty(self, chunk_data):
+        # read until non-zero byte is encountered
+        # if reached the end without returning, then chunk_data is all 0's
+        for each_byte in chunk_data:
+            if each_byte not in [0, b'\x00']:
+                return False
+        return True
+
+    async def _upload_chunk(self, chunk_offset, chunk_data):
+        # avoid uploading the empty pages
+        if not self._is_chunk_empty(chunk_data):
+            chunk_end = chunk_offset + len(chunk_data) - 1
+            content_range = f'bytes={chunk_offset}-{chunk_end}'
+            computed_md5 = None
+            self.response_headers = await self.service.upload_pages(
+                body=chunk_data,
+                content_length=len(chunk_data),
+                transactional_content_md5=computed_md5,
+                range=content_range,
+                cls=return_response_headers,
+                data_stream_total=self.total_size,
+                upload_stream_current=self.progress_total,
+                **self.request_options)
+
+            if not self.parallel and self.request_options.get('modified_access_conditions'):
+                self.request_options['modified_access_conditions'].if_match = self.response_headers['etag']
+
+    async def _upload_substream_block(self, index, block_stream):
+        pass
+
+
+class AppendBlobChunkUploader(_ChunkUploader):
+
+    def __init__(self, *args, **kwargs):
+        super(AppendBlobChunkUploader, self).__init__(*args, **kwargs)
+        self.current_length = None
+
+    async def _upload_chunk(self, chunk_offset, chunk_data):
+        if self.current_length is None:
+            self.response_headers = await self.service.append_block(
+                body=chunk_data,
+                content_length=len(chunk_data),
+                cls=return_response_headers,
+                data_stream_total=self.total_size,
+                upload_stream_current=self.progress_total,
+                **self.request_options)
+            self.current_length = int(self.response_headers['blob_append_offset'])
+        else:
+            self.request_options['append_position_access_conditions'].append_position = \
+                self.current_length + chunk_offset
+            self.response_headers = await self.service.append_block(
+                body=chunk_data,
+                content_length=len(chunk_data),
+                cls=return_response_headers,
+                data_stream_total=self.total_size,
+                upload_stream_current=self.progress_total,
+                **self.request_options)
+
+    async def _upload_substream_block(self, index, block_stream):
+        pass
+
+
+class DataLakeFileChunkUploader(_ChunkUploader):
+
+    async def _upload_chunk(self, chunk_offset, chunk_data):
+        self.response_headers = await self.service.append_data(
+            body=chunk_data,
+            position=chunk_offset,
+            content_length=len(chunk_data),
+            cls=return_response_headers,
+            data_stream_total=self.total_size,
+            upload_stream_current=self.progress_total,
+            **self.request_options
+        )
+
+        if not self.parallel and self.request_options.get('modified_access_conditions'):
+            self.request_options['modified_access_conditions'].if_match = self.response_headers['etag']
+
+    async def _upload_substream_block(self, index, block_stream):
+        try:
+            await self.service.append_data(
+                body=block_stream,
+                position=index,
+                content_length=len(block_stream),
+                cls=return_response_headers,
+                data_stream_total=self.total_size,
+                upload_stream_current=self.progress_total,
+                **self.request_options
+            )
+        finally:
+            block_stream.close()
+
+
+class FileChunkUploader(_ChunkUploader):
+
+    async def _upload_chunk(self, chunk_offset, chunk_data):
+        length = len(chunk_data)
+        chunk_end = chunk_offset + length - 1
+        response = await self.service.upload_range(
+            chunk_data,
+            chunk_offset,
+            length,
+            data_stream_total=self.total_size,
+            upload_stream_current=self.progress_total,
+            **self.request_options
+        )
+        range_id = f'bytes={chunk_offset}-{chunk_end}'
+        return range_id, response
+
+    # TODO: Implement this method.
+    async def _upload_substream_block(self, index, block_stream):
+        pass
+
+
+class AsyncIterStreamer():
+    """
+    File-like streaming object for AsyncGenerators.
+    """
+    def __init__(self, generator: AsyncGenerator[Union[bytes, str], None], encoding: str = "UTF-8"):
+        self.iterator = generator.__aiter__()
+        self.leftover = b""
+        self.encoding = encoding
+
+    def seekable(self):
+        return False
+
+    def tell(self, *args, **kwargs):
+        raise UnsupportedOperation("Data generator does not support tell.")
+
+    def seek(self, *args, **kwargs):
+        raise UnsupportedOperation("Data generator is not seekable.")
+
+    async def read(self, size: int) -> bytes:
+        data = self.leftover
+        count = len(self.leftover)
+        try:
+            while count < size:
+                chunk = await self.iterator.__anext__()
+                if isinstance(chunk, str):
+                    chunk = chunk.encode(self.encoding)
+                data += chunk
+                count += len(chunk)
+        # The async iterator is exhausted: count < size, so all remaining buffered data is returned by this call.
+        except StopAsyncIteration:
+            self.leftover = b""
+
+        if count >= size:
+            self.leftover = data[size:]
+
+        return data[:size]
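A hedged usage sketch for AsyncIterStreamer as defined above: wrap an async generator so chunked-upload code can call read(size) on it (the generator below is illustrative):

    import asyncio

    async def gen():
        yield "hello "  # str chunks are encoded using the configured encoding
        yield b"world"

    async def main():
        stream = AsyncIterStreamer(gen())
        print(await stream.read(4))    # b'hell'
        print(await stream.read(100))  # b'o world' (iterator exhausted)

    asyncio.run(main())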
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_shared_access_signature.py b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_shared_access_signature.py
new file mode 100644
index 00000000..12b63e83
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_shared_access_signature.py
@@ -0,0 +1,462 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+# pylint: disable=docstring-keyword-should-match-keyword-only
+
+from typing import (
+    Any, Callable, Optional, Union,
+    TYPE_CHECKING
+)
+from urllib.parse import parse_qs
+
+from azure.storage.blob import generate_account_sas as generate_blob_account_sas
+from azure.storage.blob import generate_container_sas, generate_blob_sas
+from ._shared.models import Services
+from ._shared.shared_access_signature import QueryStringConstants
+
+
+if TYPE_CHECKING:
+    from datetime import datetime
+    from ._models import (
+        AccountSasPermissions,
+        DirectorySasPermissions,
+        FileSasPermissions,
+        FileSystemSasPermissions,
+        ResourceTypes,
+        UserDelegationKey
+    )
+
+
+def generate_account_sas(
+    account_name: str,
+    account_key: str,
+    resource_types: Union["ResourceTypes", str],
+    permission: Union["AccountSasPermissions", str],
+    expiry: Union["datetime", str],
+    *,
+    services: Union[Services, str] = Services(blob=True),
+    sts_hook: Optional[Callable[[str], None]] = None,
+    **kwargs: Any
+) -> str:
+    """Generates a shared access signature for the DataLake service.
+
+    Use the returned signature as the credential parameter of any DataLakeServiceClient,
+    FileSystemClient, DataLakeDirectoryClient or DataLakeFileClient.
+
+    :param str account_name:
+        The storage account name used to generate the shared access signature.
+    :param str account_key:
+        The access key to generate the shared access signature.
+    :param resource_types:
+        Specifies the resource types that are accessible with the account SAS.
+    :type resource_types: str or ~azure.storage.filedatalake.ResourceTypes
+    :param permission:
+        The permissions associated with the shared access signature. The
+        user is restricted to operations allowed by the permissions.
+    :type permission: str or ~azure.storage.filedatalake.AccountSasPermissions
+    :param expiry:
+        The time at which the shared access signature becomes invalid.
+        The provided datetime will always be interpreted as UTC.
+    :type expiry: ~datetime.datetime or str
+    :keyword start:
+        The time at which the shared access signature becomes valid. If
+        omitted, start time for this call is assumed to be the time when the
+        storage service receives the request. The provided datetime will always
+        be interpreted as UTC.
+    :paramtype start: ~datetime.datetime or str
+    :keyword str ip:
+        Specifies an IP address or a range of IP addresses from which to accept requests.
+        If the IP address from which the request originates does not match the IP address
+        or address range specified on the SAS token, the request is not authenticated.
+        For example, specifying ip=168.1.5.65 or ip=168.1.5.60-168.1.5.70 on the SAS
+        restricts the request to those IP addresses.
+    :keyword Union[Services, str] services:
+        Specifies the services that the shared access signature (SAS) token can be used with.
+        Defaults to only this package's service (i.e. blobs) if not provided.
+    :keyword str protocol:
+        Specifies the protocol permitted for a request made. The default value is https.
+    :keyword str encryption_scope:
+        Specifies the encryption scope for a request made so that all write operations will be service encrypted.
+    :keyword sts_hook:
+        For debugging purposes only. If provided, the hook is called with the string to sign
+        that was used to generate the SAS.
+    :paramtype sts_hook: Optional[Callable[[str], None]]
+    :return: A Shared Access Signature (sas) token.
+    :rtype: str
+    """
+    return generate_blob_account_sas(
+        account_name=account_name,
+        account_key=account_key,
+        resource_types=resource_types,
+        permission=permission,
+        expiry=expiry,
+        services=services,
+        sts_hook=sts_hook,
+        **kwargs
+    )
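A hedged usage sketch for generate_account_sas; the account name and key are placeholders, and ResourceTypes/AccountSasPermissions are the models exported by this package:

    from datetime import datetime, timedelta
    from azure.storage.filedatalake import (
        AccountSasPermissions,
        DataLakeServiceClient,
        ResourceTypes,
        generate_account_sas,
    )

    sas_token = generate_account_sas(
        account_name="myaccount",      # placeholder
        account_key="<account-key>",   # placeholder
        resource_types=ResourceTypes(file_system=True, object=True),
        permission=AccountSasPermissions(read=True, list=True),
        expiry=datetime.utcnow() + timedelta(hours=1),
    )
    service_client = DataLakeServiceClient(
        account_url="https://myaccount.dfs.core.windows.net",
        credential=sas_token,
    )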
+
+
+def generate_file_system_sas(
+    account_name,  # type: str
+    file_system_name,  # type: str
+    credential,  # type: Union[str, UserDelegationKey]
+    permission=None,  # type: Optional[Union[FileSystemSasPermissions, str]]
+    expiry=None,  # type: Optional[Union[datetime, str]]
+    *,
+    sts_hook=None,  # type: Optional[Callable[[str], None]]
+    **kwargs  # type: Any
+):
+    # type: (...) -> str
+    """Generates a shared access signature for a file system.
+
+    Use the returned signature with the credential parameter of any DataLakeServiceClient,
+    FileSystemClient, DataLakeDirectoryClient or DataLakeFileClient.
+
+    :param str account_name:
+        The storage account name used to generate the shared access signature.
+    :param str file_system_name:
+        The name of the file system.
+    :param credential:
+        The credential can be either an account key or a user delegation key.
+        If an account key is used as the credential, the credential type should be a str.
+        Instead of an account key, the user could also pass in a user delegation key.
+        A user delegation key can be obtained from the service by authenticating with an AAD identity;
+        this can be accomplished
+        by calling :func:`~azure.storage.filedatalake.DataLakeServiceClient.get_user_delegation_key`.
+        When present, the SAS is signed with the user delegation key instead.
+    :type credential: str or ~azure.storage.filedatalake.UserDelegationKey
+    :param permission:
+        The permissions associated with the shared access signature. The
+        user is restricted to operations allowed by the permissions.
+        Permissions must be ordered racwdlmeop.
+        Required unless an id is given referencing a stored access policy
+        which contains this field. This field must be omitted if it has been
+        specified in an associated stored access policy.
+    :type permission: str or ~azure.storage.filedatalake.FileSystemSasPermissions
+    :param expiry:
+        The time at which the shared access signature becomes invalid.
+        Required unless an id is given referencing a stored access policy
+        which contains this field. This field must be omitted if it has
+        been specified in an associated stored access policy. Azure will always
+        convert values to UTC. If a date is passed in without timezone info, it
+        is assumed to be UTC.
+    :type expiry: datetime or str
+    :keyword start:
+        The time at which the shared access signature becomes valid. If
+        omitted, start time for this call is assumed to be the time when the
+        storage service receives the request. The provided datetime will always
+        be interpreted as UTC.
+    :paramtype start: datetime or str
+    :keyword str policy_id:
+        A unique value up to 64 characters in length that correlates to a
+        stored access policy. To create a stored access policy, use
+        :func:`~azure.storage.filedatalake.FileSystemClient.set_file_system_access_policy`.
+    :keyword str ip:
+        Specifies an IP address or a range of IP addresses from which to accept requests.
+        If the IP address from which the request originates does not match the IP address
+        or address range specified on the SAS token, the request is not authenticated.
+        For example, specifying ip=168.1.5.65 or ip=168.1.5.60-168.1.5.70 on the SAS
+        restricts the request to those IP addresses.
+    :keyword str protocol:
+        Specifies the protocol permitted for a request made. The default value is https.
+    :keyword str cache_control:
+        Response header value for Cache-Control when resource is accessed
+        using this shared access signature.
+    :keyword str content_disposition:
+        Response header value for Content-Disposition when resource is accessed
+        using this shared access signature.
+    :keyword str content_encoding:
+        Response header value for Content-Encoding when resource is accessed
+        using this shared access signature.
+    :keyword str content_language:
+        Response header value for Content-Language when resource is accessed
+        using this shared access signature.
+    :keyword str content_type:
+        Response header value for Content-Type when resource is accessed
+        using this shared access signature.
+    :keyword str preauthorized_agent_object_id:
+        The AAD object ID of a user assumed to be authorized by the owner of the user delegation key to perform
+        the action granted by the SAS token. The service will validate the SAS token and ensure that the owner of the
+        user delegation key has the required permissions before granting access but no additional permission check for
+        the agent object id will be performed.
+    :keyword str agent_object_id:
+        The AAD object ID of a user assumed to be unauthorized by the owner of the user delegation key to
+        perform the action granted by the SAS token. The service will validate the SAS token and ensure that the owner
+        of the user delegation key has the required permissions before granting access and the service will perform an
+        additional POSIX ACL check to determine if this user is authorized to perform the requested operation.
+    :keyword str correlation_id:
+        The correlation id to correlate the storage audit logs with the audit logs used by the principal
+        generating and distributing the SAS.
+    :keyword str encryption_scope:
+        Specifies the encryption scope for a request made so that all write operations will be service encrypted.
+    :keyword sts_hook:
+        For debugging purposes only. If provided, the hook is called with the string to sign
+        that was used to generate the SAS.
+    :paramtype sts_hook: Optional[Callable[[str], None]]
+    :return: A Shared Access Signature (sas) token.
+    :rtype: str
+    """
+    return generate_container_sas(
+        account_name=account_name,
+        container_name=file_system_name,
+        account_key=credential if isinstance(credential, str) else None,
+        user_delegation_key=credential if not isinstance(credential, str) else None,
+        permission=permission,
+        expiry=expiry,
+        sts_hook=sts_hook,
+        **kwargs
+    )
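A hedged usage sketch for generate_file_system_sas with an account key (all names are placeholders):

    from datetime import datetime, timedelta
    from azure.storage.filedatalake import FileSystemSasPermissions, generate_file_system_sas

    sas_token = generate_file_system_sas(
        account_name="myaccount",        # placeholder
        file_system_name="my-file-system",
        credential="<account-key>",      # a str is treated as the account key
        permission=FileSystemSasPermissions(read=True, list=True),
        expiry=datetime.utcnow() + timedelta(hours=1),
    )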
+
+
+def generate_directory_sas(
+    account_name,  # type: str
+    file_system_name,  # type: str
+    directory_name,  # type: str
+    credential,  # type: Union[str, UserDelegationKey]
+    permission=None,  # type: Optional[Union[DirectorySasPermissions, str]]
+    expiry=None,  # type: Optional[Union[datetime, str]]
+    *,
+    sts_hook=None,  # type: Optional[Callable[[str], None]]
+    **kwargs  # type: Any
+):
+    # type: (...) -> str
+    """Generates a shared access signature for a directory.
+
+    Use the returned signature with the credential parameter of any DataLakeServiceClient,
+    FileSystemClient, DataLakeDirectoryClient or DataLakeFileClient.
+
+    :param str account_name:
+        The storage account name used to generate the shared access signature.
+    :param str file_system_name:
+        The name of the file system.
+    :param str directory_name:
+        The name of the directory.
+    :param credential:
+        The credential can be either an account key or a user delegation key.
+        If an account key is used as the credential, the credential type should be a str.
+        Instead of an account key, the user could also pass in a user delegation key.
+        A user delegation key can be obtained from the service by authenticating with an AAD identity;
+        this can be accomplished
+        by calling :func:`~azure.storage.filedatalake.DataLakeServiceClient.get_user_delegation_key`.
+        When present, the SAS is signed with the user delegation key instead.
+    :type credential: str or ~azure.storage.filedatalake.UserDelegationKey
+    :param permission:
+        The permissions associated with the shared access signature. The
+        user is restricted to operations allowed by the permissions.
+        Permissions must be ordered racwdlmeop.
+        Required unless an id is given referencing a stored access policy
+        which contains this field. This field must be omitted if it has been
+        specified in an associated stored access policy.
+    :type permission: str or ~azure.storage.filedatalake.DirectorySasPermissions
+    :param expiry:
+        The time at which the shared access signature becomes invalid.
+        Required unless an id is given referencing a stored access policy
+        which contains this field. This field must be omitted if it has
+        been specified in an associated stored access policy. Azure will always
+        convert values to UTC. If a date is passed in without timezone info, it
+        is assumed to be UTC.
+    :type expiry: ~datetime.datetime or str
+    :keyword start:
+        The time at which the shared access signature becomes valid. If
+        omitted, start time for this call is assumed to be the time when the
+        storage service receives the request. The provided datetime will always
+        be interpreted as UTC.
+    :paramtype start: ~datetime.datetime or str
+    :keyword str policy_id:
+        A unique value up to 64 characters in length that correlates to a
+        stored access policy. To create a stored access policy, use
+        :func:`~azure.storage.filedatalake.FileSystemClient.set_file_system_access_policy`.
+    :keyword str ip:
+        Specifies an IP address or a range of IP addresses from which to accept requests.
+        If the IP address from which the request originates does not match the IP address
+        or address range specified on the SAS token, the request is not authenticated.
+        For example, specifying ip=168.1.5.65 or ip=168.1.5.60-168.1.5.70 on the SAS
+        restricts the request to those IP addresses.
+    :keyword str protocol:
+        Specifies the protocol permitted for a request made. The default value is https.
+    :keyword str cache_control:
+        Response header value for Cache-Control when resource is accessed
+        using this shared access signature.
+    :keyword str content_disposition:
+        Response header value for Content-Disposition when resource is accessed
+        using this shared access signature.
+    :keyword str content_encoding:
+        Response header value for Content-Encoding when resource is accessed
+        using this shared access signature.
+    :keyword str content_language:
+        Response header value for Content-Language when resource is accessed
+        using this shared access signature.
+    :keyword str content_type:
+        Response header value for Content-Type when resource is accessed
+        using this shared access signature.
+    :keyword str preauthorized_agent_object_id:
+        The AAD object ID of a user assumed to be authorized by the owner of the user delegation key to perform
+        the action granted by the SAS token. The service will validate the SAS token and ensure that the owner of the
+        user delegation key has the required permissions before granting access but no additional permission check for
+        the agent object id will be performed.
+    :keyword str agent_object_id:
+        The AAD object ID of a user assumed to be unauthorized by the owner of the user delegation key to
+        perform the action granted by the SAS token. The service will validate the SAS token and ensure that the owner
+        of the user delegation key has the required permissions before granting access and the service will perform an
+        additional POSIX ACL check to determine if this user is authorized to perform the requested operation.
+    :keyword str correlation_id:
+        The correlation id to correlate the storage audit logs with the audit logs used by the principal
+        generating and distributing the SAS.
+    :keyword str encryption_scope:
+        Specifies the encryption scope for a request made so that all write operations will be service encrypted.
+    :keyword sts_hook:
+        For debugging purposes only. If provided, the hook is called with the string to sign
+        that was used to generate the SAS.
+    :paramtype sts_hook: Optional[Callable[[str], None]]
+    :return: A Shared Access Signature (sas) token.
+    :rtype: str
+    """
+    depth = len(directory_name.strip("/").split("/"))
+    return generate_blob_sas(
+        account_name=account_name,
+        container_name=file_system_name,
+        blob_name=directory_name,
+        account_key=credential if isinstance(credential, str) else None,
+        user_delegation_key=credential if not isinstance(credential, str) else None,
+        permission=permission,
+        expiry=expiry,
+        sdd=depth,
+        is_directory=True,
+        sts_hook=sts_hook,
+        **kwargs
+    )
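Note that the wrapper derives sdd (signed directory depth) from the directory path before delegating to generate_blob_sas. A hedged sketch using a user delegation key; account and identity values are placeholders, and DefaultAzureCredential comes from the separate azure-identity package:

    from datetime import datetime, timedelta
    from azure.identity import DefaultAzureCredential
    from azure.storage.filedatalake import (
        DataLakeServiceClient,
        DirectorySasPermissions,
        generate_directory_sas,
    )

    service_client = DataLakeServiceClient(
        "https://myaccount.dfs.core.windows.net",  # placeholder
        credential=DefaultAzureCredential(),
    )
    start = datetime.utcnow()
    delegation_key = service_client.get_user_delegation_key(start, start + timedelta(hours=1))
    sas_token = generate_directory_sas(
        account_name="myaccount",
        file_system_name="my-file-system",
        directory_name="parent/child",  # depth 2, so sdd=2
        credential=delegation_key,      # non-str is treated as a user delegation key
        permission=DirectorySasPermissions(read=True, list=True),
        expiry=start + timedelta(hours=1),
    )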
+
+
+def generate_file_sas(
+    account_name,  # type: str
+    file_system_name,  # type: str
+    directory_name,  # type: str
+    file_name,  # type: str
+    credential,  # type: Union[str, UserDelegationKey]
+    permission=None,  # type: Optional[Union[FileSasPermissions, str]]
+    expiry=None,  # type: Optional[Union[datetime, str]]
+    *,
+    sts_hook=None,  # type: Optional[Callable[[str], None]]
+    **kwargs  # type: Any
+):
+    # type: (...) -> str
+    """Generates a shared access signature for a file.
+
+    Use the returned signature with the credential parameter of any DataLakeServiceClient,
+    FileSystemClient, DataLakeDirectoryClient or DataLakeFileClient.
+
+    :param str account_name:
+        The storage account name used to generate the shared access signature.
+    :param str file_system_name:
+        The name of the file system.
+    :param str directory_name:
+        The name of the directory.
+    :param str file_name:
+        The name of the file.
+    :param credential:
+        The credential can be either an account key or a user delegation key.
+        If an account key is used as the credential, the credential type should be a str.
+        Instead of an account key, the user could also pass in a user delegation key.
+        A user delegation key can be obtained from the service by authenticating with an AAD identity;
+        this can be accomplished
+        by calling :func:`~azure.storage.filedatalake.DataLakeServiceClient.get_user_delegation_key`.
+        When present, the SAS is signed with the user delegation key instead.
+    :type credential: str or ~azure.storage.filedatalake.UserDelegationKey
+    :param permission:
+        The permissions associated with the shared access signature. The
+        user is restricted to operations allowed by the permissions.
+        Permissions must be ordered racwdlmeop.
+        Required unless an id is given referencing a stored access policy
+        which contains this field. This field must be omitted if it has been
+        specified in an associated stored access policy.
+    :type permission: str or ~azure.storage.filedatalake.FileSasPermissions
+    :param expiry:
+        The time at which the shared access signature becomes invalid.
+        Required unless an id is given referencing a stored access policy
+        which contains this field. This field must be omitted if it has
+        been specified in an associated stored access policy. Azure will always
+        convert values to UTC. If a date is passed in without timezone info, it
+        is assumed to be UTC.
+    :type expiry: ~datetime.datetime or str
+    :keyword start:
+        The time at which the shared access signature becomes valid. If
+        omitted, start time for this call is assumed to be the time when the
+        storage service receives the request. The provided datetime will always
+        be interpreted as UTC.
+    :paramtype start: ~datetime.datetime or str
+    :keyword str policy_id:
+        A unique value up to 64 characters in length that correlates to a
+        stored access policy. To create a stored access policy, use
+        :func:`~azure.storage.filedatalake.FileSystemClient.set_file_system_access_policy`.
+    :keyword str ip:
+        Specifies an IP address or a range of IP addresses from which to accept requests.
+        If the IP address from which the request originates does not match the IP address
+        or address range specified on the SAS token, the request is not authenticated.
+        For example, specifying ip=168.1.5.65 or ip=168.1.5.60-168.1.5.70 on the SAS
+        restricts the request to those IP addresses.
+    :keyword str protocol:
+        Specifies the protocol permitted for a request made. The default value is https.
+    :keyword str cache_control:
+        Response header value for Cache-Control when resource is accessed
+        using this shared access signature.
+    :keyword str content_disposition:
+        Response header value for Content-Disposition when resource is accessed
+        using this shared access signature.
+    :keyword str content_encoding:
+        Response header value for Content-Encoding when resource is accessed
+        using this shared access signature.
+    :keyword str content_language:
+        Response header value for Content-Language when resource is accessed
+        using this shared access signature.
+    :keyword str content_type:
+        Response header value for Content-Type when resource is accessed
+        using this shared access signature.
+    :keyword str preauthorized_agent_object_id:
+        The AAD object ID of a user assumed to be authorized by the owner of the user delegation key to perform
+        the action granted by the SAS token. The service will validate the SAS token and ensure that the owner of the
+        user delegation key has the required permissions before granting access but no additional permission check for
+        the agent object id will be performed.
+    :keyword str agent_object_id:
+        The AAD object ID of a user assumed to be unauthorized by the owner of the user delegation key to
+        perform the action granted by the SAS token. The service will validate the SAS token and ensure that the owner
+        of the user delegation key has the required permissions before granting access and the service will perform an
+        additional POSIX ACL check to determine if this user is authorized to perform the requested operation.
+    :keyword str correlation_id:
+        The correlation id to correlate the storage audit logs with the audit logs used by the principal
+        generating and distributing the SAS. This can only be used when generating a SAS with delegation key.
+    :keyword str encryption_scope:
+        Specifies the encryption scope for a request made so that all write operations will be service encrypted.
+    :keyword sts_hook:
+        For debugging purposes only. If provided, the hook is called with the string to sign
+        that was used to generate the SAS.
+    :paramtype sts_hook: Optional[Callable[[str], None]]
+    :return: A Shared Access Signature (sas) token.
+    :rtype: str
+    """
+    if directory_name:
+        path = directory_name.rstrip('/') + "/" + file_name
+    else:
+        path = file_name
+    return generate_blob_sas(
+        account_name=account_name,
+        container_name=file_system_name,
+        blob_name=path,
+        account_key=credential if isinstance(credential, str) else None,
+        user_delegation_key=credential if not isinstance(credential, str) else None,
+        permission=permission,
+        expiry=expiry,
+        sts_hook=sts_hook,
+        **kwargs
+    )
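A hedged sketch for generate_file_sas; note how directory_name and file_name are joined into the blob path before delegating to generate_blob_sas (all names are placeholders):

    from datetime import datetime, timedelta
    from azure.storage.filedatalake import FileSasPermissions, generate_file_sas

    sas_token = generate_file_sas(
        account_name="myaccount",       # placeholder
        file_system_name="my-file-system",
        directory_name="folder/",       # trailing slash stripped, path becomes "folder/data.csv"
        file_name="data.csv",
        credential="<account-key>",
        permission=FileSasPermissions(read=True),
        expiry=datetime.utcnow() + timedelta(hours=1),
    )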
+
+
+def _is_credential_sastoken(credential: Any) -> bool:
+    if not credential or not isinstance(credential, str):
+        return False
+
+    sas_values = QueryStringConstants.to_list()
+    parsed_query = parse_qs(credential.lstrip("?"))
+    if parsed_query and all(k in sas_values for k in parsed_query):
+        return True
+    return False
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_upload_helper.py b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_upload_helper.py
new file mode 100644
index 00000000..6cd89540
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_upload_helper.py
@@ -0,0 +1,105 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+
+from azure.core.exceptions import HttpResponseError
+
+from ._deserialize import process_storage_error
+from ._shared.response_handlers import return_response_headers
+from ._shared.uploads import (
+    upload_data_chunks,
+    DataLakeFileChunkUploader,
+    upload_substream_blocks
+)
+
+
+def _any_conditions(modified_access_conditions=None, **kwargs):  # pylint: disable=unused-argument
+    return any([
+        modified_access_conditions.if_modified_since,
+        modified_access_conditions.if_unmodified_since,
+        modified_access_conditions.if_none_match,
+        modified_access_conditions.if_match
+    ])
+
+
+def upload_datalake_file(
+        client=None,
+        stream=None,
+        length=None,
+        overwrite=None,
+        validate_content=None,
+        max_concurrency=None,
+        file_settings=None,
+        **kwargs):
+    try:
+        if length == 0:
+            return {}
+        properties = kwargs.pop('properties', None)
+        umask = kwargs.pop('umask', None)
+        permissions = kwargs.pop('permissions', None)
+        path_http_headers = kwargs.pop('path_http_headers', None)
+        modified_access_conditions = kwargs.pop('modified_access_conditions', None)
+        chunk_size = kwargs.pop('chunk_size', 100 * 1024 * 1024)
+        encryption_context = kwargs.pop('encryption_context', None)
+
+        if not overwrite:
+            # if the customer didn't specify access conditions, they cannot flush data to an existing file
+            if not _any_conditions(modified_access_conditions):
+                modified_access_conditions.if_none_match = '*'
+            if properties or umask or permissions:
+                raise ValueError("metadata, umask and permissions can be set only when overwrite is enabled")
+
+        if overwrite:
+            response = client.create(
+                resource='file',
+                path_http_headers=path_http_headers,
+                properties=properties,
+                modified_access_conditions=modified_access_conditions,
+                umask=umask,
+                permissions=permissions,
+                encryption_context=encryption_context,
+                cls=return_response_headers,
+                **kwargs)
+
+            # this modified_access_conditions will be applied to flush_data to make sure
+            # no other flush between create and the current flush
+            modified_access_conditions.if_match = response['etag']
+            modified_access_conditions.if_none_match = None
+            modified_access_conditions.if_modified_since = None
+            modified_access_conditions.if_unmodified_since = None
+
+        use_original_upload_path = file_settings.use_byte_buffer or \
+            validate_content or chunk_size < file_settings.min_large_chunk_upload_threshold or \
+            hasattr(stream, 'seekable') and not stream.seekable() or \
+            not hasattr(stream, 'seek') or not hasattr(stream, 'tell')
+
+        if use_original_upload_path:
+            upload_data_chunks(
+                service=client,
+                uploader_class=DataLakeFileChunkUploader,
+                total_size=length,
+                chunk_size=chunk_size,
+                stream=stream,
+                max_concurrency=max_concurrency,
+                validate_content=validate_content,
+                **kwargs)
+        else:
+            upload_substream_blocks(
+                service=client,
+                uploader_class=DataLakeFileChunkUploader,
+                total_size=length,
+                chunk_size=chunk_size,
+                max_concurrency=max_concurrency,
+                stream=stream,
+                validate_content=validate_content,
+                **kwargs
+            )
+
+        return client.flush_data(position=length,
+                                 path_http_headers=path_http_headers,
+                                 modified_access_conditions=modified_access_conditions,
+                                 close=True,
+                                 cls=return_response_headers,
+                                 **kwargs)
+    except HttpResponseError as error:
+        process_storage_error(error)
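upload_datalake_file is driven by the public DataLakeFileClient.upload_data method; a hedged usage sketch (endpoint, names and key are placeholders):

    from azure.storage.filedatalake import DataLakeFileClient

    file_client = DataLakeFileClient(
        account_url="https://myaccount.dfs.core.windows.net",  # placeholder
        file_system_name="my-file-system",
        file_path="folder/data.bin",
        credential="<account-key>",
    )
    with open("data.bin", "rb") as stream:
        # overwrite=True takes the create-then-flush path above; chunk_size and
        # max_concurrency feed through to the chunked upload helpers.
        file_client.upload_data(stream, overwrite=True, max_concurrency=4)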
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_version.py b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_version.py
new file mode 100644
index 00000000..de61a38b
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_version.py
@@ -0,0 +1,7 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+
+VERSION = "12.19.0"
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/aio/__init__.py b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/aio/__init__.py
new file mode 100644
index 00000000..c24dde8d
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/aio/__init__.py
@@ -0,0 +1,24 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+
+from ._download_async import StorageStreamDownloader
+from .._shared.policies_async import ExponentialRetry, LinearRetry
+from ._data_lake_file_client_async import DataLakeFileClient
+from ._data_lake_directory_client_async import DataLakeDirectoryClient
+from ._file_system_client_async import FileSystemClient
+from ._data_lake_service_client_async import DataLakeServiceClient
+from ._data_lake_lease_async import DataLakeLeaseClient
+
+__all__ = [
+    'DataLakeServiceClient',
+    'FileSystemClient',
+    'DataLakeDirectoryClient',
+    'DataLakeFileClient',
+    'DataLakeLeaseClient',
+    'ExponentialRetry',
+    'LinearRetry',
+    'StorageStreamDownloader'
+]
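A hedged usage sketch for the async exports above (account URL and names are placeholders):

    import asyncio
    from azure.storage.filedatalake.aio import DataLakeServiceClient

    async def main():
        async with DataLakeServiceClient(
            account_url="https://myaccount.dfs.core.windows.net",  # placeholder
            credential="<account-key>",
        ) as service_client:
            file_system_client = service_client.get_file_system_client("my-file-system")
            async for path in file_system_client.get_paths():
                print(path.name)

    asyncio.run(main())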
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/aio/_data_lake_directory_client_async.py b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/aio/_data_lake_directory_client_async.py
new file mode 100644
index 00000000..578f896e
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/aio/_data_lake_directory_client_async.py
@@ -0,0 +1,721 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+# pylint: disable=invalid-overridden-method, docstring-keyword-should-match-keyword-only
+
+import functools
+from typing import (
+    Any, Dict, Optional, Union,
+    TYPE_CHECKING
+)
+
+try:
+    from urllib.parse import quote, unquote
+except ImportError:
+    from urllib2 import quote, unquote  # type: ignore
+
+from azure.core.async_paging import AsyncItemPaged
+from azure.core.pipeline import AsyncPipeline
+from azure.core.tracing.decorator import distributed_trace
+from azure.core.tracing.decorator_async import distributed_trace_async
+from .._data_lake_directory_client import DataLakeDirectoryClient as DataLakeDirectoryClientBase
+from .._deserialize import deserialize_dir_properties
+from .._models import DirectoryProperties, FileProperties
+from .._shared.base_client_async import AsyncTransportWrapper
+from ._data_lake_file_client_async import DataLakeFileClient
+from ._list_paths_helper import PathPropertiesPaged
+from ._path_client_async import PathClient
+
+if TYPE_CHECKING:
+    from azure.core.credentials import AzureNamedKeyCredential, AzureSasCredential
+    from azure.core.credentials_async import AsyncTokenCredential
+    from datetime import datetime
+    from .._models import PathProperties
+
+
+class DataLakeDirectoryClient(PathClient, DataLakeDirectoryClientBase):
+    """A client to interact with the DataLake directory, even if the directory may not yet exist.
+
+    For operations relating to a specific subdirectory or file under the directory, a directory client or file client
+    can be retrieved using the :func:`~get_sub_directory_client` or :func:`~get_file_client` functions.
+
+    :ivar str url:
+        The full endpoint URL to the file system, including SAS token if used.
+    :ivar str primary_endpoint:
+        The full primary endpoint URL.
+    :ivar str primary_hostname:
+        The hostname of the primary endpoint.
+    :param str account_url:
+        The URI to the storage account.
+    :param file_system_name:
+        The file system for the directory or files.
+    :type file_system_name: str
+    :param directory_name:
+        The whole path of the directory, e.g. {directory under file system}/{directory to interact with}
+    :type directory_name: str
+    :param credential:
+        The credentials with which to authenticate. This is optional if the
+        account URL already has a SAS token. The value can be a SAS token string,
+        an instance of a AzureSasCredential or AzureNamedKeyCredential from azure.core.credentials,
+        an account shared access key, or an instance of a TokenCredentials class from azure.identity.
+        If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential
+        - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError.
+        If using an instance of AzureNamedKeyCredential, "name" should be the storage account name, and "key"
+        should be the storage account key.
+    :type credential:
+        ~azure.core.credentials.AzureNamedKeyCredential or
+        ~azure.core.credentials.AzureSasCredential or
+        ~azure.core.credentials_async.AsyncTokenCredential or
+        str or dict[str, str] or None
+    :keyword str api_version:
+        The Storage API version to use for requests. Default value is the most recent service version that is
+        compatible with the current SDK. Setting to an older version may result in reduced feature compatibility.
+    :keyword str audience: The audience to use when requesting tokens for Azure Active Directory
+        authentication. Only has an effect when credential is of type TokenCredential. The value could be
+        https://storage.azure.com/ (default) or https://<account>.blob.core.windows.net.
+
+    .. admonition:: Example:
+
+        .. literalinclude:: ../samples/datalake_samples_instantiate_client_async.py
+            :start-after: [START instantiate_directory_client_from_conn_str]
+            :end-before: [END instantiate_directory_client_from_conn_str]
+            :language: python
+            :dedent: 4
+            :caption: Creating the DataLakeDirectoryClient from connection string.
+    """
+
+    def __init__(
+        self, account_url: str,
+        file_system_name: str,
+        directory_name: str,
+        credential: Optional[Union[str, Dict[str, str], "AzureNamedKeyCredential", "AzureSasCredential", "AsyncTokenCredential"]] = None,  # pylint: disable=line-too-long
+        **kwargs: Any
+    ) -> None:
+        super(DataLakeDirectoryClient, self).__init__(account_url, file_system_name, directory_name, # pylint: disable=specify-parameter-names-in-call
+                                                      credential=credential, **kwargs)
+
+    @distributed_trace_async
+    async def create_directory(self, metadata=None,  # type: Optional[Dict[str, str]]
+                               **kwargs):
+        # type: (...) -> Dict[str, Union[str, datetime]]
+        """
+        Create a new directory.
+
+        :param metadata:
+            Name-value pairs associated with the directory as metadata.
+        :type metadata: dict(str, str)
+        :keyword ~azure.storage.filedatalake.ContentSettings content_settings:
+            ContentSettings object used to set path properties.
+        :keyword lease:
+            Required if the directory has an active lease. Value can be a DataLakeLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str
+        :keyword str umask:
+            Optional and only valid if Hierarchical Namespace is enabled for the account.
+            When creating a file or directory and the parent folder does not have a default ACL,
+            the umask restricts the permissions of the file or directory to be created.
+            The resulting permission is given by p & ^u, where p is the permission and u is the umask.
+            For example, if p is 0777 and u is 0057, then the resulting permission is 0720.
+            The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027.
+            The umask must be specified in 4-digit octal notation (e.g. 0766).
+        :keyword str owner:
+            The owner of the file or directory.
+        :keyword str group:
+            The owning group of the file or directory.
+        :keyword str acl:
+            Sets POSIX access control rights on files and directories. The value is a
+            comma-separated list of access control entries. Each access control entry (ACE) consists of a
+            scope, a type, a user or group identifier, and permissions in the format
+            "[scope:][type]:[id]:[permissions]".
+        :keyword str lease_id:
+            Proposed lease ID, in a GUID string format. The DataLake service returns
+            400 (Invalid request) if the proposed lease ID is not in the correct format.
+        :keyword int lease_duration:
+            Specifies the duration of the lease, in seconds, or negative one
+            (-1) for a lease that never expires. A non-infinite lease can be
+            between 15 and 60 seconds. A lease duration cannot be changed
+            using renew or change.
+        :keyword str permissions:
+            Optional and only valid if Hierarchical Namespace
+            is enabled for the account. Sets POSIX access permissions for the file
+            owner, the file owning group, and others. Each class may be granted
+            read, write, or execute permission.  The sticky bit is also supported.
+            Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are
+            supported.
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword ~azure.storage.filedatalake.CustomerProvidedEncryptionKey cpk:
+            Encrypts the data on the service-side with the given key.
+            Use of customer-provided keys must be done over HTTPS.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :return: A dictionary of response headers.
+        :rtype: dict[str, str] or dict[str, ~datetime.datetime]
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_directory_async.py
+                :start-after: [START create_directory]
+                :end-before: [END create_directory]
+                :language: python
+                :dedent: 8
+                :caption: Create directory.
+        """
+        return await self._create('directory', metadata=metadata, **kwargs)
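A hedged sketch of the call documented above. Note the umask arithmetic from the docstring: permissions 0777 with umask 0057 give 0777 & ~0057 == 0720. Endpoint and account values are placeholders:

    import asyncio
    from azure.storage.filedatalake.aio import DataLakeDirectoryClient

    async def main():
        async with DataLakeDirectoryClient(
            account_url="https://myaccount.dfs.core.windows.net",  # placeholder
            file_system_name="my-file-system",
            directory_name="parent/child",
            credential="<account-key>",
        ) as directory_client:
            headers = await directory_client.create_directory(
                metadata={"project": "demo"},
                permissions="0777",
                umask="0057",  # effective permission: 0720
            )
            print(headers["etag"])

    asyncio.run(main())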
+
+    @distributed_trace_async
+    async def exists(self, **kwargs):
+        # type: (**Any) -> bool
+        """
+        Returns True if the directory exists, False otherwise.
+
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :returns: True if a directory exists, False otherwise.
+        :rtype: bool
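+
+        A minimal usage sketch, assuming ``directory_client`` is an existing
+        DataLakeDirectoryClient (the name is illustrative):
+
+        .. code-block:: python
+
+            if not await directory_client.exists():
+                await directory_client.create_directory()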
+        """
+        return await self._exists(**kwargs)
+
+    @distributed_trace_async
+    async def delete_directory(self, **kwargs):
+        # type: (...) -> None
+        """
+        Marks the specified directory for deletion.
+
+        :keyword lease:
+            Required if the directory has an active lease. Value can be a LeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :returns: None.
+        :rtype: None
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_directory_async.py
+                :start-after: [START delete_directory]
+                :end-before: [END delete_directory]
+                :language: python
+                :dedent: 4
+                :caption: Delete directory.
+        """
+        return await self._delete(recursive=True, **kwargs)
+
+    @distributed_trace_async
+    async def get_directory_properties(self, **kwargs):
+        # type: (**Any) -> DirectoryProperties
+        """Returns all user-defined metadata, standard HTTP properties, and
+        system properties for the directory. It does not return the content of the directory.
+
+        :keyword lease:
+            Required if the directory or file has an active lease. Value can be a DataLakeLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword ~azure.storage.filedatalake.CustomerProvidedEncryptionKey cpk:
+            Decrypts the data on the service-side with the given key.
+            Use of customer-provided keys must be done over HTTPS.
+            Required if the directory was created with a customer-provided key.
+        :keyword bool upn:
+            If True, the user identity values returned in the x-ms-owner, x-ms-group,
+            and x-ms-acl response headers will be transformed from Azure Active Directory Object IDs to User
+            Principal Names in the owner, group, and acl fields of
+            :class:`~azure.storage.filedatalake.DirectoryProperties`. If False, the values will be returned
+            as Azure Active Directory Object IDs. The default value is False. Note that group and application
+            Object IDs are not translated because they do not have unique friendly names.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :returns:
+            Information including user-defined metadata, standard HTTP properties,
+            and system properties for the file or directory.
+        :rtype: DirectoryProperties
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_directory_async.py
+                :start-after: [START get_directory_properties]
+                :end-before: [END get_directory_properties]
+                :language: python
+                :dedent: 4
+                :caption: Getting the properties for a file/directory.
+        """
+        upn = kwargs.pop('upn', None)
+        if upn:
+            headers = kwargs.pop('headers', {})
+            headers['x-ms-upn'] = str(upn)
+            kwargs['headers'] = headers
+        return await self._get_path_properties(cls=deserialize_dir_properties, **kwargs)
+
+    @distributed_trace_async
+    async def rename_directory(self, new_name,  # type: str
+                               **kwargs):
+        # type: (...) -> DataLakeDirectoryClient
+        """
+        Rename the source directory.
+
+        :param str new_name:
+            The new name to which the directory will be renamed.
+            The value must have the following format: "{filesystem}/{directory}/{subdirectory}".
+        :keyword source_lease:
+            A lease ID for the source path. If specified,
+            the source path must have an active lease and the lease ID must
+            match.
+        :paramtype source_lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str
+        :keyword lease:
+            Required if the file/directory has an active lease. Value can be a LeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword ~datetime.datetime source_if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime source_if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str source_etag:
+            The source ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions source_match_condition:
+            The source match condition to use upon the etag.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :returns: DataLakeDirectoryClient containing the renamed directory.
+        :rtype: ~azure.storage.filedatalake.aio.DataLakeDirectoryClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_directory_async.py
+                :start-after: [START rename_directory]
+                :end-before: [END rename_directory]
+                :language: python
+                :dedent: 4
+                :caption: Rename the source directory.
+        """
+        new_file_system, new_path, new_dir_sas = self._parse_rename_path(new_name)
+
+        new_directory_client = DataLakeDirectoryClient(
+            f"{self.scheme}://{self.primary_hostname}", new_file_system, directory_name=new_path,
+            credential=self._raw_credential or new_dir_sas,
+            _hosts=self._hosts, _configuration=self._config, _pipeline=self._pipeline)
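+        # The rename source is the url-encoded "/{filesystem}/{path}" of this
+        # directory, with any SAS query string preserved.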
+        await new_directory_client._rename_path(  # pylint: disable=protected-access
+            f'/{quote(unquote(self.file_system_name))}/{quote(unquote(self.path_name))}{self._query_str}', **kwargs)
+        return new_directory_client
+
+    @distributed_trace_async
+    async def create_sub_directory(self, sub_directory,  # type: Union[DirectoryProperties, str]
+                                   metadata=None,  # type: Optional[Dict[str, str]]
+                                   **kwargs):
+        # type: (...) -> DataLakeDirectoryClient
+        """
+        Create a subdirectory and return the subdirectory client to be interacted with.
+
+        :param sub_directory:
+            The directory with which to interact. This can either be the name of the directory,
+            or an instance of DirectoryProperties.
+        :type sub_directory: str or ~azure.storage.filedatalake.DirectoryProperties
+        :param metadata:
+            Name-value pairs associated with the directory as metadata.
+        :type metadata: dict(str, str)
+        :keyword ~azure.storage.filedatalake.ContentSettings content_settings:
+            ContentSettings object used to set path properties.
+        :keyword lease:
+            Required if the file has an active lease. Value can be a DataLakeLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str
+        :keyword str umask:
+            Optional and only valid if Hierarchical Namespace is enabled for the account.
+            When creating a file or directory and the parent folder does not have a default ACL,
+            the umask restricts the permissions of the file or directory to be created.
+            The resulting permission is given by p & ^u, where p is the permission and u is the umask.
+            For example, if p is 0777 and u is 0057, then the resulting permission is 0720.
+            The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027.
+            The umask must be specified in 4-digit octal notation (e.g. 0766).
+        :keyword str owner:
+            The owner of the file or directory.
+        :keyword str group:
+            The owning group of the file or directory.
+        :keyword str acl:
+            Sets POSIX access control rights on files and directories. The value is a
+            comma-separated list of access control entries. Each access control entry (ACE) consists of a
+            scope, a type, a user or group identifier, and permissions in the format
+            "[scope:][type]:[id]:[permissions]".
+        :keyword str lease_id:
+            Proposed lease ID, in a GUID string format. The DataLake service returns
+            400 (Invalid request) if the proposed lease ID is not in the correct format.
+        :keyword int lease_duration:
+            Specifies the duration of the lease, in seconds, or negative one
+            (-1) for a lease that never expires. A non-infinite lease can be
+            between 15 and 60 seconds. A lease duration cannot be changed
+            using renew or change.
+        :keyword str permissions:
+            Optional and only valid if Hierarchical Namespace
+            is enabled for the account. Sets POSIX access permissions for the file
+            owner, the file owning group, and others. Each class may be granted
+            read, write, or execute permission.  The sticky bit is also supported.
+            Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are
+            supported.
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword ~azure.storage.filedatalake.CustomerProvidedEncryptionKey cpk:
+            Encrypts the data on the service-side with the given key.
+            Use of customer-provided keys must be done over HTTPS.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :returns: DataLakeDirectoryClient for the subdirectory.
+        :rtype: ~azure.storage.filedatalake.aio.DataLakeDirectoryClient
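+
+        A small sketch of the umask arithmetic described above, using Python's
+        octal literals (values are illustrative):
+
+        .. code-block:: python
+
+            p, u = 0o777, 0o057   # requested permission and umask
+            resulting = p & ~u    # clear the bits set in the umask
+            assert oct(resulting) == '0o720'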
+        """
+        subdir = self.get_sub_directory_client(sub_directory)
+        await subdir.create_directory(metadata=metadata, **kwargs)
+        return subdir
+
+    @distributed_trace_async
+    async def delete_sub_directory(self, sub_directory,  # type: Union[DirectoryProperties, str]
+                                   **kwargs):
+        # type: (...) -> DataLakeDirectoryClient
+        """
+        Marks the specified subdirectory for deletion.
+
+        :param sub_directory:
+            The directory with which to interact. This can either be the name of the directory,
+            or an instance of DirectoryProperties.
+        :type sub_directory: str or ~azure.storage.filedatalake.DirectoryProperties
+        :keyword lease:
+            Required if the directory has an active lease. Value can be a LeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :returns: DataLakeDirectoryClient for the subdirectory.
+        :rtype: ~azure.storage.filedatalake.aio.DataLakeDirectoryClient
+        """
+        subdir = self.get_sub_directory_client(sub_directory)
+        await subdir.delete_directory(**kwargs)
+        return subdir
+
+    @distributed_trace_async
+    async def create_file(self, file,  # type: Union[FileProperties, str]
+                          **kwargs):
+        # type: (...) -> DataLakeFileClient
+        """
+        Create a new file and return the file client to be interacted with.
+
+        :param file:
+            The file with which to interact. This can either be the name of the file,
+            or an instance of FileProperties.
+        :type file: str or ~azure.storage.filedatalake.FileProperties
+        :keyword ~azure.storage.filedatalake.ContentSettings content_settings:
+            ContentSettings object used to set path properties.
+        :keyword metadata:
+            Name-value pairs associated with the file as metadata.
+        :paramtype metadata: dict(str, str)
+        :keyword lease:
+            Required if the file has an active lease. Value can be a DataLakeLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str
+        :keyword str umask:
+            Optional and only valid if Hierarchical Namespace is enabled for the account.
+            When creating a file or directory and the parent folder does not have a default ACL,
+            the umask restricts the permissions of the file or directory to be created.
+            The resulting permission is given by p & ~u, where p is the permission and u is the umask.
+            For example, if p is 0777 and u is 0057, then the resulting permission is 0720.
+            The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027.
+            The umask must be specified in 4-digit octal notation (e.g. 0766).
+        :keyword str owner:
+            The owner of the file or directory.
+        :keyword str group:
+            The owning group of the file or directory.
+        :keyword str acl:
+            Sets POSIX access control rights on files and directories. The value is a
+            comma-separated list of access control entries. Each access control entry (ACE) consists of a
+            scope, a type, a user or group identifier, and permissions in the format
+            "[scope:][type]:[id]:[permissions]".
+        :keyword str lease_id:
+            Proposed lease ID, in a GUID string format. The DataLake service returns
+            400 (Invalid request) if the proposed lease ID is not in the correct format.
+        :keyword int lease_duration:
+            Specifies the duration of the lease, in seconds, or negative one
+            (-1) for a lease that never expires. A non-infinite lease can be
+            between 15 and 60 seconds. A lease duration cannot be changed
+            using renew or change.
+        :keyword expires_on:
+            The time at which the file should expire.
+            If the type of expires_on is an int, expiration time will be set
+            as the number of milliseconds elapsed from creation time.
+            If the type of expires_on is datetime, expiration time will be set
+            absolute to the time provided. If no time zone info is provided, this
+            will be interpreted as UTC.
+        :paramtype expires_on: datetime or int
+        :keyword str permissions:
+            Optional and only valid if Hierarchical Namespace
+            is enabled for the account. Sets POSIX access permissions for the file
+            owner, the file owning group, and others. Each class may be granted
+            read, write, or execute permission.  The sticky bit is also supported.
+            Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are
+            supported.
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword ~azure.storage.filedatalake.CustomerProvidedEncryptionKey cpk:
+            Encrypts the data on the service-side with the given key.
+            Use of customer-provided keys must be done over HTTPS.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :returns: DataLakeFileClient with the new file.
+        :rtype: ~azure.storage.filedatalake.aio.DataLakeFileClient
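+
+        A minimal usage sketch (``directory_client`` and the file name are
+        illustrative):
+
+        .. code-block:: python
+
+            file_client = await directory_client.create_file("report.txt")
+            await file_client.upload_data(b"hello", overwrite=True)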
+        """
+        file_client = self.get_file_client(file)
+        await file_client.create_file(**kwargs)
+        return file_client
+
+    @distributed_trace
+    def get_paths(
+        self, *,
+        recursive: bool = True,
+        max_results: Optional[int] = None,
+        upn: Optional[bool] = None,
+        timeout: Optional[int] = None,
+        **kwargs: Any
+    ) -> AsyncItemPaged["PathProperties"]:
+        """Returns an async generator to list the paths under specified file system and directory.
+        The generator will lazily follow the continuation tokens returned by the service.
+
+        :keyword bool recursive: Set to True to list paths recursively, or False to list
+            only the paths directly under the directory. The default value is True.
+        :keyword Optional[int] max_results: An optional value that specifies the maximum
+            number of items to return per page. If omitted or greater than 5,000, the
+            response will include up to 5,000 items per page.
+        :keyword Optional[bool] upn:
+            If True, the user identity values returned in the x-ms-owner, x-ms-group,
+            and x-ms-acl response headers will be transformed from Azure Active Directory Object IDs to User
+            Principal Names in the owner, group, and acl fields of
+            :class:`~azure.storage.filedatalake.PathProperties`. If False, the values will be returned
+            as Azure Active Directory Object IDs. The default value is None. Note that group and application
+            Object IDs are not translated because they do not have unique friendly names.
+        :keyword Optional[int] timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_. The default value is None.
+        :returns: An iterable (auto-paging) response of PathProperties.
+        :rtype: ~azure.core.paging.AsyncItemPaged[~azure.storage.filedatalake.PathProperties]
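+
+        A minimal usage sketch showing how the pager is consumed
+        (``directory_client`` is illustrative):
+
+        .. code-block:: python
+
+            async for path in directory_client.get_paths(recursive=True):
+                print(path.name, path.is_directory)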
+        """
+        hostname = self._hosts[self._location_mode]
+        url = f"{self.scheme}://{hostname}/{quote(self.file_system_name)}"
+        client = self._build_generated_client(url)
+        command = functools.partial(
+            client.file_system.list_paths,
+            path=self.path_name,
+            timeout=timeout,
+            **kwargs
+        )
+        return AsyncItemPaged(
+            command, recursive, path=self.path_name, max_results=max_results,
+            upn=upn, page_iterator_class=PathPropertiesPaged, **kwargs)
+
+    def get_file_client(self, file  # type: Union[FileProperties, str]
+                        ):
+        # type: (...) -> DataLakeFileClient
+        """Get a client to interact with the specified file.
+
+        The file need not already exist.
+
+        :param file:
+            The file with which to interact. This can either be the name of the file,
+            or an instance of FileProperties, e.g. "directory/subdirectory/file".
+        :type file: str or ~azure.storage.filedatalake.FileProperties
+        :returns: A DataLakeFileClient.
+        :rtype: ~azure.storage.filedatalake.aio.DataLakeFileClient
+        """
+        try:
+            file_path = file.get('name')
+        except AttributeError:
+            file_path = self.path_name + '/' + str(file)
+
+        _pipeline = AsyncPipeline(
+            transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access
+            policies=self._pipeline._impl_policies # pylint: disable = protected-access
+        )
+        return DataLakeFileClient(
+            self.url, self.file_system_name, file_path=file_path, credential=self._raw_credential,
+            api_version=self.api_version,
+            _hosts=self._hosts, _configuration=self._config, _pipeline=_pipeline)
+
+    def get_sub_directory_client(self, sub_directory  # type: Union[DirectoryProperties, str]
+                                 ):
+        # type: (...) -> DataLakeDirectoryClient
+        """Get a client to interact with the specified subdirectory of the current directory.
+
+        The subdirectory need not already exist.
+
+        :param sub_directory:
+            The directory with which to interact. This can either be the name of the directory,
+            or an instance of DirectoryProperties.
+        :type sub_directory: str or ~azure.storage.filedatalake.DirectoryProperties
+        :returns: A DataLakeDirectoryClient.
+        :rtype: ~azure.storage.filedatalake.aio.DataLakeDirectoryClient
+        """
+        try:
+            subdir_path = sub_directory.get('name')
+        except AttributeError:
+            subdir_path = self.path_name + '/' + str(sub_directory)
+
+        _pipeline = AsyncPipeline(
+            transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access
+            policies=self._pipeline._impl_policies # pylint: disable = protected-access
+        )
+        return DataLakeDirectoryClient(
+            self.url, self.file_system_name, directory_name=subdir_path, credential=self._raw_credential,
+            api_version=self.api_version,
+            _hosts=self._hosts, _configuration=self._config, _pipeline=_pipeline)
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/aio/_data_lake_file_client_async.py b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/aio/_data_lake_file_client_async.py
new file mode 100644
index 00000000..9b00b0b6
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/aio/_data_lake_file_client_async.py
@@ -0,0 +1,735 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+# pylint: disable=invalid-overridden-method, docstring-keyword-should-match-keyword-only
+
+from datetime import datetime
+from typing import (
+    Any, AnyStr, AsyncIterable, Dict, IO, Iterable, Optional, Union,
+    TYPE_CHECKING)
+from urllib.parse import quote, unquote
+
+from azure.core.exceptions import HttpResponseError
+from azure.core.tracing.decorator_async import distributed_trace_async
+from ._download_async import StorageStreamDownloader
+from ._path_client_async import PathClient
+from .._data_lake_file_client import DataLakeFileClient as DataLakeFileClientBase
+from .._serialize import convert_datetime_to_rfc1123
+from .._deserialize import process_storage_error, deserialize_file_properties
+from .._models import FileProperties
+from ..aio._upload_helper import upload_datalake_file
+
+if TYPE_CHECKING:
+    from azure.core.credentials import AzureNamedKeyCredential, AzureSasCredential
+    from azure.core.credentials_async import AsyncTokenCredential
+    from .._models import ContentSettings
+
+
+class DataLakeFileClient(PathClient, DataLakeFileClientBase):
+    """A client to interact with the DataLake file, even if the file may not yet exist.
+
+    :ivar str url:
+        The full endpoint URL to the file system, including SAS token if used.
+    :ivar str primary_endpoint:
+        The full primary endpoint URL.
+    :ivar str primary_hostname:
+        The hostname of the primary endpoint.
+    :param str account_url:
+        The URI to the storage account.
+    :param file_system_name:
+        The file system for the directory or files.
+    :type file_system_name: str
+    :param file_path:
+        The whole file path, used to interact with a specific file.
+        eg. "{directory}/{subdirectory}/{file}"
+    :type file_path: str
+    :param credential:
+        The credentials with which to authenticate. This is optional if the
+        account URL already has a SAS token. The value can be a SAS token string,
+        an instance of a AzureSasCredential or AzureNamedKeyCredential from azure.core.credentials,
+        an account shared access key, or an instance of a TokenCredentials class from azure.identity.
+        If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential
+        - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError.
+        If using an instance of AzureNamedKeyCredential, "name" should be the storage account name, and "key"
+        should be the storage account key.
+    :type credential:
+        ~azure.core.credentials.AzureNamedKeyCredential or
+        ~azure.core.credentials.AzureSasCredential or
+        ~azure.core.credentials_async.AsyncTokenCredential or
+        str or dict[str, str] or None
+    :keyword str api_version:
+        The Storage API version to use for requests. Default value is the most recent service version that is
+        compatible with the current SDK. Setting to an older version may result in reduced feature compatibility.
+    :keyword str audience: The audience to use when requesting tokens for Azure Active Directory
+        authentication. Only has an effect when credential is of type TokenCredential. The value could be
+        https://storage.azure.com/ (default) or https://<account>.blob.core.windows.net.
+
+    .. admonition:: Example:
+
+        .. literalinclude:: ../samples/datalake_samples_instantiate_client_async.py
+            :start-after: [START instantiate_file_client_from_conn_str]
+            :end-before: [END instantiate_file_client_from_conn_str]
+            :language: python
+            :dedent: 4
+            :caption: Creating the DataLakeFileClient from connection string.
+    """
+
+    def __init__(
+        self, account_url: str,
+        file_system_name: str,
+        file_path: str,
+        credential: Optional[Union[str, Dict[str, str], "AzureNamedKeyCredential", "AzureSasCredential", "AsyncTokenCredential"]] = None,  # pylint: disable=line-too-long
+        **kwargs: Any
+    ) -> None:
+        super(DataLakeFileClient, self).__init__(account_url, file_system_name, path_name=file_path,
+                                                 credential=credential, **kwargs)
+
+    @distributed_trace_async
+    async def create_file(self, content_settings=None,  # type: Optional[ContentSettings]
+                          metadata=None,  # type: Optional[Dict[str, str]]
+                          **kwargs):
+        # type: (...) -> Dict[str, Union[str, datetime]]
+        """
+        Create a new file.
+
+        :param ~azure.storage.filedatalake.ContentSettings content_settings:
+            ContentSettings object used to set path properties.
+        :param metadata:
+            Name-value pairs associated with the file as metadata.
+        :type metadata: Optional[dict[str, str]]
+        :keyword lease:
+            Required if the file has an active lease. Value can be a DataLakeLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str
+        :keyword str umask:
+            Optional and only valid if Hierarchical Namespace is enabled for the account.
+            When creating a file or directory and the parent folder does not have a default ACL,
+            the umask restricts the permissions of the file or directory to be created.
+            The resulting permission is given by p & ~u, where p is the permission and u is the umask.
+            For example, if p is 0777 and u is 0057, then the resulting permission is 0720.
+            The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027.
+            The umask must be specified in 4-digit octal notation (e.g. 0766).
+        :keyword str owner:
+            The owner of the file or directory.
+        :keyword str group:
+            The owning group of the file or directory.
+        :keyword str acl:
+            Sets POSIX access control rights on files and directories. The value is a
+            comma-separated list of access control entries. Each access control entry (ACE) consists of a
+            scope, a type, a user or group identifier, and permissions in the format
+            "[scope:][type]:[id]:[permissions]".
+        :keyword str lease_id:
+            Proposed lease ID, in a GUID string format. The DataLake service returns
+            400 (Invalid request) if the proposed lease ID is not in the correct format.
+        :keyword int lease_duration:
+            Specifies the duration of the lease, in seconds, or negative one
+            (-1) for a lease that never expires. A non-infinite lease can be
+            between 15 and 60 seconds. A lease duration cannot be changed
+            using renew or change.
+        :keyword expires_on:
+            The time at which the file should expire.
+            If the type of expires_on is an int, expiration time will be set
+            as the number of milliseconds elapsed from creation time.
+            If the type of expires_on is datetime, expiration time will be set
+            absolute to the time provided. If no time zone info is provided, this
+            will be interpreted as UTC.
+        :paramtype expires_on: datetime or int
+        :keyword str permissions:
+            Optional and only valid if Hierarchical Namespace
+            is enabled for the account. Sets POSIX access permissions for the file
+            owner, the file owning group, and others. Each class may be granted
+            read, write, or execute permission.  The sticky bit is also supported.
+            Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are
+            supported.
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword ~azure.storage.filedatalake.CustomerProvidedEncryptionKey cpk:
+            Encrypts the data on the service-side with the given key.
+            Use of customer-provided keys must be done over HTTPS.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :keyword str encryption_context:
+            Specifies the encryption context to set on the file.
+        :returns: A dictionary of response headers (ETag and last modified).
+        :rtype: dict[str, str] or dict[str, ~datetime.datetime]
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_upload_download_async.py
+                :start-after: [START create_file]
+                :end-before: [END create_file]
+                :language: python
+                :dedent: 4
+                :caption: Create file.
+        """
+        return await self._create('file', content_settings=content_settings, metadata=metadata, **kwargs)
+
+    @distributed_trace_async
+    async def exists(self, **kwargs):
+        # type: (**Any) -> bool
+        """
+        Returns True if the file exists, False otherwise.
+
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :returns: True if a file exists, False otherwise.
+        :rtype: bool
+        """
+        return await self._exists(**kwargs)
+
+    @distributed_trace_async
+    async def delete_file(self, **kwargs):
+        # type: (...) -> None
+        """
+        Marks the specified file for deletion.
+
+        :keyword lease:
+            Required if the file has an active lease. Value can be a LeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :returns: None.
+        :rtype: None
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_upload_download_async.py
+                :start-after: [START delete_file]
+                :end-before: [END delete_file]
+                :language: python
+                :dedent: 4
+                :caption: Delete file.
+        """
+        return await self._delete(**kwargs)
+
+    @distributed_trace_async
+    async def get_file_properties(self, **kwargs):
+        # type: (**Any) -> FileProperties
+        """Returns all user-defined metadata, standard HTTP properties, and
+        system properties for the file. It does not return the content of the file.
+
+        :keyword lease:
+            Required if the directory or file has an active lease. Value can be a DataLakeLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword ~azure.storage.filedatalake.CustomerProvidedEncryptionKey cpk:
+            Decrypts the data on the service-side with the given key.
+            Use of customer-provided keys must be done over HTTPS.
+            Required if the file was created with a customer-provided key.
+        :keyword bool upn:
+            If True, the user identity values returned in the x-ms-owner, x-ms-group,
+            and x-ms-acl response headers will be transformed from Azure Active Directory Object IDs to User
+            Principal Names in the owner, group, and acl fields of
+            :class:`~azure.storage.filedatalake.FileProperties`. If False, the values will be returned
+            as Azure Active Directory Object IDs. The default value is False. Note that group and application
+            Object IDs are not translated because they do not have unique friendly names.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :returns: All user-defined metadata, standard HTTP properties, and system properties for the file.
+        :rtype: ~azure.storage.filedatalake.FileProperties
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_upload_download_async.py
+                :start-after: [START get_file_properties]
+                :end-before: [END get_file_properties]
+                :language: python
+                :dedent: 4
+                :caption: Getting the properties for a file.
+        """
+        return await self._get_path_properties(cls=deserialize_file_properties, **kwargs)
+
+    @distributed_trace_async
+    async def set_file_expiry(self, expiry_options,  # type: str
+                              expires_on=None,  # type: Optional[Union[datetime, int]]
+                              **kwargs):
+        # type: (...) -> None
+        """Sets the time a file will expire and be deleted.
+
+        :param str expiry_options:
+            Required. Indicates mode of the expiry time.
+            Possible values include: 'NeverExpire', 'RelativeToCreation', 'RelativeToNow', 'Absolute'
+        :param datetime or int expires_on:
+            The time at which the file should expire.
+            When expiry_options is 'RelativeToCreation' or 'RelativeToNow',
+            expires_on should be an int giving the offset in milliseconds.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :rtype: None
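+
+        A minimal sketch of the two expiry modes (``file_client`` is illustrative):
+
+        .. code-block:: python
+
+            from datetime import datetime, timedelta, timezone
+
+            # Absolute: expire at a specific UTC time.
+            await file_client.set_file_expiry(
+                "Absolute",
+                expires_on=datetime.now(timezone.utc) + timedelta(days=1))
+
+            # Relative: expire one hour (3600000 ms) after the current time.
+            await file_client.set_file_expiry("RelativeToNow", expires_on=3600000)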
+        """
+        if isinstance(expires_on, datetime):
+            expires_on = convert_datetime_to_rfc1123(expires_on)
+        elif expires_on is not None:
+            expires_on = str(expires_on)
+        await self._datalake_client_for_blob_operation.path.set_expiry(expiry_options, expires_on=expires_on,
+                                                                       **kwargs)
+
+    @distributed_trace_async
+    async def upload_data(
+            self, data: Union[bytes, str, Iterable[AnyStr], AsyncIterable[AnyStr], IO[AnyStr]],
+            length: Optional[int] = None,
+            overwrite: Optional[bool] = False,
+            **kwargs
+        ) -> Dict[str, Any]:
+        """
+        Upload data to a file.
+
+        :param data: Content to be uploaded to the file.
+        :type data: bytes, str, Iterable[AnyStr], AsyncIterable[AnyStr], or IO[AnyStr]
+        :param int length: Size of the data in bytes.
+        :param bool overwrite: Whether to overwrite an existing file. The default value is False.
+        :keyword ~azure.storage.filedatalake.ContentSettings content_settings:
+            ContentSettings object used to set path properties.
+        :keyword metadata:
+            Name-value pairs associated with the file as metadata.
+        :paramtype metadata: dict[str, str] or None
+        :keyword ~azure.storage.filedatalake.DataLakeLeaseClient or str lease:
+            Required if the file has an active lease. Value can be a DataLakeLeaseClient object
+            or the lease ID as a string.
+        :keyword str umask: Optional and only valid if Hierarchical Namespace is enabled for the account.
+            When creating a file or directory and the parent folder does not have a default ACL,
+            the umask restricts the permissions of the file or directory to be created.
+            The resulting permission is given by p & ~u, where p is the permission and u is the umask.
+            For example, if p is 0777 and u is 0057, then the resulting permission is 0720.
+            The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027.
+            The umask must be specified in 4-digit octal notation (e.g. 0766).
+        :keyword str permissions: Optional and only valid if Hierarchical Namespace
+         is enabled for the account. Sets POSIX access permissions for the file
+         owner, the file owning group, and others. Each class may be granted
+         read, write, or execute permission.  The sticky bit is also supported.
+         Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are
+         supported.
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword bool validate_content:
+            If true, calculates an MD5 hash for each chunk of the file. The storage
+            service checks the hash of the content that has arrived with the hash
+            that was sent. This is primarily valuable for detecting bitflips on
+            the wire if using http instead of https, as https (the default) will
+            already validate. Note that this MD5 hash is not stored with the
+            blob. Also note that if enabled, the memory-efficient upload algorithm
+            will not be used because computing the MD5 hash requires buffering
+            entire blocks, and doing so defeats the purpose of the memory-efficient algorithm.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword ~azure.storage.filedatalake.CustomerProvidedEncryptionKey cpk:
+            Encrypts the data on the service-side with the given key.
+            Use of customer-provided keys must be done over HTTPS.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_. This method may make multiple calls to the service and
+            the timeout will apply to each call individually.
+        :keyword int max_concurrency:
+            Maximum number of parallel connections to use when transferring the file in chunks.
+            This option does not affect the underlying connection pool, and may
+            require a separate configuration of the connection pool.
+        :keyword int chunk_size:
+            The maximum chunk size for uploading a file in chunks.
+            Defaults to 100*1024*1024, or 100MB.
+        :keyword str encryption_context:
+            Specifies the encryption context to set on the file.
+        :return: A dictionary of response headers (including Etag and last modified).
+        :rtype: dict[str, Any]
+        """
+        options = self._upload_options(
+            data,
+            length=length,
+            overwrite=overwrite,
+            **kwargs)
+        return await upload_datalake_file(**options)
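+
+    # A minimal usage sketch (illustrative comment only, not part of the SDK):
+    # uploading a local file with upload_data. The connection string, file system
+    # and path names below are placeholders.
+    #
+    #     from azure.storage.filedatalake.aio import DataLakeFileClient
+    #
+    #     async def upload_example(conn_str: str) -> None:
+    #         async with DataLakeFileClient.from_connection_string(
+    #                 conn_str, "my-filesystem", "folder/data.csv") as file_client:
+    #             with open("data.csv", "rb") as source:
+    #                 # overwrite=True replaces any existing file at this path.
+    #                 await file_client.upload_data(source, overwrite=True)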
+
+    @distributed_trace_async
+    async def append_data(self, data,  # type: Union[bytes, str, Iterable[AnyStr], IO[AnyStr]]
+                          offset,  # type: int
+                          length=None,  # type: Optional[int]
+                          **kwargs):
+        # type: (...) -> Dict[str, Union[str, datetime, int]]
+        """Append data to the file.
+
+        :param data: Content to be appended to the file.
+        :type data: bytes, str, Iterable[AnyStr], or IO[AnyStr]
+        :param int offset: The offset at which the data is to be appended.
+        :param length: Size of the data in bytes.
+        :type length: int or None
+        :keyword bool flush:
+            If true, will commit the data after it is appended.
+        :keyword bool validate_content:
+            If true, calculates an MD5 hash of the block content. The storage
+            service checks the hash of the content that has arrived
+            with the hash that was sent. This is primarily valuable for detecting
+            bitflips on the wire if using http instead of https as https (the default)
+            will already validate. Note that this MD5 hash is not stored with the
+            file.
+        :keyword lease_action:
+            Used to perform lease operations along with appending data.
+
+            "acquire" - Acquire a lease.
+            "auto-renew" - Re-new an existing lease.
+            "release" - Release the lease once the operation is complete. Requires `flush=True`.
+            "acquire-release" - Acquire a lease and release it once the operations is complete. Requires `flush=True`.
+        :paramtype lease_action: Literal["acquire", "auto-renew", "release", "acquire-release"]
+        :keyword int lease_duration:
+            Valid if `lease_action` is set to "acquire" or "acquire-release".
+
+            Specifies the duration of the lease, in seconds, or negative one
+            (-1) for a lease that never expires. A non-infinite lease can be
+            between 15 and 60 seconds. A lease duration cannot be changed
+            using renew or change. Default is -1 (infinite lease).
+        :keyword lease:
+            Required if the file has an active lease or if `lease_action` is set to "acquire" or "acquire-release".
+            If the file has an existing lease, this will be used to access the file. If acquiring a new lease,
+            this will be used as the new lease id.
+            Value can be a DataLakeLeaseClient object or the lease ID as a string.
+        :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
+        :keyword ~azure.storage.filedatalake.CustomerProvidedEncryptionKey cpk:
+            Encrypts the data on the service-side with the given key.
+            Use of customer-provided keys must be done over HTTPS.
+        :returns: A dictionary of response headers.
+        :rtype: dict[str, str], dict[str, ~datetime.datetime], or dict[str, int]
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_upload_download_async.py
+                :start-after: [START append_data]
+                :end-before: [END append_data]
+                :language: python
+                :dedent: 4
+                :caption: Append data to the file.
+        """
+        options = self._append_data_options(
+            data=data,
+            offset=offset,
+            scheme=self.scheme,
+            length=length,
+            **kwargs)
+        try:
+            return await self._client.path.append_data(**options)
+        except HttpResponseError as error:
+            process_storage_error(error)
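+
+    # A minimal usage sketch (illustrative comment only): appending two blocks to an
+    # existing file. `file_client` is assumed to be an async DataLakeFileClient for a
+    # file already created, e.g. via create_file().
+    #
+    #     data = b"abc"
+    #     await file_client.append_data(data, offset=0, length=len(data))
+    #     more = b"def"
+    #     await file_client.append_data(more, offset=len(data), length=len(more))
+    #     # Appended data is not readable until it is committed via flush_data.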
+
+    @distributed_trace_async
+    async def flush_data(self, offset,  # type: int
+                         retain_uncommitted_data=False,  # type: Optional[bool]
+                         **kwargs):
+        # type: (...) -> Dict[str, Union[str, datetime]]
+        """ Commit the previous appended data.
+
+        :param int offset: offset is equal to the length of the file after commit the
+            previous appended data.
+        :param bool retain_uncommitted_data: Valid only for flush operations.  If
+            "true", uncommitted data is retained after the flush operation
+            completes; otherwise, the uncommitted data is deleted after the flush
+            operation.  The default is false.  Data at offsets less than the
+            specified position are written to the file when flush succeeds, but
+            this optional parameter allows data after the flush position to be
+            retained for a future flush operation.
+        :keyword ~azure.storage.filedatalake.ContentSettings content_settings:
+            ContentSettings object used to set path properties.
+        :keyword bool close: Azure Storage Events allow applications to receive
+            notifications when files change. When Azure Storage Events are
+            enabled, a file changed event is raised. This event has a property
+            indicating whether this is the final change to distinguish the
+            difference between an intermediate flush to a file stream and the
+            final close of a file stream. The close query parameter is valid only
+            when the action is "flush" and change notifications are enabled. If
+            the value of close is "true" and the flush operation completes
+            successfully, the service raises a file change notification with a
+            property indicating that this is the final update (the file stream has
+            been closed). If "false" a change notification is raised indicating
+            the file has changed. The default is false. This query parameter is
+            set to true by the Hadoop ABFS driver to indicate that the file stream
+            has been closed."
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword lease_action:
+            Used to perform lease operations along with flushing data.
+
+            "acquire" - Acquire a lease.
+            "auto-renew" - Renew an existing lease.
+            "release" - Release the lease once the operation is complete.
+            "acquire-release" - Acquire a lease and release it once the operation is complete.
+        :paramtype lease_action: Literal["acquire", "auto-renew", "release", "acquire-release"]
+        :keyword int lease_duration:
+            Valid if `lease_action` is set to "acquire" or "acquire-release".
+
+            Specifies the duration of the lease, in seconds, or negative one
+            (-1) for a lease that never expires. A non-infinite lease can be
+            between 15 and 60 seconds. A lease duration cannot be changed
+            using renew or change. Default is -1 (infinite lease).
+        :keyword lease:
+            Required if the file has an active lease or if `lease_action` is set to "acquire" or "acquire-release".
+            If the file has an existing lease, this will be used to access the file. If acquiring a new lease,
+            this will be used as the new lease id.
+            Value can be a DataLakeLeaseClient object or the lease ID as a string.
+        :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
+        :keyword ~azure.storage.filedatalake.CustomerProvidedEncryptionKey cpk:
+            Encrypts the data on the service-side with the given key.
+            Use of customer-provided keys must be done over HTTPS.
+        :returns: A dictionary of response headers.
+        :rtype: dict[str, str] or dict[str, ~datetime.datetime]
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_file_system_async.py
+                :start-after: [START upload_file_to_file_system]
+                :end-before: [END upload_file_to_file_system]
+                :language: python
+                :dedent: 12
+                :caption: Commit the previous appended data.
+        """
+        options = self._flush_data_options(
+            offset,
+            self.scheme,
+            retain_uncommitted_data=retain_uncommitted_data, **kwargs)
+        try:
+            return await self._client.path.flush_data(**options)
+        except HttpResponseError as error:
+            process_storage_error(error)
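+
+    # A minimal usage sketch (illustrative comment only): committing appended data.
+    # The offset passed to flush_data must equal the total file length after the
+    # commit, so here it is the sum of the two appended block sizes from above.
+    #
+    #     await file_client.flush_data(len(data) + len(more))
+    #     # Alternatively, append and commit in a single call:
+    #     #     await file_client.append_data(data, offset=0, flush=True)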
+
+    @distributed_trace_async
+    async def download_file(self, offset=None, length=None, **kwargs):
+        # type: (Optional[int], Optional[int], Any) -> StorageStreamDownloader
+        """Downloads a file to the StorageStreamDownloader. The readall() method must
+        be used to read all the content, or readinto() must be used to download the file into
+        a stream. Using chunks() returns an async iterator which allows the user to iterate over the content in chunks.
+
+        :param int offset:
+            Start of byte range to use for downloading a section of the file.
+            Must be set if length is provided.
+        :param int length:
+            Number of bytes to read from the stream. This is optional, but
+            should be supplied for optimal performance.
+        :keyword lease:
+            If specified, download only succeeds if the file's lease is active
+            and matches this ID. Required if the file has an active lease.
+        :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword ~azure.storage.filedatalake.CustomerProvidedEncryptionKey cpk:
+            Decrypts the data on the service-side with the given key.
+            Use of customer-provided keys must be done over HTTPS.
+            Required if the file was created with a Customer-Provided Key.
+        :keyword int max_concurrency:
+            Maximum number of parallel connections to use when transferring the file in chunks.
+            This option does not affect the underlying connection pool, and may
+            require a separate configuration of the connection pool.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_. This method may make multiple calls to the service and
+            the timeout will apply to each call individually.
+        :returns: A streaming object (StorageStreamDownloader)
+        :rtype: ~azure.storage.filedatalake.aio.StorageStreamDownloader
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_upload_download_async.py
+                :start-after: [START read_file]
+                :end-before: [END read_file]
+                :language: python
+                :dedent: 4
+                :caption: Return the downloaded data.
+        """
+        downloader = await self._blob_client.download_blob(offset=offset, length=length, **kwargs)
+        return StorageStreamDownloader(downloader)
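+
+    # A minimal usage sketch (illustrative comment only): downloading the whole file,
+    # then a byte range. `file_client` is an assumed async DataLakeFileClient.
+    #
+    #     downloader = await file_client.download_file()
+    #     contents = await downloader.readall()
+    #
+    #     # Read only the first 512 bytes; offset must be set when length is given.
+    #     partial = await file_client.download_file(offset=0, length=512)
+    #     head = await partial.readall()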
+
+    @distributed_trace_async
+    async def rename_file(self, new_name, **kwargs):
+        # type: (str, **Any) -> DataLakeFileClient
+        """
+        Rename the source file.
+
+        :param str new_name: The new file name to rename to.
+            The value must have the following format: "{filesystem}/{directory}/{subdirectory}/{file}".
+        :keyword ~azure.storage.filedatalake.ContentSettings content_settings:
+            ContentSettings object used to set path properties.
+        :keyword source_lease: A lease ID for the source path. If specified,
+            the source path must have an active lease and the lease ID must
+            match.
+        :paramtype source_lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str
+        :keyword lease:
+            Required if the file/directory has an active lease. Value can be a LeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword ~datetime.datetime source_if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime source_if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str source_etag:
+            The source ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions source_match_condition:
+            The source match condition to use upon the etag.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :return: The renamed file client.
+        :rtype: DataLakeFileClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_upload_download_async.py
+                :start-after: [START rename_file]
+                :end-before: [END rename_file]
+                :language: python
+                :dedent: 4
+                :caption: Rename the source file.
+        """
+        new_file_system, new_path, new_file_sas = self._parse_rename_path(new_name)
+
+        new_file_client = DataLakeFileClient(
+            f"{self.scheme}://{self.primary_hostname}", new_file_system, file_path=new_path,
+            credential=self._raw_credential or new_file_sas,
+            _hosts=self._hosts, _configuration=self._config, _pipeline=self._pipeline,
+            _location_mode=self._location_mode)
+        await new_file_client._rename_path(  # pylint: disable=protected-access
+            f'/{quote(unquote(self.file_system_name))}/{quote(unquote(self.path_name))}{self._query_str}', **kwargs)
+        return new_file_client
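+
+    # A minimal usage sketch (illustrative comment only): renaming a file. The new
+    # name must be prefixed with the target file system name.
+    #
+    #     renamed = await file_client.rename_file(
+    #         file_client.file_system_name + "/newdir/renamed.txt")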
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/aio/_data_lake_lease_async.py b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/aio/_data_lake_lease_async.py
new file mode 100644
index 00000000..0dae4306
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/aio/_data_lake_lease_async.py
@@ -0,0 +1,269 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+# pylint: disable=invalid-overridden-method, docstring-keyword-should-match-keyword-only
+
+from typing import (
+    Union, Optional, Any,
+    TypeVar, TYPE_CHECKING
+)
+from azure.core.tracing.decorator_async import distributed_trace_async
+from azure.storage.blob.aio import BlobLeaseClient
+from .._data_lake_lease import DataLakeLeaseClient as DataLakeLeaseClientBase
+
+
+if TYPE_CHECKING:
+    FileSystemClient = TypeVar("FileSystemClient")
+    DataLakeDirectoryClient = TypeVar("DataLakeDirectoryClient")
+    DataLakeFileClient = TypeVar("DataLakeFileClient")
+
+
+class DataLakeLeaseClient(DataLakeLeaseClientBase):  # pylint: disable=client-accepts-api-version-keyword
+    """Creates a new DataLakeLeaseClient.
+
+    This client provides lease operations on a FileSystemClient, DataLakeDirectoryClient or DataLakeFileClient.
+
+    :ivar str id:
+        The ID of the lease currently being maintained. This will be `None` if no
+        lease has yet been acquired.
+    :ivar str etag:
+        The ETag of the lease currently being maintained. This will be `None` if no
+        lease has yet been acquired or modified.
+    :ivar ~datetime.datetime last_modified:
+        The last modified timestamp of the lease currently being maintained.
+        This will be `None` if no lease has yet been acquired or modified.
+
+    :param client:
+        The client of the file system, directory, or file to lease.
+    :type client: ~azure.storage.filedatalake.aio.FileSystemClient or
+        ~azure.storage.filedatalake.aio.DataLakeDirectoryClient or ~azure.storage.filedatalake.aio.DataLakeFileClient
+    :param str lease_id:
+        A string representing the lease ID of an existing lease. This value does not
+        need to be specified in order to acquire a new lease, or break one.
+    """
+    def __init__(
+            self, client, lease_id=None
+    ):  # pylint: disable=missing-client-constructor-parameter-credential,missing-client-constructor-parameter-kwargs
+        # type: (Union[FileSystemClient, DataLakeDirectoryClient, DataLakeFileClient], Optional[str]) -> None
+        super(DataLakeLeaseClient, self).__init__(client, lease_id)
+
+        if hasattr(client, '_blob_client'):
+            _client = client._blob_client  # type: ignore
+        elif hasattr(client, '_container_client'):
+            _client = client._container_client  # type: ignore
+        else:
+            raise TypeError("Lease must use any of FileSystemClient, DataLakeDirectoryClient, or DataLakeFileClient.")
+
+        self._blob_lease_client = BlobLeaseClient(_client, lease_id=lease_id)
+
+    def __enter__(self):
+        raise TypeError("Async lease must use 'async with'.")
+
+    def __exit__(self, *args):
+        self.release()
+
+    async def __aenter__(self):
+        return self
+
+    async def __aexit__(self, *args):
+        await self.release()
+
+    @distributed_trace_async
+    async def acquire(self, lease_duration=-1, **kwargs):
+        # type: (int, **Any) -> None
+        """Requests a new lease.
+
+        If the file/file system does not have an active lease, the DataLake service creates a
+        lease on the file/file system and returns a new lease ID.
+
+        :param int lease_duration:
+            Specifies the duration of the lease, in seconds, or negative one
+            (-1) for a lease that never expires. A non-infinite lease can be
+            between 15 and 60 seconds. A lease duration cannot be changed
+            using renew or change. Default is -1 (infinite lease).
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :rtype: None
+        """
+        await self._blob_lease_client.acquire(lease_duration=lease_duration, **kwargs)
+        self._update_lease_client_attributes()
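+
+    # A minimal usage sketch (illustrative comment only): acquiring a 30-second lease
+    # on a file and releasing it on exit. `file_client` is an assumed async
+    # DataLakeFileClient.
+    #
+    #     async with DataLakeLeaseClient(file_client) as lease:
+    #         await lease.acquire(lease_duration=30)
+    #         # Writes to the file now require presenting this lease.
+    #         await file_client.set_metadata({"locked": "true"}, lease=lease)
+    #     # Leaving the block calls release() via __aexit__.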
+
+    @distributed_trace_async
+    async def renew(self, **kwargs):
+        # type: (Any) -> None
+        """Renews the lease.
+
+        The lease can be renewed if the lease ID specified in the
+        lease client matches that associated with the file system or file. Note that
+        the lease may be renewed even if it has expired as long as the file system
+        or file has not been leased again since the expiration of that lease. When you
+        renew a lease, the lease duration clock resets.
+
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :return: None
+        """
+        await self._blob_lease_client.renew(**kwargs)
+        self._update_lease_client_attributes()
+
+    @distributed_trace_async
+    async def release(self, **kwargs):
+        # type: (Any) -> None
+        """Release the lease.
+
+        The lease may be released if the client lease id specified matches
+        that associated with the file system or file. Releasing the lease allows another client
+        to immediately acquire the lease for the file system or file as soon as the release is complete.
+
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :return: None
+        """
+        await self._blob_lease_client.release(**kwargs)
+        self._update_lease_client_attributes()
+
+    @distributed_trace_async
+    async def change(self, proposed_lease_id, **kwargs):
+        # type: (str, Any) -> None
+        """Change the lease ID of an active lease.
+
+        :param str proposed_lease_id:
+            Proposed lease ID, in a GUID string format. The DataLake service returns 400
+            (Invalid request) if the proposed lease ID is not in the correct format.
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :return: None
+        """
+        await self._blob_lease_client.change(proposed_lease_id=proposed_lease_id, **kwargs)
+        self._update_lease_client_attributes()
+
+    @distributed_trace_async
+    async def break_lease(self, lease_break_period=None, **kwargs):
+        # type: (Optional[int], Any) -> int
+        """Break the lease, if the file system or file has an active lease.
+
+        Once a lease is broken, it cannot be renewed. Any authorized request can break the lease;
+        the request is not required to specify a matching lease ID. When a lease
+        is broken, the lease break period is allowed to elapse, during which time
+        no lease operation except break and release can be performed on the file system or file.
+        When a lease is successfully broken, the response indicates the interval
+        in seconds until a new lease can be acquired.
+
+        :param int lease_break_period:
+            This is the proposed duration of seconds that the lease
+            should continue before it is broken, between 0 and 60 seconds. This
+            break period is only used if it is shorter than the time remaining
+            on the lease. If longer, the time remaining on the lease is used.
+            A new lease will not be available before the break period has
+            expired, but the lease may be held for longer than the break
+            period. If this header does not appear with a break
+            operation, a fixed-duration lease breaks after the remaining lease
+            period elapses, and an infinite lease breaks immediately.
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :return: Approximate time remaining in the lease period, in seconds.
+        :rtype: int
+        """
+        return await self._blob_lease_client.break_lease(lease_break_period=lease_break_period, **kwargs)
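+
+    # A minimal usage sketch (illustrative comment only): breaking a lease held by
+    # another client so the file can be leased again. Any authorized request may
+    # break a lease; no matching lease ID is required.
+    #
+    #     lease = DataLakeLeaseClient(file_client)
+    #     seconds_remaining = await lease.break_lease(lease_break_period=0)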
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/aio/_data_lake_service_client_async.py b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/aio/_data_lake_service_client_async.py
new file mode 100644
index 00000000..093aad92
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/aio/_data_lake_service_client_async.py
@@ -0,0 +1,570 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+# pylint: disable=invalid-overridden-method, docstring-keyword-should-match-keyword-only
+
+from typing import Any, Dict, Optional, Union, TYPE_CHECKING
+
+from azure.core.paging import ItemPaged
+from azure.core.pipeline import AsyncPipeline
+from azure.core.tracing.decorator import distributed_trace
+from azure.core.tracing.decorator_async import distributed_trace_async
+
+from azure.storage.blob.aio import BlobServiceClient
+from .._serialize import get_api_version
+from .._generated.aio import AzureDataLakeStorageRESTAPI
+from .._deserialize import get_datalake_service_properties
+from .._shared.base_client_async import AsyncTransportWrapper, AsyncStorageAccountHostsMixin
+from ._file_system_client_async import FileSystemClient
+from .._data_lake_service_client import DataLakeServiceClient as DataLakeServiceClientBase
+from .._shared.policies_async import ExponentialRetry
+from ._data_lake_directory_client_async import DataLakeDirectoryClient
+from ._data_lake_file_client_async import DataLakeFileClient
+from ._models import FileSystemPropertiesPaged
+from .._models import UserDelegationKey, LocationMode
+
+if TYPE_CHECKING:
+    from azure.core.credentials import AzureNamedKeyCredential, AzureSasCredential
+    from azure.core.credentials_async import AsyncTokenCredential
+
+
+class DataLakeServiceClient(AsyncStorageAccountHostsMixin, DataLakeServiceClientBase):
+    """A client to interact with the DataLake Service at the account level.
+
+    This client provides operations to retrieve and configure the account properties
+    as well as list, create and delete file systems within the account.
+    For operations relating to a specific file system, directory or file, clients for those entities
+    can also be retrieved using the `get_client` functions.
+
+    :ivar str url:
+        The full endpoint URL to the datalake service endpoint.
+    :ivar str primary_endpoint:
+        The full primary endpoint URL.
+    :ivar str primary_hostname:
+        The hostname of the primary endpoint.
+    :param str account_url:
+        The URL to the DataLake storage account. Any other entities included
+        in the URL path (e.g. file system or file) will be discarded. This URL can be optionally
+        authenticated with a SAS token.
+    :param credential:
+        The credentials with which to authenticate. This is optional if the
+        account URL already has a SAS token. The value can be a SAS token string,
+        an instance of a AzureSasCredential or AzureNamedKeyCredential from azure.core.credentials,
+        an account shared access key, or an instance of a TokenCredentials class from azure.identity.
+        If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential
+        - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError.
+        If using an instance of AzureNamedKeyCredential, "name" should be the storage account name, and "key"
+        should be the storage account key.
+    :type credential:
+        ~azure.core.credentials.AzureNamedKeyCredential or
+        ~azure.core.credentials.AzureSasCredential or
+        ~azure.core.credentials_async.AsyncTokenCredential or
+        str or dict[str, str] or None
+    :keyword str api_version:
+        The Storage API version to use for requests. Default value is the most recent service version that is
+        compatible with the current SDK. Setting to an older version may result in reduced feature compatibility.
+    :keyword str audience: The audience to use when requesting tokens for Azure Active Directory
+        authentication. Only has an effect when credential is of type TokenCredential. The value could be
+        https://storage.azure.com/ (default) or https://<account>.blob.core.windows.net.
+
+    .. admonition:: Example:
+
+        .. literalinclude:: ../samples/datalake_samples_service_async.py
+            :start-after: [START create_datalake_service_client]
+            :end-before: [END create_datalake_service_client]
+            :language: python
+            :dedent: 4
+            :caption: Creating the DataLakeServiceClient from connection string.
+
+        .. literalinclude:: ../samples/datalake_samples_service_async.py
+            :start-after: [START create_datalake_service_client_oauth]
+            :end-before: [END create_datalake_service_client_oauth]
+            :language: python
+            :dedent: 4
+            :caption: Creating the DataLakeServiceClient with Azure Identity credentials.
+    """
+
+    def __init__(
+            self, account_url: str,
+            credential: Optional[Union[str, Dict[str, str], "AzureNamedKeyCredential", "AzureSasCredential", "AsyncTokenCredential"]] = None,  # pylint: disable=line-too-long
+            **kwargs: Any
+        ) -> None:
+        kwargs['retry_policy'] = kwargs.get('retry_policy') or ExponentialRetry(**kwargs)
+        super(DataLakeServiceClient, self).__init__(
+            account_url,
+            credential=credential,
+            **kwargs
+        )
+        self._blob_service_client = BlobServiceClient(self._blob_account_url, credential, **kwargs)
+        self._blob_service_client._hosts[LocationMode.SECONDARY] = ""
+        self._client = AzureDataLakeStorageRESTAPI(self.url, base_url=self.url, pipeline=self._pipeline)
+        self._client._config.version = get_api_version(kwargs)
+        self._loop = kwargs.get('loop', None)
+
+    async def __aenter__(self):
+        await self._blob_service_client.__aenter__()
+        return self
+
+    async def __aexit__(self, *args):
+        await self._blob_service_client.close()
+        await super(DataLakeServiceClient, self).__aexit__(*args)
+
+    async def close(self):
+        # type: () -> None
+        """ This method is to close the sockets opened by the client.
+        It need not be used when using with a context manager.
+        """
+        await self.__aexit__()
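+
+    # A minimal usage sketch (illustrative comment only): constructing the async
+    # service client with an Azure Identity credential. The account URL is a
+    # placeholder.
+    #
+    #     from azure.identity.aio import DefaultAzureCredential
+    #
+    #     async def main() -> None:
+    #         async with DataLakeServiceClient(
+    #                 "https://myaccount.dfs.core.windows.net",
+    #                 credential=DefaultAzureCredential()) as service_client:
+    #             ...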
+
+    @distributed_trace_async
+    async def get_user_delegation_key(self, key_start_time,  # type: datetime
+                                      key_expiry_time,  # type: datetime
+                                      **kwargs  # type: Any
+                                      ):
+        # type: (...) -> UserDelegationKey
+        """
+        Obtain a user delegation key for the purpose of signing SAS tokens.
+        A token credential must be present on the service object for this request to succeed.
+
+        :param ~datetime.datetime key_start_time:
+            A DateTime value. Indicates when the key becomes valid.
+        :param ~datetime.datetime key_expiry_time:
+            A DateTime value. Indicates when the key stops being valid.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :return: The user delegation key.
+        :rtype: ~azure.storage.filedatalake.UserDelegationKey
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_service_async.py
+                :start-after: [START get_user_delegation_key]
+                :end-before: [END get_user_delegation_key]
+                :language: python
+                :dedent: 8
+                :caption: Get user delegation key from datalake service client.
+        """
+        delegation_key = await self._blob_service_client.get_user_delegation_key(
+            key_start_time=key_start_time,
+            key_expiry_time=key_expiry_time,
+            **kwargs)
+        return UserDelegationKey._from_generated(delegation_key)  # pylint: disable=protected-access
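+
+    # A minimal usage sketch (illustrative comment only): requesting a delegation key
+    # valid for one hour. The client must have been constructed with a token
+    # credential for this call to succeed.
+    #
+    #     from datetime import datetime, timedelta, timezone
+    #
+    #     start = datetime.now(timezone.utc)
+    #     key = await service_client.get_user_delegation_key(
+    #         start, start + timedelta(hours=1))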
+
+    @distributed_trace
+    def list_file_systems(self, name_starts_with=None,  # type: Optional[str]
+                          include_metadata=None,  # type: Optional[bool]
+                          **kwargs):
+        # type: (...) -> ItemPaged[FileSystemProperties]
+        """Returns a generator to list the file systems under the specified account.
+
+        The generator will lazily follow the continuation tokens returned by
+        the service and stop when all file systems have been returned.
+
+        :param str name_starts_with:
+            Filters the results to return only file systems whose names
+            begin with the specified prefix.
+        :param bool include_metadata:
+            Specifies that file system metadata be returned in the response.
+            The default value is `False`.
+        :keyword int results_per_page:
+            The maximum number of file system names to retrieve per API
+            call. If the request does not specify a value, the server will return up to 5,000 items per page.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :keyword bool include_deleted:
+            Specifies that deleted file systems be returned in the response. This is only for
+            accounts with file system restore enabled. The default value is `False`.
+            .. versionadded:: 12.3.0
+        :keyword bool include_system:
+            Flag specifying that system filesystems should be included.
+            .. versionadded:: 12.6.0
+        :returns: An iterable (auto-paging) of FileSystemProperties.
+        :rtype: ~azure.core.paging.ItemPaged[~azure.storage.filedatalake.FileSystemProperties]
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_service_async.py
+                :start-after: [START list_file_systems]
+                :end-before: [END list_file_systems]
+                :language: python
+                :dedent: 8
+                :caption: Listing the file systems in the datalake service.
+        """
+        item_paged = self._blob_service_client.list_containers(name_starts_with=name_starts_with,
+                                                               include_metadata=include_metadata,
+                                                               **kwargs)
+        item_paged._page_iterator_class = FileSystemPropertiesPaged  # pylint: disable=protected-access
+        return item_paged
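+
+    # A minimal usage sketch (illustrative comment only): the pager returned by the
+    # async client is consumed with `async for`.
+    #
+    #     async for fs in service_client.list_file_systems(name_starts_with="logs"):
+    #         print(fs.name)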
+
+    @distributed_trace_async
+    async def create_file_system(self, file_system,  # type: Union[FileSystemProperties, str]
+                                 metadata=None,  # type: Optional[Dict[str, str]]
+                                 public_access=None,  # type: Optional[PublicAccess]
+                                 **kwargs):
+        # type: (...) -> FileSystemClient
+        """Creates a new file system under the specified account.
+
+        If the file system with the same name already exists, a ResourceExistsError will
+        be raised. This method returns a client with which to interact with the newly
+        created file system.
+
+        :param str file_system:
+            The name of the file system to create.
+        :param metadata:
+            A dict with name-value pairs to associate with the
+            file system as metadata. Example: `{'Category':'test'}`
+        :type metadata: dict(str, str)
+        :param public_access:
+            The public access level of the file system. Possible values include: 'file system', 'file'.
+        :type public_access: ~azure.storage.filedatalake.PublicAccess
+        :keyword encryption_scope_options:
+            Specifies the default encryption scope to set on the file system and use for
+            all future writes.
+
+            .. versionadded:: 12.9.0
+
+        :paramtype encryption_scope_options: dict or ~azure.storage.filedatalake.EncryptionScopeOptions
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :returns: FileSystemClient under the specified account.
+        :rtype: ~azure.storage.filedatalake.FileSystemClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_service_async.py
+                :start-after: [START create_file_system_from_service_client]
+                :end-before: [END create_file_system_from_service_client]
+                :language: python
+                :dedent: 8
+                :caption: Creating a file system in the datalake service.
+        """
+        file_system_client = self.get_file_system_client(file_system)
+        await file_system_client.create_file_system(metadata=metadata, public_access=public_access, **kwargs)
+        return file_system_client
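+
+    # A minimal usage sketch (illustrative comment only): creating a file system with
+    # metadata and receiving a client for it.
+    #
+    #     fs_client = await service_client.create_file_system(
+    #         "my-filesystem", metadata={"Category": "test"})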
+
+    async def _rename_file_system(self, name, new_name, **kwargs):
+        # type: (str, str, **Any) -> FileSystemClient
+        """Renames a filesystem.
+
+        Operation is successful only if the source filesystem exists.
+
+        :param str name:
+            The name of the filesystem to rename.
+        :param str new_name:
+            The new filesystem name the user wants to rename to.
+        :keyword lease:
+            Specify this to perform only if the lease ID given
+            matches the active lease ID of the source filesystem.
+        :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :returns: FileSystemClient with the newly specified name.
+        :rtype: ~azure.storage.filedatalake.FileSystemClient
+        """
+        await self._blob_service_client._rename_container(name, new_name, **kwargs)   # pylint: disable=protected-access
+        renamed_file_system = self.get_file_system_client(new_name)
+        return renamed_file_system
+
+    @distributed_trace_async
+    async def undelete_file_system(self, name, deleted_version, **kwargs):
+        # type: (str, str, **Any) -> FileSystemClient
+        """Restores soft-deleted filesystem.
+
+        Operation will only be successful if used within the specified number of days
+        set in the delete retention policy.
+
+        .. versionadded:: 12.3.0
+            This operation was introduced in API version '2019-12-12'.
+
+        :param str name:
+            Specifies the name of the deleted filesystem to restore.
+        :param str deleted_version:
+            Specifies the version of the deleted filesystem to restore.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :returns: The FileSystemClient of the restored soft-deleted filesystem.
+        :rtype: ~azure.storage.filedatalake.FileSystemClient
+        """
+        new_name = kwargs.pop('new_name', None)
+        await self._blob_service_client.undelete_container(name, deleted_version, new_name=new_name, **kwargs)
+        file_system = self.get_file_system_client(new_name or name)
+        return file_system
+
+    @distributed_trace_async
+    async def delete_file_system(self, file_system,  # type: Union[FileSystemProperties, str]
+                                 **kwargs):
+        # type: (...) -> FileSystemClient
+        """Marks the specified file system for deletion.
+
+        The file system and any files contained within it are later deleted during garbage collection.
+        If the file system is not found, a ResourceNotFoundError will be raised.
+
+        :param file_system:
+            The file system to delete. This can either be the name of the file system,
+            or an instance of FileSystemProperties.
+        :type file_system: str or ~azure.storage.filedatalake.FileSystemProperties
+        :keyword lease:
+            If specified, delete_file_system only succeeds if the
+            file system's lease is active and matches this ID.
+            Required if the file system has an active lease.
+        :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :returns: FileSystemClient after marking the specified file system for deletion.
+        :rtype: ~azure.storage.filedatalake.aio.FileSystemClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_service_async.py
+                :start-after: [START delete_file_system_from_service_client]
+                :end-before: [END delete_file_system_from_service_client]
+                :language: python
+                :dedent: 8
+                :caption: Deleting a file system in the datalake service.
+        """
+        file_system_client = self.get_file_system_client(file_system)
+        await file_system_client.delete_file_system(**kwargs)
+        return file_system_client
+
+    def get_file_system_client(self, file_system  # type: Union[FileSystemProperties, str]
+                               ):
+        # type: (...) -> FileSystemClient
+        """Get a client to interact with the specified file system.
+
+        The file system need not already exist.
+
+        :param file_system:
+            The file system. This can either be the name of the file system,
+            or an instance of FileSystemProperties.
+        :type file_system: str or ~azure.storage.filedatalake.FileSystemProperties
+        :returns: A FileSystemClient.
+        :rtype: ~azure.storage.filedatalake.aio.FileSystemClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_file_system_async.py
+                :start-after: [START create_file_system_client_from_service]
+                :end-before: [END create_file_system_client_from_service]
+                :language: python
+                :dedent: 8
+                :caption: Getting the file system client to interact with a specific file system.
+        """
+        try:
+            file_system_name = file_system.name
+        except AttributeError:
+            file_system_name = file_system
+
+        _pipeline = AsyncPipeline(
+            transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access
+            policies=self._pipeline._impl_policies # pylint: disable = protected-access
+        )
+        return FileSystemClient(self.url, file_system_name, credential=self._raw_credential,
+                                api_version=self.api_version,
+                                _configuration=self._config,
+                                _pipeline=_pipeline, _hosts=self._hosts)
+
+    def get_directory_client(self, file_system,  # type: Union[FileSystemProperties, str]
+                             directory  # type: Union[DirectoryProperties, str]
+                             ):
+        # type: (...) -> DataLakeDirectoryClient
+        """Get a client to interact with the specified directory.
+
+        The directory need not already exist.
+
+        :param file_system:
+            The file system that the directory is in. This can either be the name of the file system,
+            or an instance of FileSystemProperties.
+        :type file_system: str or ~azure.storage.filedatalake.FileSystemProperties
+        :param directory:
+            The directory with which to interact. This can either be the name of the directory,
+            or an instance of DirectoryProperties.
+        :type directory: str or ~azure.storage.filedatalake.DirectoryProperties
+        :returns: A DataLakeDirectoryClient.
+        :rtype: ~azure.storage.filedatalake.aio.DataLakeDirectoryClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_service_async.py
+                :start-after: [START get_directory_client_from_service_client]
+                :end-before: [END get_directory_client_from_service_client]
+                :language: python
+                :dedent: 8
+                :caption: Getting the directory client to interact with a specific directory.
+        """
+        try:
+            file_system_name = file_system.name
+        except AttributeError:
+            file_system_name = file_system
+        try:
+            directory_name = directory.name
+        except AttributeError:
+            directory_name = directory
+
+        _pipeline = AsyncPipeline(
+            transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access
+            policies=self._pipeline._impl_policies # pylint: disable = protected-access
+        )
+        return DataLakeDirectoryClient(self.url, file_system_name, directory_name=directory_name,
+                                       credential=self._raw_credential,
+                                       api_version=self.api_version,
+                                       _configuration=self._config, _pipeline=_pipeline,
+                                       _hosts=self._hosts)
+
+    def get_file_client(self, file_system,  # type: Union[FileSystemProperties, str]
+                        file_path  # type: Union[FileProperties, str]
+                        ):
+        # type: (...) -> DataLakeFileClient
+        """Get a client to interact with the specified file.
+
+        The file need not already exist.
+
+        :param file_system:
+            The file system that the file is in. This can either be the name of the file system,
+            or an instance of FileSystemProperties.
+        :type file_system: str or ~azure.storage.filedatalake.FileSystemProperties
+        :param file_path:
+            The file with which to interact. This can either be the full path of the file (from the root
+            directory), e.g. directory/subdirectory/file, or an instance of FileProperties.
+        :type file_path: str or ~azure.storage.filedatalake.FileProperties
+        :returns: A DataLakeFileClient.
+        :rtype: ~azure.storage.filedatalake.aio.DataLakeFileClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_service_async.py
+                :start-after: [START get_file_client_from_service_client]
+                :end-before: [END get_file_client_from_service_client]
+                :language: python
+                :dedent: 8
+                :caption: Getting the file client to interact with a specific file.
+        """
+        try:
+            file_system_name = file_system.name
+        except AttributeError:
+            file_system_name = file_system
+        try:
+            file_path = file_path.name
+        except AttributeError:
+            pass
+
+        _pipeline = AsyncPipeline(
+            transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access
+            policies=self._pipeline._impl_policies # pylint: disable = protected-access
+        )
+        return DataLakeFileClient(
+            self.url, file_system_name, file_path=file_path, credential=self._raw_credential,
+            api_version=self.api_version,
+            _hosts=self._hosts, _configuration=self._config, _pipeline=_pipeline)
+
+    @distributed_trace_async
+    async def set_service_properties(self, **kwargs):
+        # type: (**Any) -> None
+        """Sets the properties of a storage account's Datalake service, including
+        Azure Storage Analytics.
+
+        If an element (e.g. analytics_logging) is left as None, the
+        existing settings on the service for that functionality are preserved.
+
+        .. versionadded:: 12.4.0
+            This operation was introduced in API version '2020-06-12'.
+
+        :keyword analytics_logging:
+            Groups the Azure Analytics Logging settings.
+        :type analytics_logging: ~azure.storage.filedatalake.AnalyticsLogging
+        :keyword hour_metrics:
+            The hour metrics settings provide a summary of request
+            statistics grouped by API in hourly aggregates.
+        :type hour_metrics: ~azure.storage.filedatalake.Metrics
+        :keyword minute_metrics:
+            The minute metrics settings provide request statistics
+            for each minute.
+        :type minute_metrics: ~azure.storage.filedatalake.Metrics
+        :keyword cors:
+            You can include up to five CorsRule elements in the
+            list. If an empty list is specified, all CORS rules will be deleted,
+            and CORS will be disabled for the service.
+        :type cors: list[~azure.storage.filedatalake.CorsRule]
+        :keyword str target_version:
+            Indicates the default version to use for requests if an incoming
+            request's version is not specified.
+        :keyword delete_retention_policy:
+            The delete retention policy specifies whether to retain deleted files/directories.
+            It also specifies the number of days and versions of file/directory to keep.
+        :type delete_retention_policy: ~azure.storage.filedatalake.RetentionPolicy
+        :keyword static_website:
+            Specifies whether the static website feature is enabled,
+            and if yes, indicates the index document and 404 error document to use.
+        :type static_website: ~azure.storage.filedatalake.StaticWebsite
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
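+
+        A minimal usage sketch, assuming `service_client` is an authenticated
+        aio DataLakeServiceClient; settings left unspecified keep their
+        existing values:
+
+        .. code-block:: python
+
+            from azure.storage.filedatalake import RetentionPolicy
+
+            # Retain deleted files/directories for 7 days.
+            retention = RetentionPolicy(enabled=True, days=7)
+            await service_client.set_service_properties(
+                delete_retention_policy=retention)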
+        """
+        await self._blob_service_client.set_service_properties(**kwargs)
+
+    @distributed_trace_async
+    async def get_service_properties(self, **kwargs):
+        # type: (**Any) -> Dict[str, Any]
+        """Gets the properties of a storage account's datalake service, including
+        Azure Storage Analytics.
+
+        .. versionadded:: 12.4.0
+            This operation was introduced in API version '2020-06-12'.
+
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :returns: An object containing datalake service properties such as
+            analytics logging, hour/minute metrics, cors rules, etc.
+        :rtype: dict[str, Any]
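+
+        A minimal usage sketch, assuming `service_client` is an authenticated
+        aio DataLakeServiceClient; the key name below mirrors the corresponding
+        keyword argument of set_service_properties:
+
+        .. code-block:: python
+
+            props = await service_client.get_service_properties()
+            retention = props['delete_retention_policy']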
+        """
+        props = await self._blob_service_client.get_service_properties(**kwargs)
+        return get_datalake_service_properties(props)
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/aio/_download_async.py b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/aio/_download_async.py
new file mode 100644
index 00000000..e22a9df9
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/aio/_download_async.py
@@ -0,0 +1,82 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+from typing import AsyncIterator, IO, Optional
+
+from .._deserialize import from_blob_properties
+
+
+class StorageStreamDownloader(object):
+    """A streaming object to download from Azure Storage.
+
+    :ivar str name:
+        The name of the file being downloaded.
+    :ivar ~azure.storage.filedatalake.FileProperties properties:
+        The properties of the file being downloaded. If only a range of the data is being
+        downloaded, this will be reflected in the properties.
+    :ivar int size:
+        The size of the total data in the stream. This will be the byte range if specified,
+        otherwise the total size of the file.
+    """
+
+    def __init__(self, downloader):
+        self._downloader = downloader
+        self.name = self._downloader.name
+
+        # Parse additional Datalake-only properties
+        encryption_context = self._downloader._response.response.headers.get('x-ms-encryption-context')
+        acl = self._downloader._response.response.headers.get('x-ms-acl')
+
+        self.properties = from_blob_properties(
+            self._downloader.properties,
+            encryption_context=encryption_context,
+            acl=acl)
+        self.size = self._downloader.size
+
+    def __len__(self):
+        return self.size
+
+    def chunks(self) -> AsyncIterator[bytes]:
+        """Iterate over chunks in the download stream.
+
+        :returns: An async iterator over the chunks in the download stream.
+        :rtype: AsyncIterator[bytes]
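+
+        A minimal usage sketch, assuming `file_client` is an authenticated aio
+        DataLakeFileClient for an existing file:
+
+        .. code-block:: python
+
+            downloader = await file_client.download_file()
+            async for chunk in downloader.chunks():
+                # Each chunk is a bytes object; sizes depend on the service
+                # response and transfer configuration.
+                print(len(chunk))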
+        """
+        return self._downloader.chunks()
+
+    async def read(self, size: Optional[int] = -1) -> bytes:
+        """
+        Read up to size bytes from the stream and return them. If size
+        is unspecified or is -1, all bytes will be read.
+
+        :param Optional[int] size:
+            The number of bytes to download from the stream. Leave unspecified
+            or set to -1 to download all bytes.
+        :returns:
+            The requested data as bytes. If the return value is empty, there is no more data to read.
+        :rtype: bytes
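+
+        A minimal usage sketch, assuming `file_client` is an authenticated aio
+        DataLakeFileClient for an existing file:
+
+        .. code-block:: python
+
+            downloader = await file_client.download_file()
+            header = await downloader.read(512)  # first 512 bytes
+            rest = await downloader.read()       # everything that remains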
+        """
+        return await self._downloader.read(size)
+
+    async def readall(self) -> bytes:
+        """Download the contents of this file.
+
+        This operation is blocking until all data is downloaded.
+
+        :returns: The contents of the file.
+        :rtype: bytes
+        """
+        return await self._downloader.readall()
+
+    async def readinto(self, stream: IO[bytes]) -> int:
+        """Download the contents of this file to a stream.
+
+        :param IO[bytes] stream:
+            The stream to download to. This can be an open file-handle,
+            or any writable stream. The stream must be seekable if the download
+            uses more than one parallel connection.
+        :returns: The number of bytes read.
+        :rtype: int
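+
+        A minimal usage sketch, assuming `file_client` is an authenticated aio
+        DataLakeFileClient for an existing file; `local_copy.bin` is an
+        arbitrary local path:
+
+        .. code-block:: python
+
+            downloader = await file_client.download_file()
+            with open("local_copy.bin", "wb") as stream:
+                bytes_read = await downloader.readinto(stream)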
+        """
+        return await self._downloader.readinto(stream)
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/aio/_file_system_client_async.py b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/aio/_file_system_client_async.py
new file mode 100644
index 00000000..9c3122a1
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/aio/_file_system_client_async.py
@@ -0,0 +1,1004 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+# pylint: disable=invalid-overridden-method, too-many-lines, docstring-keyword-should-match-keyword-only
+
+import functools
+from typing import (  # pylint: disable=unused-import
+    Union, Optional, Any, Dict, List, Tuple,
+    TYPE_CHECKING
+)
+
+from azure.core.exceptions import HttpResponseError
+from azure.core.tracing.decorator import distributed_trace
+
+from azure.core.pipeline import AsyncPipeline
+from azure.core.async_paging import AsyncItemPaged
+
+from azure.core.tracing.decorator_async import distributed_trace_async
+from azure.storage.blob.aio import ContainerClient
+from .._serialize import get_api_version
+from .._deserialize import process_storage_error, is_file_path
+from .._generated.models import ListBlobsIncludeItem
+
+from ._data_lake_file_client_async import DataLakeFileClient
+from ._data_lake_directory_client_async import DataLakeDirectoryClient
+from ._data_lake_lease_async import DataLakeLeaseClient
+from .._file_system_client import FileSystemClient as FileSystemClientBase
+from .._generated.aio import AzureDataLakeStorageRESTAPI
+from .._shared.base_client_async import AsyncTransportWrapper, AsyncStorageAccountHostsMixin
+from .._shared.policies_async import ExponentialRetry
+from .._models import FileSystemProperties, PublicAccess, DirectoryProperties, FileProperties, DeletedPathProperties
+from ._list_paths_helper import DeletedPathPropertiesPaged, PathPropertiesPaged
+
+
+if TYPE_CHECKING:
+    from azure.core.credentials import AzureNamedKeyCredential, AzureSasCredential
+    from azure.core.credentials_async import AsyncTokenCredential
+    from datetime import datetime
+    from .._models import PathProperties
+
+
+class FileSystemClient(AsyncStorageAccountHostsMixin, FileSystemClientBase):
+    """A client to interact with a specific file system, even if that file system
+    may not yet exist.
+
+    For operations relating to a specific directory or file within this file system, a directory client or file client
+    can be retrieved using the :func:`~get_directory_client` or :func:`~get_file_client` functions.
+
+    :ivar str url:
+        The full endpoint URL to the file system, including SAS token if used.
+    :ivar str primary_endpoint:
+        The full primary endpoint URL.
+    :ivar str primary_hostname:
+        The hostname of the primary endpoint.
+    :param str account_url:
+        The URI to the storage account.
+    :param file_system_name:
+        The file system for the directory or files.
+    :type file_system_name: str
+    :param credential:
+        The credentials with which to authenticate. This is optional if the
+        account URL already has a SAS token. The value can be a SAS token string,
+        an instance of a AzureSasCredential or AzureNamedKeyCredential from azure.core.credentials,
+        an account shared access key, or an instance of a TokenCredentials class from azure.identity.
+        If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential
+        - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError.
+        If using an instance of AzureNamedKeyCredential, "name" should be the storage account name, and "key"
+        should be the storage account key.
+    :type credential:
+        ~azure.core.credentials.AzureNamedKeyCredential or
+        ~azure.core.credentials.AzureSasCredential or
+        ~azure.core.credentials_async.AsyncTokenCredential or
+        str or dict[str, str] or None
+    :keyword str api_version:
+        The Storage API version to use for requests. Default value is the most recent service version that is
+        compatible with the current SDK. Setting to an older version may result in reduced feature compatibility.
+    :keyword str audience: The audience to use when requesting tokens for Azure Active Directory
+        authentication. Only has an effect when credential is of type TokenCredential. The value could be
+        https://storage.azure.com/ (default) or https://<account>.blob.core.windows.net.
+
+    .. admonition:: Example:
+
+        .. literalinclude:: ../samples/datalake_samples_file_system_async.py
+            :start-after: [START create_file_system_client_from_service]
+            :end-before: [END create_file_system_client_from_service]
+            :language: python
+            :dedent: 8
+            :caption: Get a FileSystemClient from an existing DataLakeServiceClient.
+     """
+
+    def __init__(
+        self, account_url: str,
+        file_system_name: str,
+        credential: Optional[Union[str, Dict[str, str], "AzureNamedKeyCredential", "AzureSasCredential", "AsyncTokenCredential"]] = None,  # pylint: disable=line-too-long
+        **kwargs: Any
+    ) -> None:
+        kwargs['retry_policy'] = kwargs.get('retry_policy') or ExponentialRetry(**kwargs)
+        super(FileSystemClient, self).__init__(
+            account_url,
+            file_system_name=file_system_name,
+            credential=credential,
+            **kwargs)
+        # to override the class field _container_client sync version
+        kwargs.pop('_hosts', None)
+        self._container_client = ContainerClient(self._blob_account_url, self.file_system_name,
+                                                 credential=credential,
+                                                 _hosts=self._container_client._hosts,
+                                                 **kwargs)  # type: ignore
+        self._client = AzureDataLakeStorageRESTAPI(self.url, base_url=self.url,
+                                                   file_system=self.file_system_name, pipeline=self._pipeline)
+        self._datalake_client_for_blob_operation = AzureDataLakeStorageRESTAPI(self._container_client.url,
+                                                                               base_url=self._container_client.url,
+                                                                               file_system=self.file_system_name,
+                                                                               pipeline=self._pipeline)
+        api_version = get_api_version(kwargs)
+        self._client._config.version = api_version
+        self._datalake_client_for_blob_operation._config.version = api_version
+
+        self._loop = kwargs.get('loop', None)
+
+    async def __aexit__(self, *args):
+        await self._container_client.close()
+        await self._datalake_client_for_blob_operation.close()
+        await super(FileSystemClient, self).__aexit__(*args)
+
+    async def close(self):
+        # type: () -> None
+        """ This method is to close the sockets opened by the client.
+        It need not be used when using with a context manager.
+        """
+        await self.__aexit__()
+
+    @distributed_trace_async
+    async def acquire_lease(
+            self, lease_duration=-1,  # type: int
+            lease_id=None,  # type: Optional[str]
+            **kwargs
+    ):
+        # type: (...) -> DataLakeLeaseClient
+        """
+        Requests a new lease. If the file system does not have an active lease,
+        the DataLake service creates a lease on the file system and returns a new
+        lease ID.
+
+        :param int lease_duration:
+            Specifies the duration of the lease, in seconds, or negative one
+            (-1) for a lease that never expires. A non-infinite lease can be
+            between 15 and 60 seconds. A lease duration cannot be changed
+            using renew or change. Default is -1 (infinite lease).
+        :param str lease_id:
+            Proposed lease ID, in a GUID string format. The DataLake service returns
+            400 (Invalid request) if the proposed lease ID is not in the correct format.
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :returns: A DataLakeLeaseClient object that can be run in a context manager.
+        :rtype: ~azure.storage.filedatalake.aio.DataLakeLeaseClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_file_system_async.py
+                :start-after: [START acquire_lease_on_file_system]
+                :end-before: [END acquire_lease_on_file_system]
+                :language: python
+                :dedent: 12
+                :caption: Acquiring a lease on the file_system.
+        """
+        lease = DataLakeLeaseClient(self, lease_id=lease_id)
+        await lease.acquire(lease_duration=lease_duration, **kwargs)
+        return lease
+
+    @distributed_trace_async
+    async def create_file_system(self, metadata=None,  # type: Optional[Dict[str, str]]
+                                 public_access=None,  # type: Optional[PublicAccess]
+                                 **kwargs):
+        # type: (...) ->  Dict[str, Union[str, datetime]]
+        """Creates a new file system under the specified account.
+
+        If the file system with the same name already exists, a ResourceExistsError will
+        be raised. This method returns a client with which to interact with the newly
+        created file system.
+
+        :param metadata:
+            A dict with name-value pairs to associate with the
+            file system as metadata. Example: `{'Category':'test'}`
+        :type metadata: dict(str, str)
+        :param public_access:
+            To specify whether data in the file system may be accessed publicly and the level of access.
+        :type public_access: ~azure.storage.filedatalake.PublicAccess
+        :keyword encryption_scope_options:
+            Specifies the default encryption scope to set on the file system and use for
+            all future writes.
+
+            .. versionadded:: 12.9.0
+
+        :paramtype encryption_scope_options: dict or ~azure.storage.filedatalake.EncryptionScopeOptions
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :returns: A dictionary of response headers.
+        :rtype: dict[str, Union[str, datetime]]
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_file_system_async.py
+                :start-after: [START create_file_system]
+                :end-before: [END create_file_system]
+                :language: python
+                :dedent: 16
+                :caption: Creating a file system in the datalake service.
+        """
+        encryption_scope_options = kwargs.pop('encryption_scope_options', None)
+        return await self._container_client.create_container(metadata=metadata,
+                                                             public_access=public_access,
+                                                             container_encryption_scope=encryption_scope_options,
+                                                             **kwargs)
+
+    @distributed_trace_async
+    async def exists(self, **kwargs):
+        # type: (**Any) -> bool
+        """
+        Returns True if a file system exists and returns False otherwise.
+
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :returns: True if a file system exists, False otherwise.
+        :rtype: bool
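+
+        A minimal usage sketch, assuming `file_system_client` is an
+        authenticated aio FileSystemClient:
+
+        .. code-block:: python
+
+            if not await file_system_client.exists():
+                await file_system_client.create_file_system()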
+        """
+        return await self._container_client.exists(**kwargs)
+
+    @distributed_trace_async
+    async def _rename_file_system(self, new_name, **kwargs):
+        # type: (str, **Any) -> FileSystemClient
+        """Renames a filesystem.
+
+        Operation is successful only if the source filesystem exists.
+
+        :param str new_name:
+            The new filesystem name the user wants to rename to.
+        :keyword lease:
+            Specify this to perform only if the lease ID given
+            matches the active lease ID of the source filesystem.
+        :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :returns: FileSystemClient with renamed properties.
+        :rtype: ~azure.storage.filedatalake.FileSystemClient
+        """
+        await self._container_client._rename_container(new_name, **kwargs)   # pylint: disable=protected-access
+        renamed_file_system = FileSystemClient(
+                f"{self.scheme}://{self.primary_hostname}", file_system_name=new_name,
+                credential=self._raw_credential, api_version=self.api_version, _configuration=self._config,
+                _pipeline=self._pipeline, _location_mode=self._location_mode, _hosts=self._hosts)
+        return renamed_file_system
+
+    @distributed_trace_async
+    async def delete_file_system(self, **kwargs):
+        # type: (Any) -> None
+        """Marks the specified file system for deletion.
+
+        The file system and any files contained within it are later deleted during garbage collection.
+        If the file system is not found, a ResourceNotFoundError will be raised.
+
+        :keyword lease:
+            If specified, delete_file_system only succeeds if the
+            file system's lease is active and matches this ID.
+            Required if the file system has an active lease.
+        :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :rtype: None
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_file_system_async.py
+                :start-after: [START delete_file_system]
+                :end-before: [END delete_file_system]
+                :language: python
+                :dedent: 16
+                :caption: Deleting a file system in the datalake service.
+        """
+        await self._container_client.delete_container(**kwargs)
+
+    @distributed_trace_async
+    async def get_file_system_properties(self, **kwargs):
+        # type: (Any) -> FileSystemProperties
+        """Returns all user-defined metadata and system properties for the specified
+        file system. The data returned does not include the file system's list of paths.
+
+        :keyword lease:
+            If specified, get_file_system_properties only succeeds if the
+            file system's lease is active and matches this ID.
+        :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :return: Properties for the specified file system within a file system object.
+        :rtype: ~azure.storage.filedatalake.FileSystemProperties
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_file_system_async.py
+                :start-after: [START get_file_system_properties]
+                :end-before: [END get_file_system_properties]
+                :language: python
+                :dedent: 16
+                :caption: Getting properties on the file system.
+        """
+        container_properties = await self._container_client.get_container_properties(**kwargs)
+        return FileSystemProperties._convert_from_container_props(container_properties)  # pylint: disable=protected-access
+
+    @distributed_trace_async
+    async def set_file_system_metadata(  # type: ignore
+            self, metadata,  # type: Dict[str, str]
+            **kwargs
+    ):
+        # type: (...) -> Dict[str, Union[str, datetime]]
+        """Sets one or more user-defined name-value pairs for the specified
+        file system. Each call to this operation replaces all existing metadata
+        attached to the file system. To remove all metadata from the file system,
+        call this operation with no metadata dict.
+
+        :param metadata:
+            A dict containing name-value pairs to associate with the file system as
+            metadata. Example: {'category':'test'}
+        :type metadata: dict[str, str]
+        :keyword lease:
+            If specified, set_file_system_metadata only succeeds if the
+            file system's lease is active and matches this ID.
+        :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :returns: file system-updated property dict (Etag and last modified).
+        :rtype: dict[str, str] or dict[str, ~datetime.datetime]
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_file_system_async.py
+                :start-after: [START set_file_system_metadata]
+                :end-before: [END set_file_system_metadata]
+                :language: python
+                :dedent: 16
+                :caption: Setting metadata on the container.
+        """
+        return await self._container_client.set_container_metadata(metadata=metadata, **kwargs)
+
+    @distributed_trace_async
+    async def set_file_system_access_policy(
+            self, signed_identifiers,  # type: Dict[str, AccessPolicy]
+            public_access=None,  # type: Optional[Union[str, PublicAccess]]
+            **kwargs
+    ):  # type: (...) -> Dict[str, Union[str, datetime]]
+        """Sets the permissions for the specified file system or stored access
+        policies that may be used with Shared Access Signatures. The permissions
+        indicate whether files in a file system may be accessed publicly.
+
+        :param signed_identifiers:
+            A dictionary of access policies to associate with the file system. The
+            dictionary may contain up to 5 elements. An empty dictionary
+            will clear the access policies set on the service.
+        :type signed_identifiers: dict[str, ~azure.storage.filedatalake.AccessPolicy]
+        :param ~azure.storage.filedatalake.PublicAccess public_access:
+            To specify whether data in the file system may be accessed publicly and the level of access.
+        :keyword lease:
+            Required if the file system has an active lease. Value can be a DataLakeLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A datetime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified date/time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A datetime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :returns: filesystem-updated property dict (Etag and last modified).
+        :rtype: dict[str, str or ~datetime.datetime]
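+
+        A minimal usage sketch, assuming `file_system_client` is a
+        FileSystemClient authenticated with an account key (shared-key
+        authorization is required to set an access policy):
+
+        .. code-block:: python
+
+            from datetime import datetime, timedelta
+
+            from azure.storage.filedatalake import AccessPolicy, FileSystemSasPermissions
+
+            # A single read-only stored access policy, valid for one hour.
+            policy = AccessPolicy(
+                permission=FileSystemSasPermissions(read=True),
+                expiry=datetime.utcnow() + timedelta(hours=1))
+            await file_system_client.set_file_system_access_policy(
+                signed_identifiers={'read-only': policy})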
+        """
+        return await self._container_client.set_container_access_policy(signed_identifiers,
+                                                                        public_access=public_access, **kwargs)
+
+    @distributed_trace_async
+    async def get_file_system_access_policy(self, **kwargs):
+        # type: (Any) -> Dict[str, Any]
+        """Gets the permissions for the specified file system.
+        The permissions indicate whether file system data may be accessed publicly.
+
+        :keyword lease:
+            If specified, get_file_system_access_policy only succeeds if the
+            file system's lease is active and matches this ID.
+        :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :returns: Access policy information in a dict.
+        :rtype: dict[str, Any]
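+
+        A minimal usage sketch, assuming `file_system_client` is an
+        authenticated aio FileSystemClient; the returned dict carries the
+        'public_access' and 'signed_identifiers' keys shown below:
+
+        .. code-block:: python
+
+            policy_info = await file_system_client.get_file_system_access_policy()
+            public_access = policy_info['public_access']
+            signed_identifiers = policy_info['signed_identifiers']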
+        """
+        access_policy = await self._container_client.get_container_access_policy(**kwargs)
+        return {
+            'public_access': PublicAccess._from_generated(access_policy['public_access']),  # pylint: disable=protected-access
+            'signed_identifiers': access_policy['signed_identifiers']
+        }
+
+    @distributed_trace
+    def get_paths(
+        self, path: Optional[str] = None,
+        recursive: Optional[bool] = True,
+        max_results: Optional[int] = None,
+        **kwargs: Any
+    ) -> AsyncItemPaged["PathProperties"]:
+        """Returns a generator to list the paths(could be files or directories) under the specified file system.
+        The generator will lazily follow the continuation tokens returned by
+        the service.
+
+        :param str path:
+            Filters the results to return only paths under the specified path.
+        :param Optional[bool] recursive:
+            Set to True to list paths recursively, or False to list only the paths directly
+            under the specified path. Default is True.
+        :param int max_results:
+            An optional value that specifies the maximum
+            number of items to return per page. If omitted or greater than 5,000, the
+            response will include up to 5,000 items per page.
+        :keyword bool upn:
+            If True, the user identity values returned in the x-ms-owner, x-ms-group,
+            and x-ms-acl response headers will be transformed from Azure Active Directory Object IDs to User
+            Principal Names in the owner, group, and acl fields of
+            :class:`~azure.storage.filedatalake.PathProperties`. If False, the values will be returned
+            as Azure Active Directory Object IDs. The default value is False. Note that group and application
+            Object IDs are not translated because they do not have unique friendly names.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :returns: An iterable (auto-paging) response of PathProperties.
+        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.storage.filedatalake.PathProperties]
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_file_system_async.py
+                :start-after: [START get_paths_in_file_system]
+                :end-before: [END get_paths_in_file_system]
+                :language: python
+                :dedent: 12
+                :caption: List the paths in the file system.
+        """
+        timeout = kwargs.pop('timeout', None)
+        command = functools.partial(
+            self._client.file_system.list_paths,
+            path=path,
+            timeout=timeout,
+            **kwargs)
+        return AsyncItemPaged(
+            command, recursive, path=path, max_results=max_results,
+            page_iterator_class=PathPropertiesPaged, **kwargs)
+
+    @distributed_trace_async
+    async def create_directory(self, directory,  # type: Union[DirectoryProperties, str]
+                               metadata=None,  # type: Optional[Dict[str, str]]
+                               **kwargs):
+        # type: (...) -> DataLakeDirectoryClient
+        """
+        Create directory
+
+        :param directory:
+            The directory with which to interact. This can either be the name of the directory,
+            or an instance of DirectoryProperties.
+        :type directory: str or ~azure.storage.filedatalake.DirectoryProperties
+        :param metadata:
+            Name-value pairs associated with the file as metadata.
+        :type metadata: dict(str, str)
+        :keyword ~azure.storage.filedatalake.ContentSettings content_settings:
+            ContentSettings object used to set path properties.
+        :keyword lease:
+            Required if the file has an active lease. Value can be a DataLakeLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str
+        :keyword str umask:
+            Optional and only valid if Hierarchical Namespace is enabled for the account.
+            When creating a file or directory and the parent folder does not have a default ACL,
+            the umask restricts the permissions of the file or directory to be created.
+            The resulting permission is given by p & ^u, where p is the permission and u is the umask.
+            For example, if p is 0777 and u is 0057, then the resulting permission is 0720.
+            The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027.
+            The umask must be specified in 4-digit octal notation (e.g. 0766).
+        :keyword str owner:
+            The owner of the file or directory.
+        :keyword str group:
+            The owning group of the file or directory.
+        :keyword str acl:
+            Sets POSIX access control rights on files and directories. The value is a
+            comma-separated list of access control entries. Each access control entry (ACE) consists of a
+            scope, a type, a user or group identifier, and permissions in the format
+            "[scope:][type]:[id]:[permissions]".
+        :keyword str lease_id:
+            Proposed lease ID, in a GUID string format. The DataLake service returns
+            400 (Invalid request) if the proposed lease ID is not in the correct format.
+        :keyword int lease_duration:
+            Specifies the duration of the lease, in seconds, or negative one
+            (-1) for a lease that never expires. A non-infinite lease can be
+            between 15 and 60 seconds. A lease duration cannot be changed
+            using renew or change.
+        :keyword str permissions:
+            Optional and only valid if Hierarchical Namespace
+            is enabled for the account. Sets POSIX access permissions for the file
+            owner, the file owning group, and others. Each class may be granted
+            read, write, or execute permission.  The sticky bit is also supported.
+            Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are
+            supported.
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :returns: DataLakeDirectoryClient for the new directory and metadata.
+        :rtype: ~azure.storage.filedatalake.aio.DataLakeDirectoryClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_file_system_async.py
+                :start-after: [START create_directory_from_file_system]
+                :end-before: [END create_directory_from_file_system]
+                :language: python
+                :dedent: 12
+                :caption: Create directory in the file system.
+        """
+        directory_client = self.get_directory_client(directory)
+        await directory_client.create_directory(metadata=metadata, **kwargs)
+        return directory_client
+
+    @distributed_trace_async
+    async def delete_directory(self, directory,  # type: Union[DirectoryProperties, str]
+                               **kwargs):
+        # type: (...) -> DataLakeDirectoryClient
+        """
+        Marks the specified path for deletion.
+
+        :param directory:
+            The directory with which to interact. This can either be the name of the directory,
+            or an instance of DirectoryProperties.
+        :type directory: str or ~azure.storage.filedatalake.DirectoryProperties
+        :keyword lease:
+            Required if the file has an active lease. Value can be a LeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :returns: DataLakeDirectoryClient after deleting the specified directory.
+        :rtype: ~azure.storage.filedatalake.aio.DataLakeDirectoryClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_file_system_async.py
+                :start-after: [START delete_directory_from_file_system]
+                :end-before: [END delete_directory_from_file_system]
+                :language: python
+                :dedent: 12
+                :caption: Delete directory in the file system.
+        """
+        directory_client = self.get_directory_client(directory)
+        await directory_client.delete_directory(**kwargs)
+        return directory_client
+
+    @distributed_trace_async
+    async def create_file(self, file,  # type: Union[FileProperties, str]
+                          **kwargs):
+        # type: (...) -> DataLakeFileClient
+        """
+        Create file
+
+        :param file:
+            The file with which to interact. This can either be the name of the file,
+            or an instance of FileProperties.
+        :type file: str or ~azure.storage.filedatalake.FileProperties
+        :keyword ~azure.storage.filedatalake.ContentSettings content_settings:
+            ContentSettings object used to set path properties.
+        :keyword metadata:
+            Name-value pairs associated with the file as metadata.
+        :paramtype metadata: dict[str, str]
+        :keyword lease:
+            Required if the file has an active lease. Value can be a DataLakeLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str
+        :keyword str umask:
+            Optional and only valid if Hierarchical Namespace is enabled for the account.
+            When creating a file or directory and the parent folder does not have a default ACL,
+            the umask restricts the permissions of the file or directory to be created.
+            The resulting permission is given by p & ^u, where p is the permission and u is the umask.
+            For example, if p is 0777 and u is 0057, then the resulting permission is 0720.
+            The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027.
+            The umask must be specified in 4-digit octal notation (e.g. 0766).
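+            As a quick sanity check in Python, ``0o777 & ~0o057 == 0o720`` holds.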
+        :keyword str owner:
+            The owner of the file or directory.
+        :keyword str group:
+            The owning group of the file or directory.
+        :keyword str acl:
+            Sets POSIX access control rights on files and directories. The value is a
+            comma-separated list of access control entries. Each access control entry (ACE) consists of a
+            scope, a type, a user or group identifier, and permissions in the format
+            "[scope:][type]:[id]:[permissions]".
+        :keyword str lease_id:
+            Proposed lease ID, in a GUID string format. The DataLake service returns
+            400 (Invalid request) if the proposed lease ID is not in the correct format.
+        :keyword int lease_duration:
+            Specifies the duration of the lease, in seconds, or negative one
+            (-1) for a lease that never expires. A non-infinite lease can be
+            between 15 and 60 seconds. A lease duration cannot be changed
+            using renew or change.
+        :keyword expires_on:
+            The time at which the file should expire.
+            If the type of expires_on is an int, expiration time will be set
+            as the number of milliseconds elapsed from creation time.
+            If the type of expires_on is datetime, expiration time will be set
+            absolute to the time provided. If no time zone info is provided, this
+            will be interpreted as UTC.
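+            For example, ``expires_on=3600000`` expires the file one hour after
+            creation, while ``expires_on=datetime(2030, 1, 1)`` sets an absolute
+            UTC expiry.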
+        :paramtype expires_on: datetime or int
+        :keyword str permissions:
+            Optional and only valid if Hierarchical Namespace
+            is enabled for the account. Sets POSIX access permissions for the file
+            owner, the file owning group, and others. Each class may be granted
+            read, write, or execute permission.  The sticky bit is also supported.
+            Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are
+            supported.
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :returns: DataLakeFileClient for the newly created file.
+        :rtype: ~azure.storage.filedatalake.aio.DataLakeFileClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_file_system_async.py
+                :start-after: [START create_file_from_file_system]
+                :end-before: [END create_file_from_file_system]
+                :language: python
+                :dedent: 12
+                :caption: Create file in the file system.
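+
+        A minimal inline sketch (hypothetical names; assumes an authenticated
+        aio ``FileSystemClient`` named ``fs_client``, and uses the file client's
+        ``append_data``/``flush_data`` to write content after creation)::
+
+            file_client = await fs_client.create_file("dir/report.txt")
+            await file_client.append_data(b"hello", offset=0, length=5)
+            await file_client.flush_data(5)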
+        """
+        file_client = self.get_file_client(file)
+        await file_client.create_file(**kwargs)
+        return file_client
+
+    @distributed_trace_async
+    async def delete_file(self, file,  # type: Union[FileProperties, str]
+                          **kwargs):
+        # type: (...) -> DataLakeFileClient
+        """
+        Marks the specified file for deletion.
+
+        :param file:
+            The file with which to interact. This can either be the name of the file,
+            or an instance of FileProperties.
+        :type file: str or ~azure.storage.filedatalake.FileProperties
+        :keyword lease:
+            Required if the file has an active lease. Value can be a LeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :return: DataLakeFileClient after deleting the specified file.
+        :rtype: ~azure.storage.filedatalake.aio.DataLakeFileClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_file_system_async.py
+                :start-after: [START delete_file_from_file_system]
+                :end-before: [END delete_file_from_file_system]
+                :language: python
+                :dedent: 12
+                :caption: Delete file in the file system.
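+
+        A minimal inline sketch (hypothetical names; the etag from
+        ``get_file_properties`` guards against a concurrent modification)::
+
+            from azure.core import MatchConditions
+
+            file_client = fs_client.get_file_client("dir/report.txt")
+            props = await file_client.get_file_properties()
+            await fs_client.delete_file(
+                "dir/report.txt",
+                etag=props.etag,
+                match_condition=MatchConditions.IfNotModified,
+            )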
+        """
+        file_client = self.get_file_client(file)
+        await file_client.delete_file(**kwargs)
+        return file_client
+
+    @distributed_trace_async
+    async def _undelete_path(self, deleted_path_name, deletion_id, **kwargs):
+        # type: (str, str, **Any) -> Union[DataLakeDirectoryClient, DataLakeFileClient]
+        """Restores soft-deleted path.
+
+        The operation will only be successful if used within the specified number of days
+        set in the delete retention policy.
+
+        .. versionadded:: 12.4.0
+            This operation was introduced in API version '2020-06-12'.
+
+        :param str deleted_path_name:
+            Specifies the name of the deleted path to restore.
+        :param str deletion_id:
+            Specifies the version of the deleted path to restore.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :returns: The DataLake client for the restored soft-deleted path.
+        :rtype: ~azure.storage.filedatalake.aio.DataLakeDirectoryClient or
+                ~azure.storage.filedatalake.aio.DataLakeFileClient
+        """
+        _, url, undelete_source = self._undelete_path_options(deleted_path_name, deletion_id)
+
+        pipeline = AsyncPipeline(
+            transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access
+            policies=self._pipeline._impl_policies # pylint: disable = protected-access
+        )
+        path_client = AzureDataLakeStorageRESTAPI(
+            url, filesystem=self.file_system_name, path=deleted_path_name, pipeline=pipeline)
+        try:
+            is_file = await path_client.path.undelete(undelete_source=undelete_source, cls=is_file_path, **kwargs)
+            if is_file:
+                return self.get_file_client(deleted_path_name)
+            return self.get_directory_client(deleted_path_name)
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    def _get_root_directory_client(self):
+        # type: () -> DataLakeDirectoryClient
+        """Get a client to interact with the root directory.
+
+        :returns: A DataLakeDirectoryClient.
+        :rtype: ~azure.storage.filedatalake.aio.DataLakeDirectoryClient
+        """
+        return self.get_directory_client('/')
+
+    def get_directory_client(self, directory  # type: Union[DirectoryProperties, str]
+                             ):
+        # type: (...) -> DataLakeDirectoryClient
+        """Get a client to interact with the specified directory.
+
+        The directory need not already exist.
+
+        :param directory:
+            The directory with which to interact. This can either be the name of the directory,
+            or an instance of DirectoryProperties.
+        :type directory: str or ~azure.storage.filedatalake.DirectoryProperties
+        :returns: A DataLakeDirectoryClient.
+        :rtype: ~azure.storage.filedatalake.aio.DataLakeDirectoryClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_file_system_async.py
+                :start-after: [START get_directory_client_from_file_system]
+                :end-before: [END get_directory_client_from_file_system]
+                :language: python
+                :dedent: 12
+                :caption: Getting the directory client to interact with a specific directory.
+        """
+        try:
+            directory_name = directory.get('name')
+        except AttributeError:
+            directory_name = str(directory)
+        _pipeline = AsyncPipeline(
+            transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access
+            policies=self._pipeline._impl_policies # pylint: disable = protected-access
+        )
+        return DataLakeDirectoryClient(self.url, self.file_system_name, directory_name=directory_name,
+                                       credential=self._raw_credential,
+                                       api_version=self.api_version,
+                                       _configuration=self._config, _pipeline=_pipeline,
+                                       _hosts=self._hosts,
+                                       loop=self._loop)
+
+    def get_file_client(self, file_path  # type: Union[FileProperties, str]
+                        ):
+        # type: (...) -> DataLakeFileClient
+        """Get a client to interact with the specified file.
+
+        The file need not already exist.
+
+        :param file_path:
+            The file with which to interact. This can either be the full path of the file
+            (from the root directory), e.g. "directory/subdirectory/file", or an instance
+            of FileProperties.
+        :type file_path: str or ~azure.storage.filedatalake.FileProperties
+        :returns: A DataLakeFileClient.
+        :rtype: ~azure.storage.filedatalake.aio.DataLakeFileClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_file_system_async.py
+                :start-after: [START get_file_client_from_file_system]
+                :end-before: [END get_file_client_from_file_system]
+                :language: python
+                :dedent: 12
+                :caption: Getting the file client to interact with a specific file.
+        """
+        try:
+            file_path = file_path.get('name')
+        except AttributeError:
+            file_path = str(file_path)
+        _pipeline = AsyncPipeline(
+            transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access
+            policies=self._pipeline._impl_policies # pylint: disable = protected-access
+        )
+        return DataLakeFileClient(
+            self.url, self.file_system_name, file_path=file_path, credential=self._raw_credential,
+            api_version=self.api_version,
+            _hosts=self._hosts, _configuration=self._config, _pipeline=_pipeline, loop=self._loop)
+
+    @distributed_trace
+    def list_deleted_paths(self, **kwargs):
+        # type: (Any) -> AsyncItemPaged[DeletedPathProperties]
+        """Returns a generator to list the deleted (file or directory) paths under the specified file system.
+        The generator will lazily follow the continuation tokens returned by
+        the service.
+
+        .. versionadded:: 12.4.0
+            This operation was introduced in API version '2020-06-12'.
+
+        :keyword str path_prefix:
+            Filters the results to return only paths under the specified path.
+        :keyword int results_per_page:
+            An optional value that specifies the maximum number of items to return per page.
+            If omitted or greater than 5,000, the response will include up to 5,000 items per page.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :returns: An iterable (auto-paging) response of DeletedPathProperties.
+        :rtype:
+            ~azure.core.async_paging.AsyncItemPaged[~azure.storage.filedatalake.DeletedPathProperties]
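+
+        A minimal inline sketch (hypothetical names; assumes an authenticated
+        aio ``FileSystemClient`` named ``fs_client``)::
+
+            async for deleted_path in fs_client.list_deleted_paths(path_prefix="logs/"):
+                print(deleted_path.name, deleted_path.deletion_id)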
+        """
+        path_prefix = kwargs.pop('path_prefix', None)
+        timeout = kwargs.pop('timeout', None)
+        results_per_page = kwargs.pop('results_per_page', None)
+        command = functools.partial(
+            self._datalake_client_for_blob_operation.file_system.list_blob_hierarchy_segment,
+            showonly=ListBlobsIncludeItem.deleted,
+            timeout=timeout,
+            **kwargs)
+        return AsyncItemPaged(
+            command, prefix=path_prefix, page_iterator_class=DeletedPathPropertiesPaged,
+            results_per_page=results_per_page, **kwargs)
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/aio/_list_paths_helper.py b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/aio/_list_paths_helper.py
new file mode 100644
index 00000000..4d802635
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/aio/_list_paths_helper.py
@@ -0,0 +1,176 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+# pylint: disable=too-few-public-methods
+from azure.core.exceptions import HttpResponseError
+from azure.core.async_paging import AsyncPageIterator
+
+from .._deserialize import process_storage_error, get_deleted_path_properties_from_generated_code, \
+    return_headers_and_deserialized_path_list
+from .._generated.models import BlobItemInternal, BlobPrefix as GenBlobPrefix
+
+from .._shared.models import DictMixin
+from .._shared.response_handlers import return_context_and_deserialized
+from .._generated.models import Path
+from .._models import PathProperties
+
+
+class DeletedPathPropertiesPaged(AsyncPageIterator):
+    """An Iterable of deleted path properties.
+
+    :ivar str service_endpoint: The service URL.
+    :ivar str prefix: A path name prefix being used to filter the list.
+    :ivar str marker: The continuation token of the current page of results.
+    :ivar int results_per_page: The maximum number of results retrieved per API call.
+    :ivar str continuation_token: The continuation token to retrieve the next page of results.
+    :ivar str location_mode: The location mode being used to list results. The available
+        options include "primary" and "secondary".
+    :ivar current_page: The current page of listed results.
+    :vartype current_page: list(~azure.storage.filedatalake.DeletedPathProperties)
+    :ivar str container: The container that the paths are listed from.
+    :ivar str delimiter: A delimiting character used for hierarchy listing.
+
+    :param callable command: Function to retrieve the next page of items.
+    """
+    def __init__(
+            self, command,
+            container=None,
+            prefix=None,
+            results_per_page=None,
+            continuation_token=None,
+            delimiter=None,
+            location_mode=None):
+        super(DeletedPathPropertiesPaged, self).__init__(
+            get_next=self._get_next_cb,
+            extract_data=self._extract_data_cb,
+            continuation_token=continuation_token or ""
+        )
+        self._command = command
+        self.service_endpoint = None
+        self.prefix = prefix
+        self.marker = None
+        self.results_per_page = results_per_page
+        self.container = container
+        self.delimiter = delimiter
+        self.current_page = None
+        self.location_mode = location_mode
+
+    async def _get_next_cb(self, continuation_token):
+        try:
+            return await self._command(
+                prefix=self.prefix,
+                marker=continuation_token or None,
+                max_results=self.results_per_page,
+                cls=return_context_and_deserialized,
+                use_location=self.location_mode)
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    async def _extract_data_cb(self, get_next_return):
+        self.location_mode, self._response = get_next_return
+        self.service_endpoint = self._response.service_endpoint
+        self.prefix = self._response.prefix
+        self.marker = self._response.marker
+        self.results_per_page = self._response.max_results
+        self.container = self._response.container_name
+        self.current_page = self._response.segment.blob_prefixes + self._response.segment.blob_items
+        self.current_page = [self._build_item(item) for item in self.current_page]
+        self.delimiter = self._response.delimiter
+
+        return self._response.next_marker or None, self.current_page
+
+    def _build_item(self, item):
+        if isinstance(item, BlobItemInternal):
+            file_props = get_deleted_path_properties_from_generated_code(item)
+            file_props.file_system = self.container
+            return file_props
+        if isinstance(item, GenBlobPrefix):
+            return DirectoryPrefix(
+                container=self.container,
+                prefix=item.name,
+                results_per_page=self.results_per_page,
+                location_mode=self.location_mode)
+        return item
+
+
+class DirectoryPrefix(DictMixin):
+    """Directory prefix.
+
+    :ivar str name: Name of the deleted directory.
+    :ivar int results_per_page: The maximum number of results retrieved per API call.
+    :ivar str location_mode: The location mode being used to list results. The available
+        options include "primary" and "secondary".
+    :ivar str file_system: The file system that the deleted paths are listed from.
+    :ivar str delimiter: A delimiting character used for hierarchy listing.
+    """
+    def __init__(self, **kwargs):
+        self.name = kwargs.get('prefix')
+        self.results_per_page = kwargs.get('results_per_page')
+        self.file_system = kwargs.get('container')
+        self.delimiter = kwargs.get('delimiter')
+        self.location_mode = kwargs.get('location_mode')
+
+
+class PathPropertiesPaged(AsyncPageIterator):
+    """An Iterable of Path properties.
+
+    :ivar str path: Filters the results to return only paths under the specified path.
+    :ivar int results_per_page: The maximum number of results retrieved per API call.
+    :ivar str continuation_token: The continuation token to retrieve the next page of results.
+    :ivar list(~azure.storage.filedatalake.PathProperties) current_page: The current page of listed results.
+
+    :param callable command: Function to retrieve the next page of items.
+    :param str path: Filters the results to return only paths under the specified path.
+    :param int max_results: The maximum number of paths to retrieve per
+        call.
+    :param str continuation_token: An opaque continuation token.
+    """
+
+    def __init__(
+            self, command,
+            recursive,
+            path=None,
+            max_results=None,
+            continuation_token=None,
+            upn=None):
+        super(PathPropertiesPaged, self).__init__(
+            get_next=self._get_next_cb,
+            extract_data=self._extract_data_cb,
+            continuation_token=continuation_token or ""
+        )
+        self._command = command
+        self.recursive = recursive
+        self.results_per_page = max_results
+        self.path = path
+        self.upn = upn
+        self.current_page = None
+        self.path_list = None
+
+    async def _get_next_cb(self, continuation_token):
+        try:
+            return await self._command(
+                self.recursive,
+                continuation=continuation_token or None,
+                path=self.path,
+                max_results=self.results_per_page,
+                upn=self.upn,
+                cls=return_headers_and_deserialized_path_list)
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    async def _extract_data_cb(self, get_next_return):
+        self.path_list, self._response = get_next_return
+        self.current_page = [self._build_item(item) for item in self.path_list]
+
+        return self._response['continuation'] or None, self.current_page
+
+    @staticmethod
+    def _build_item(item):
+        if isinstance(item, PathProperties):
+            return item
+        if isinstance(item, Path):
+            path = PathProperties._from_generated(item)  # pylint: disable=protected-access
+            return path
+        return item
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/aio/_models.py b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/aio/_models.py
new file mode 100644
index 00000000..923cbb61
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/aio/_models.py
@@ -0,0 +1,40 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+# pylint: disable=too-few-public-methods
+from azure.storage.blob.aio._models import ContainerPropertiesPaged
+from .._models import FileSystemProperties
+
+
+class FileSystemPropertiesPaged(ContainerPropertiesPaged):
+    """An Iterable of File System properties.
+
+    :ivar str service_endpoint: The service URL.
+    :ivar str prefix: A file system name prefix being used to filter the list.
+    :ivar str marker: The continuation token of the current page of results.
+    :ivar int results_per_page: The maximum number of results retrieved per API call.
+    :ivar str continuation_token: The continuation token to retrieve the next page of results.
+    :ivar str location_mode: The location mode being used to list results. The available
+        options include "primary" and "secondary".
+    :ivar current_page: The current page of listed results.
+    :vartype current_page: list(~azure.storage.filedatalake.FileSystemProperties)
+
+    :param callable command: Function to retrieve the next page of items.
+    :param str prefix: Filters the results to return only file systems whose names
+        begin with the specified prefix.
+    :param int results_per_page: The maximum number of file system names to retrieve per
+        call.
+    :param str continuation_token: An opaque continuation token.
+    """
+
+    def __init__(self, *args, **kwargs):
+        super(FileSystemPropertiesPaged, self).__init__(
+            *args,
+            **kwargs
+        )
+
+    @staticmethod
+    def _build_item(item):
+        return FileSystemProperties._from_generated(item)  # pylint: disable=protected-access
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/aio/_path_client_async.py b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/aio/_path_client_async.py
new file mode 100644
index 00000000..774f687d
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/aio/_path_client_async.py
@@ -0,0 +1,901 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+# pylint: disable=invalid-overridden-method, docstring-keyword-should-match-keyword-only
+
+from datetime import datetime
+from typing import (
+    Any, Dict, Optional, Union,
+    TYPE_CHECKING
+)
+
+from azure.core.exceptions import AzureError, HttpResponseError
+from azure.core.tracing.decorator_async import distributed_trace_async
+from azure.storage.blob.aio import BlobClient
+from .._serialize import get_api_version, compare_api_versions
+from .._shared.base_client_async import AsyncStorageAccountHostsMixin
+from .._path_client import PathClient as PathClientBase
+from .._models import DirectoryProperties, AccessControlChangeResult, AccessControlChangeFailure, \
+    AccessControlChangeCounters, AccessControlChanges
+from .._generated.aio import AzureDataLakeStorageRESTAPI
+from ._data_lake_lease_async import DataLakeLeaseClient
+from .._deserialize import process_storage_error
+from .._shared.policies_async import ExponentialRetry
+
+if TYPE_CHECKING:
+    from azure.core.credentials import AzureNamedKeyCredential, AzureSasCredential
+    from azure.core.credentials_async import AsyncTokenCredential
+    from .._models import ContentSettings, FileProperties
+
+
+class PathClient(AsyncStorageAccountHostsMixin, PathClientBase):
+    """A base client for interacting with a DataLake file/directory, even if the file/directory may not
+    yet exist.
+
+    :param str account_url:
+        The URI to the storage account.
+    :param str file_system_name:
+        The file system for the directory or files.
+    :param str file_path:
+        The whole file path, used to interact with a specific file,
+        e.g. "{directory}/{subdirectory}/{file}".
+    :param credential:
+        The credentials with which to authenticate. This is optional if the
+        account URL already has a SAS token. The value can be a SAS token string,
+        an instance of a AzureSasCredential or AzureNamedKeyCredential from azure.core.credentials,
+        an account shared access key, or an instance of a TokenCredentials class from azure.identity.
+        If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential
+        - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError.
+        If using an instance of AzureNamedKeyCredential, "name" should be the storage account name, and "key"
+        should be the storage account key.
+    :type credential:
+        ~azure.core.credentials.AzureNamedKeyCredential or
+        ~azure.core.credentials.AzureSasCredential or
+        ~azure.core.credentials_async.AsyncTokenCredential or
+        str or dict[str, str] or None
+    :keyword str api_version:
+        The Storage API version to use for requests. Default value is the most recent service version that is
+        compatible with the current SDK. Setting to an older version may result in reduced feature compatibility.
+    :keyword str audience: The audience to use when requesting tokens for Azure Active Directory
+        authentication. Only has an effect when credential is of type TokenCredential. The value could be
+        https://storage.azure.com/ (default) or https://<account>.blob.core.windows.net.
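+
+    A minimal construction sketch (hypothetical account and path names; since
+    ``PathClient`` is a base class, in practice you would construct a concrete
+    subclass such as ``DataLakeFileClient`` or ``DataLakeDirectoryClient`` the
+    same way)::
+
+        from azure.identity.aio import DefaultAzureCredential
+
+        client = PathClient(
+            "https://myaccount.dfs.core.windows.net",
+            file_system_name="my-filesystem",
+            path_name="dir/file.txt",
+            credential=DefaultAzureCredential(),
+        )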
+    """
+    def __init__(
+            self, account_url: str,
+            file_system_name: str,
+            path_name: str,
+            credential: Optional[Union[str, Dict[str, str], "AzureNamedKeyCredential", "AzureSasCredential", "AsyncTokenCredential"]] = None,  # pylint: disable=line-too-long
+            **kwargs: Any
+        ) -> None:
+        kwargs['retry_policy'] = kwargs.get('retry_policy') or ExponentialRetry(**kwargs)
+
+        super(PathClient, self).__init__(account_url,  # pylint: disable=specify-parameter-names-in-call
+                                         file_system_name, path_name,
+                                         credential=credential,
+                                         **kwargs)  # type: ignore
+
+        kwargs.pop('_hosts', None)
+
+        self._blob_client = BlobClient(account_url=self._blob_account_url, container_name=self.file_system_name,
+                                       blob_name=self.path_name,
+                                       credential=credential,
+                                       _hosts=self._blob_client._hosts,
+                                       **kwargs)
+        self._api_version = get_api_version(kwargs)
+        self._client = self._build_generated_client(self.url)
+        self._datalake_client_for_blob_operation = self._build_generated_client(self._blob_client.url)
+        self._loop = kwargs.get('loop', None)
+
+    def _build_generated_client(self, url: str) -> AzureDataLakeStorageRESTAPI:
+        client = AzureDataLakeStorageRESTAPI(
+            url,
+            base_url=url,
+            file_system=self.file_system_name,
+            path=self.path_name,
+            pipeline=self._pipeline
+        )
+        client._config.version = self._api_version  # pylint: disable=protected-access
+        return client
+
+    async def __aexit__(self, *args):
+        await self._blob_client.close()
+        await self._datalake_client_for_blob_operation.close()
+        await super(PathClient, self).__aexit__(*args)
+
+    async def close(self):
+        # type: () -> None
+        """ This method is to close the sockets opened by the client.
+        It need not be used when using with a context manager.
+        """
+        await self.__aexit__()
+
+    async def _create(self, resource_type, content_settings=None, metadata=None, **kwargs):
+        # type: (...) -> Dict[str, Union[str, datetime]]
+        """
+        Create a directory or file.
+
+        :param resource_type:
+            Required for Create File and Create Directory.
+            The value must be "file" or "directory". Possible values include:
+            'directory', 'file'
+        :type resource_type: str
+        :param ~azure.storage.filedatalake.ContentSettings content_settings:
+            ContentSettings object used to set path properties.
+        :param metadata:
+            Name-value pairs associated with the file/directory as metadata.
+        :type metadata: dict(str, str)
+        :keyword lease:
+            Required if the file/directory has an active lease. Value can be a DataLakeLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str
+        :keyword str umask:
+            Optional and only valid if Hierarchical Namespace is enabled for the account.
+            When creating a file or directory and the parent folder does not have a default ACL,
+            the umask restricts the permissions of the file or directory to be created.
+            The resulting permission is given by p & ^u, where p is the permission and u is the umask.
+            For example, if p is 0777 and u is 0057, then the resulting permission is 0720.
+            The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027.
+            The umask must be specified in 4-digit octal notation (e.g. 0766).
+        :keyword str owner:
+            The owner of the file or directory.
+        :keyword str group:
+            The owning group of the file or directory.
+        :keyword str acl:
+            Sets POSIX access control rights on files and directories. The value is a
+            comma-separated list of access control entries. Each access control entry (ACE) consists of a
+            scope, a type, a user or group identifier, and permissions in the format
+            "[scope:][type]:[id]:[permissions]".
+        :keyword str lease_id:
+            Proposed lease ID, in a GUID string format. The DataLake service returns
+            400 (Invalid request) if the proposed lease ID is not in the correct format.
+        :keyword int lease_duration:
+            Specifies the duration of the lease, in seconds, or negative one
+            (-1) for a lease that never expires. A non-infinite lease can be
+            between 15 and 60 seconds. A lease duration cannot be changed
+            using renew or change.
+        :keyword expires_on:
+            The time at which the file should expire.
+            If the type of expires_on is an int, expiration time will be set
+            as the number of milliseconds elapsed from creation time.
+            If the type of expires_on is datetime, expiration time will be set
+            absolute to the time provided. If no time zone info is provided, this
+            will be interpreted as UTC.
+        :paramtype expires_on: datetime or int
+        :keyword permissions:
+            Optional and only valid if Hierarchical Namespace
+            is enabled for the account. Sets POSIX access permissions for the file
+            owner, the file owning group, and others. Each class may be granted
+            read, write, or execute permission.  The sticky bit is also supported.
+            Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are
+            supported.
+        :type permissions: str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword ~azure.storage.filedatalake.CustomerProvidedEncryptionKey cpk:
+            Encrypts the data on the service-side with the given key.
+            Use of customer-provided keys must be done over HTTPS.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :keyword str encryption_context:
+            Specifies the encryption context to set on the file.
+        :return: A dictionary of response headers.
+        :rtype: dict[str, Union[str, datetime]]
+        """
+        lease_id = kwargs.get('lease_id', None)
+        lease_duration = kwargs.get('lease_duration', None)
+        # lease_id and lease_duration are only valid as a pair
+        if bool(lease_id) != bool(lease_duration):
+            raise ValueError("lease_id and lease_duration must be specified together.")
+        options = self._create_path_options(
+            resource_type,
+            content_settings=content_settings,
+            metadata=metadata,
+            **kwargs)
+        try:
+            return await self._client.path.create(**options)
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    async def _delete(self, **kwargs):
+        # type: (**Any) -> Dict[str, Any]
+        """
+        Marks the specified path for deletion.
+
+        :keyword lease:
+            Required if the file/directory has an active lease. Value can be a LeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :returns: A dictionary containing information about the deleted path.
+        :rtype: dict[str, Any]
+        """
+        # Perform paginated delete only if using OAuth, deleting a directory, and api version is 2023-08-03 or later
+        # The pagination is only for ACL checks, the final request remains the atomic delete operation
+        paginated = None
+        if (compare_api_versions(self.api_version, '2023-08-03') >= 0 and
+            hasattr(self.credential, 'get_token') and
+            kwargs.get('recursive')):  # Directory delete will always specify recursive
+            paginated = True
+
+        options = self._delete_path_options(paginated, **kwargs)
+        try:
+            response_headers = await self._client.path.delete(**options)
+            # Loop until continuation token is None for paginated delete
+            while response_headers['continuation']:
+                response_headers = await self._client.path.delete(
+                    continuation=response_headers['continuation'],
+                    **options)
+
+            return response_headers
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    @distributed_trace_async
+    async def set_access_control(self, owner=None,  # type: Optional[str]
+                                 group=None,  # type: Optional[str]
+                                 permissions=None,  # type: Optional[str]
+                                 acl=None,  # type: Optional[str]
+                                 **kwargs):
+        # type: (...) -> Dict[str, Union[str, datetime]]
+        """
+        Set the owner, group, permissions, or access control list for a path.
+
+        :param owner:
+            Optional. The owner of the file or directory.
+        :type owner: str
+        :param group:
+            Optional. The owning group of the file or directory.
+        :type group: str
+        :param permissions:
+            Optional and only valid if Hierarchical Namespace
+            is enabled for the account. Sets POSIX access permissions for the file
+            owner, the file owning group, and others. Each class may be granted
+            read, write, or execute permission.  The sticky bit is also supported.
+            Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are
+            supported.
+            permissions and acl are mutually exclusive.
+        :type permissions: str
+        :param acl:
+            Sets POSIX access control rights on files and directories.
+            The value is a comma-separated list of access control entries. Each
+            access control entry (ACE) consists of a scope, a type, a user or
+            group identifier, and permissions in the format
+            "[scope:][type]:[id]:[permissions]".
+            permissions and acl are mutually exclusive.
+        :type acl: str
+        :keyword lease:
+            Required if the file/directory has an active lease. Value can be a LeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :returns: dict containing access control options after setting modifications (Etag and last modified).
+        :rtype: dict[str, str] or dict[str, ~datetime.datetime]
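+
+        A minimal inline sketch (hypothetical names; the ACL string follows the
+        "[scope:][type]:[id]:[permissions]" format described above)::
+
+            await path_client.set_access_control(acl="user::rwx,group::r-x,other::r--")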
+        """
+        options = self._set_access_control_options(owner=owner, group=group, permissions=permissions, acl=acl, **kwargs)
+        try:
+            return await self._client.path.set_access_control(**options)
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    @distributed_trace_async
+    async def get_access_control(self, upn=None,  # type: Optional[bool]
+                                 **kwargs):
+        # type: (...) -> Dict[str, Any]
+        """
+        Get the owner, group, permissions, or access control list for a path.
+
+        :param upn:
+            Optional. Valid only when Hierarchical Namespace is
+            enabled for the account. If "true", the user identity values returned
+            in the x-ms-owner, x-ms-group, and x-ms-acl response headers will be
+            transformed from Azure Active Directory Object IDs to User Principal
+            Names.  If "false", the values will be returned as Azure Active
+            Directory Object IDs. The default value is false. Note that group and
+            application Object IDs are not translated because they do not have
+            unique friendly names.
+        :type upn: bool
+        :keyword lease:
+            Required if the file/directory has an active lease. Value can be a LeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :returns: response dict containing access control options (Etag and last modified).
+        :rtype: dict[str, str] or dict[str, ~datetime.datetime]
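+
+        A minimal inline sketch (hypothetical names; the exact keys of the
+        returned dict are an assumption based on the response headers described
+        above)::
+
+            access = await path_client.get_access_control(upn=True)
+            print(access['owner'], access['group'], access['permissions'], access['acl'])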
+        """
+        options = self._get_access_control_options(upn=upn, **kwargs)
+        try:
+            return await self._client.path.get_properties(**options)
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    @distributed_trace_async
+    async def set_access_control_recursive(self, acl, **kwargs):
+        # type: (str, **Any) -> AccessControlChangeResult
+        """
+        Sets the Access Control on a path and sub-paths.
+
+        :param acl:
+            Sets POSIX access control rights on files and directories.
+            The value is a comma-separated list of access control entries. Each
+            access control entry (ACE) consists of a scope, a type, a user or
+            group identifier, and permissions in the format
+            "[scope:][type]:[id]:[permissions]".
+        :type acl: str
+        :keyword func(~azure.storage.filedatalake.AccessControlChanges) progress_hook:
+            Callback where the caller can track progress of the operation
+            as well as collect paths that failed to change Access Control.
+        :keyword str continuation_token:
+            Optional continuation token that can be used to resume previously stopped operation.
+        :keyword int batch_size:
+            Optional. If data set size exceeds batch size then operation will be split into multiple
+            requests so that progress can be tracked. Batch size should be between 1 and 2000.
+            The default when unspecified is 2000.
+        :keyword int max_batches:
+            Optional. Defines the maximum number of batches that a single change Access Control
+            operation can execute. If the maximum is reached before all sub-paths are processed,
+            the continuation token can be used to resume the operation.
+            An empty value indicates that the number of batches is unbounded and the operation
+            continues until the end.
+        :keyword bool continue_on_failure:
+            If set to False, the operation will terminate quickly on encountering user errors (4XX).
+            If True, the operation will ignore user errors and proceed with the operation on other sub-entities of
+            the directory.
+            A continuation token will only be returned on user errors when continue_on_failure is True.
+            If not set, the default value is False.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :return: A summary of the recursive operations, including the count of successes and failures,
+            as well as a continuation token in case the operation was terminated prematurely.
+        :rtype: ~azure.storage.filedatalake.AccessControlChangeResult
+        :raises ~azure.core.exceptions.AzureError:
+            The user can restart the operation using the continuation_token field of AzureError if the token is available.
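+
+        A minimal inline sketch (hypothetical names; ``result.counters`` carries
+        the success/failure counts summarized above)::
+
+            result = await path_client.set_access_control_recursive(
+                acl="default:user::rwx")
+            print(result.counters.directories_successful,
+                  result.counters.files_successful,
+                  result.counters.failure_count)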
+        """
+        if not acl:
+            raise ValueError("The Access Control List must be set for this operation")
+
+        progress_hook = kwargs.pop('progress_hook', None)
+        max_batches = kwargs.pop('max_batches', None)
+        options = self._set_access_control_recursive_options(mode='set', acl=acl, **kwargs)
+        return await self._set_access_control_internal(options=options, progress_hook=progress_hook,
+                                                       max_batches=max_batches)
+
+    @distributed_trace_async
+    async def update_access_control_recursive(self, acl, **kwargs):
+        # type: (str, **Any) -> AccessControlChangeResult
+        """
+        Modifies the Access Control on a path and sub-paths.
+
+        :param acl:
+            Modifies POSIX access control rights on files and directories.
+            The value is a comma-separated list of access control entries. Each
+            access control entry (ACE) consists of a scope, a type, a user or
+            group identifier, and permissions in the format
+            "[scope:][type]:[id]:[permissions]".
+        :type acl: str
+        :keyword func(~azure.storage.filedatalake.AccessControlChanges) progress_hook:
+            Callback where the caller can track progress of the operation
+            as well as collect paths that failed to change Access Control.
+        :keyword str continuation_token:
+            Optional continuation token that can be used to resume previously stopped operation.
+        :keyword int batch_size:
+            Optional. If data set size exceeds batch size then operation will be split into multiple
+            requests so that progress can be tracked. Batch size should be between 1 and 2000.
+            The default when unspecified is 2000.
+        :keyword int max_batches:
+            Optional. Defines the maximum number of batches that a single change Access Control
+            operation can execute. If the maximum is reached before all sub-paths are processed,
+            the continuation token can be used to resume the operation.
+            An empty value indicates that the number of batches is unbounded and the operation
+            continues until the end.
+        :keyword bool continue_on_failure:
+            If set to False, the operation will terminate quickly on encountering user errors (4XX).
+            If True, the operation will ignore user errors and proceed with the operation on other sub-entities of
+            the directory.
+            A continuation token will only be returned on user errors when continue_on_failure is True.
+            If not set, the default value is False.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :return: A summary of the recursive operations, including the count of successes and failures,
+            as well as a continuation token in case the operation was terminated prematurely.
+        :rtype: ~azure.storage.filedatalake.AccessControlChangeResult
+        :raises ~azure.core.exceptions.AzureError:
+            User can restart the operation using continuation_token field of AzureError if the token is available.
+        """
+        if not acl:
+            raise ValueError("The Access Control List must be set for this operation")
+
+        progress_hook = kwargs.pop('progress_hook', None)
+        max_batches = kwargs.pop('max_batches', None)
+        options = self._set_access_control_recursive_options(mode='modify', acl=acl, **kwargs)
+        return await self._set_access_control_internal(options=options, progress_hook=progress_hook,
+                                                       max_batches=max_batches)
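+
+    # Illustrative ACE values for the `acl` parameter, following the
+    # "[scope:][type]:[id]:[permissions]" format documented above; the object
+    # ID shown is a made-up placeholder:
+    #
+    #     "user::rwx,default:user:11111111-1111-1111-1111-111111111111:r-x"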
+
+    @distributed_trace_async
+    async def remove_access_control_recursive(self,
+                                              acl,
+                                              **kwargs):
+        # type: (str, **Any) -> AccessControlChangeResult
+        """
+        Removes the Access Control on a path and sub-paths.
+
+        :param acl:
+            Removes POSIX access control rights on files and directories.
+            The value is a comma-separated list of access control entries. Each
+            access control entry (ACE) consists of a scope, a type, and a user or
+            group identifier in the format "[scope:][type]:[id]".
+        :type acl: str
+        :keyword func(~azure.storage.filedatalake.AccessControlChanges) progress_hook:
+            Callback where the caller can track progress of the operation
+            as well as collect paths that failed to change Access Control.
+        :keyword str continuation_token:
+            Optional continuation token that can be used to resume a previously stopped operation.
+        :keyword int batch_size:
+            Optional. If the data set size exceeds the batch size, the operation will be split into multiple
+            requests so that progress can be tracked. The batch size should be between 1 and 2000.
+            The default when unspecified is 2000.
+        :keyword int max_batches:
+            Optional. Defines the maximum number of batches that a single change Access Control operation can execute.
+            If the maximum is reached before all sub-paths are processed,
+            then the continuation token can be used to resume the operation.
+            An empty value indicates that the maximum number of batches is unbounded and the operation
+            continues until the end.
+        :keyword bool continue_on_failure:
+            If set to False, the operation will terminate quickly on encountering user errors (4XX).
+            If True, the operation will ignore user errors and proceed with the operation on other sub-entities of
+            the directory.
+            A continuation token will only be returned when continue_on_failure is True in case of user errors.
+            If not set, the default value is False.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :return: A summary of the recursive operations, including the count of successes and failures,
+            as well as a continuation token in case the operation was terminated prematurely.
+        :rtype: ~azure.storage.filedatalake.AccessControlChangeResult
+        :raises ~azure.core.exceptions.AzureError:
+            User can restart the operation using continuation_token field of AzureError if the token is available.
+        """
+        if not acl:
+            raise ValueError("The Access Control List must be set for this operation")
+
+        progress_hook = kwargs.pop('progress_hook', None)
+        max_batches = kwargs.pop('max_batches', None)
+        options = self._set_access_control_recursive_options(mode='remove', acl=acl, **kwargs)
+        return await self._set_access_control_internal(options=options, progress_hook=progress_hook,
+                                                       max_batches=max_batches)
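+
+    # Usage sketch (illustrative only, not part of this module): resuming a
+    # recursive ACL removal via the continuation token surfaced on AzureError,
+    # as described in the :raises: note above. `directory_client` is
+    # hypothetical and the object ID is a made-up placeholder.
+    #
+    #     from azure.core.exceptions import AzureError
+    #     acl = "user:11111111-1111-1111-1111-111111111111"
+    #     try:
+    #         result = await directory_client.remove_access_control_recursive(acl=acl)
+    #     except AzureError as error:
+    #         if error.continuation_token:
+    #             result = await directory_client.remove_access_control_recursive(
+    #                 acl=acl, continuation_token=error.continuation_token)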
+
+    async def _set_access_control_internal(self, options, progress_hook, max_batches=None):
+        try:
+            continue_on_failure = options.get('force_flag')
+            total_directories_successful = 0
+            total_files_success = 0
+            total_failure_count = 0
+            batch_count = 0
+            last_continuation_token = None
+            current_continuation_token = None
+            continue_operation = True
+            while continue_operation:
+                headers, resp = await self._client.path.set_access_control_recursive(**options)
+
+                # make a running tally so that we can report the final results
+                total_directories_successful += resp.directories_successful
+                total_files_success += resp.files_successful
+                total_failure_count += resp.failure_count
+                batch_count += 1
+                current_continuation_token = headers['continuation']
+
+                if current_continuation_token is not None:
+                    last_continuation_token = current_continuation_token
+
+                if progress_hook is not None:
+                    await progress_hook(AccessControlChanges(
+                        batch_counters=AccessControlChangeCounters(
+                            directories_successful=resp.directories_successful,
+                            files_successful=resp.files_successful,
+                            failure_count=resp.failure_count,
+                        ),
+                        aggregate_counters=AccessControlChangeCounters(
+                            directories_successful=total_directories_successful,
+                            files_successful=total_files_success,
+                            failure_count=total_failure_count,
+                        ),
+                        batch_failures=[AccessControlChangeFailure(
+                            name=failure.name,
+                            is_directory=failure.type == 'DIRECTORY',
+                            error_message=failure.error_message) for failure in resp.failed_entries],
+                        continuation=last_continuation_token))
+
+                # update the continuation token, if there are more operations that cannot be completed in a single call
+                max_batches_satisfied = (max_batches is not None and batch_count == max_batches)
+                continue_operation = bool(current_continuation_token) and not max_batches_satisfied
+                options['continuation'] = current_continuation_token
+
+            # currently the service stops on any failure, so we should send back the last continuation token
+            # for the user to retry the failed updates
+            # otherwise we should just return what the service gave us
+            return AccessControlChangeResult(counters=AccessControlChangeCounters(
+                directories_successful=total_directories_successful,
+                files_successful=total_files_success,
+                failure_count=total_failure_count),
+                continuation=last_continuation_token
+                if total_failure_count > 0 and not continue_on_failure else current_continuation_token)
+        except HttpResponseError as error:
+            error.continuation_token = last_continuation_token
+            process_storage_error(error)
+        except AzureError as error:
+            error.continuation_token = last_continuation_token
+            raise error
+
+    async def _rename_path(self, rename_source, **kwargs):
+        # type: (str, **Any) -> Dict[str, Any]
+        """
+        Rename a directory or file.
+
+        :param rename_source: The value must have the following format: "/{filesystem}/{path}".
+        :type rename_source: str
+        :keyword ~azure.storage.filedatalake.ContentSettings content_settings:
+            ContentSettings object used to set path properties.
+        :keyword source_lease: A lease ID for the source path. If specified,
+            the source path must have an active lease and the lease ID must
+            match.
+        :paramtype source_lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str
+        :keyword lease:
+            Required if the file/directory has an active lease. Value can be a LeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword ~datetime.datetime source_if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime source_if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str source_etag:
+            The source ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `source_match_condition` parameter.
+        :keyword ~azure.core.MatchConditions source_match_condition:
+            The source match condition to use upon the etag.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :returns: A response dict containing information about the renamed path.
+        :rtype: dict[str, Any]
+        """
+        options = self._rename_path_options(
+            rename_source,
+            **kwargs)
+        try:
+            return await self._client.path.create(**options)
+        except HttpResponseError as error:
+            process_storage_error(error)
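+
+    # Illustrative `rename_source` value for this internal helper, following
+    # the "/{filesystem}/{path}" format documented above (names are
+    # placeholders); the public rename_file/rename_directory APIs construct
+    # this value internally:
+    #
+    #     "/my-file-system/source-dir/source-file.txt"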
+
+    async def _get_path_properties(self, **kwargs):
+        # type: (**Any) -> Union[FileProperties, DirectoryProperties]
+        """Returns all user-defined metadata, standard HTTP properties, and
+        system properties for the file or directory. It does not return the content of the directory or file.
+
+        :keyword lease:
+            Required if the directory or file has an active lease. Value can be a DataLakeLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword ~azure.storage.filedatalake.CustomerProvidedEncryptionKey cpk:
+            Decrypts the data on the service-side with the given key.
+            Use of customer-provided keys must be done over HTTPS.
+            Required if the file/directory was created with a customer-provided key.
+        :keyword bool upn:
+            If True, the user identity values returned in the x-ms-owner, x-ms-group,
+            and x-ms-acl response headers will be transformed from Azure Active Directory Object IDs to User
+            Principal Names in the owner, group, and acl fields of the respective property object returned.
+            If False, the values will be returned as Azure Active Directory Object IDs.
+            The default value is False. Note that group and application Object IDs are not translated
+            because they do not have unique friendly names.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :returns:
+            Information including user-defined metadata, standard HTTP properties,
+            and system properties for the file or directory.
+        :rtype: DirectoryProperties or FileProperties
+        """
+        upn = kwargs.pop('upn', None)
+        if upn:
+            headers = kwargs.pop('headers', {})
+            headers['x-ms-upn'] = str(upn)
+            kwargs['headers'] = headers
+        path_properties = await self._blob_client.get_blob_properties(**kwargs)
+        return path_properties
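+
+    # Note (editorial): when `upn=True`, this helper simply forwards the flag
+    # to the service as the `x-ms-upn` request header; the public
+    # get_file_properties / get_directory_properties wrappers expose the same
+    # keyword.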
+
+    async def _exists(self, **kwargs):
+        # type: (**Any) -> bool
+        """
+        Returns True if a path exists and returns False otherwise.
+
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :returns: True if a path exists, False otherwise.
+        :rtype: bool
+        """
+        return await self._blob_client.exists(**kwargs)
+
+    @distributed_trace_async
+    async def set_metadata(self, metadata,  # type: Dict[str, str]
+                           **kwargs):
+        # type: (...) -> Dict[str, Union[str, datetime]]
+        """Sets one or more user-defined name-value pairs for the specified
+        file system. Each call to this operation replaces all existing metadata
+        attached to the file system. To remove all metadata from the file system,
+        call this operation with no metadata dict.
+
+        :param metadata:
+            A dict containing name-value pairs to associate with the file or directory as
+            metadata. Example: {'category':'test'}
+        :type metadata: dict[str, str]
+        :keyword lease:
+            If specified, set_metadata only succeeds if the
+            file or directory's lease is active and matches this ID.
+        :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword ~azure.storage.filedatalake.CustomerProvidedEncryptionKey cpk:
+            Encrypts the data on the service-side with the given key.
+            Use of customer-provided keys must be done over HTTPS.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :returns: file/directory-updated property dict (Etag and last modified).
+        :rtype: dict[str, str] or dict[str, ~datetime.datetime]
+        """
+        return await self._blob_client.set_blob_metadata(metadata=metadata, **kwargs)
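+
+    # Usage sketch (illustrative only, not part of this module); `file_client`
+    # is a hypothetical async DataLakeFileClient:
+    #
+    #     await file_client.set_metadata({'category': 'test', 'owner': 'me'})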
+
+    @distributed_trace_async
+    async def set_http_headers(self, content_settings=None,  # type: Optional[ContentSettings]
+                               **kwargs):
+        # type: (...) -> Dict[str, Any]
+        """Sets system properties on the file or directory.
+
+        If one property is set for the content_settings, all properties will be overridden.
+
+        :param ~azure.storage.filedatalake.ContentSettings content_settings:
+            ContentSettings object used to set file/directory properties.
+        :keyword lease:
+            If specified, set_http_headers only succeeds if the
+            file/directory's lease is active and matches this ID.
+        :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :returns: file/directory-updated property dict (Etag and last modified)
+        :rtype: dict[str, Any]
+        """
+        return await self._blob_client.set_http_headers(content_settings=content_settings, **kwargs)
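+
+    # Usage sketch (illustrative only, not part of this module). ContentSettings
+    # is the public class from azure.storage.filedatalake; `file_client` is
+    # hypothetical. Per the note above, fields left unset on ContentSettings
+    # are overridden too:
+    #
+    #     from azure.storage.filedatalake import ContentSettings
+    #     await file_client.set_http_headers(
+    #         content_settings=ContentSettings(
+    #             content_type="application/json",
+    #             cache_control="no-cache"))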
+
+    @distributed_trace_async
+    async def acquire_lease(self, lease_duration=-1,  # type: Optional[int]
+                            lease_id=None,  # type: Optional[str]
+                            **kwargs):
+        # type: (...) -> DataLakeLeaseClient
+        """
+        Requests a new lease. If the file or directory does not have an active lease,
+        the DataLake service creates a lease on the file/directory and returns a new
+        lease ID.
+
+        :param int lease_duration:
+            Specifies the duration of the lease, in seconds, or negative one
+            (-1) for a lease that never expires. A non-infinite lease can be
+            between 15 and 60 seconds. A lease duration cannot be changed
+            using renew or change. Default is -1 (infinite lease).
+        :param str lease_id:
+            Proposed lease ID, in a GUID string format. The DataLake service returns
+            400 (Invalid request) if the proposed lease ID is not in the correct format.
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :returns: A DataLakeLeaseClient object that can be used in a context manager.
+        :rtype: ~azure.storage.filedatalake.aio.DataLakeLeaseClient
+        """
+        lease = DataLakeLeaseClient(self, lease_id=lease_id)  # type: ignore
+        await lease.acquire(lease_duration=lease_duration, **kwargs)
+        return lease
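+
+    # Usage sketch (illustrative only, not part of this module); `file_client`
+    # is a hypothetical async DataLakeFileClient. The returned lease can be
+    # used as an async context manager, which releases the lease on exit:
+    #
+    #     async with await file_client.acquire_lease(lease_duration=15) as lease:
+    #         await file_client.set_metadata({'k': 'v'}, lease=lease)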
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/aio/_upload_helper.py b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/aio/_upload_helper.py
new file mode 100644
index 00000000..40d24a03
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/aio/_upload_helper.py
@@ -0,0 +1,104 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+from azure.core.exceptions import HttpResponseError
+from .._deserialize import (
+    process_storage_error)
+from .._shared.response_handlers import return_response_headers
+from .._shared.uploads_async import (
+    upload_data_chunks,
+    DataLakeFileChunkUploader, upload_substream_blocks)
+
+
+def _any_conditions(modified_access_conditions=None, **kwargs):  # pylint: disable=unused-argument
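+    # Returns True if any precondition (If-Modified-Since, If-Unmodified-Since,
+    # If-None-Match, If-Match) is set. Assumes modified_access_conditions is a
+    # populated model rather than None; upstream callers construct it before
+    # this helper is invoked.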
+    return any([
+        modified_access_conditions.if_modified_since,
+        modified_access_conditions.if_unmodified_since,
+        modified_access_conditions.if_none_match,
+        modified_access_conditions.if_match
+    ])
+
+
+async def upload_datalake_file(
+        client=None,
+        stream=None,
+        length=None,
+        overwrite=None,
+        validate_content=None,
+        max_concurrency=None,
+        file_settings=None,
+        **kwargs):
+    try:
+        if length == 0:
+            return {}
+        properties = kwargs.pop('properties', None)
+        umask = kwargs.pop('umask', None)
+        permissions = kwargs.pop('permissions', None)
+        path_http_headers = kwargs.pop('path_http_headers', None)
+        modified_access_conditions = kwargs.pop('modified_access_conditions', None)
+        chunk_size = kwargs.pop('chunk_size', 100 * 1024 * 1024)
+        encryption_context = kwargs.pop('encryption_context', None)
+
+        if not overwrite:
+            # if the caller didn't specify access conditions, they cannot flush
+            # data to an existing file (if_none_match='*' makes the operation
+            # fail when the file already exists)
+            if not _any_conditions(modified_access_conditions):
+                modified_access_conditions.if_none_match = '*'
+            if properties or umask or permissions:
+                raise ValueError("metadata, umask and permissions can be set only when overwrite is enabled")
+
+        if overwrite:
+            response = await client.create(
+                resource='file',
+                path_http_headers=path_http_headers,
+                properties=properties,
+                modified_access_conditions=modified_access_conditions,
+                umask=umask,
+                permissions=permissions,
+                encryption_context=encryption_context,
+                cls=return_response_headers,
+                **kwargs)
+
+            # these modified_access_conditions will be applied to flush_data to
+            # make sure no other writer flushes between the create call above
+            # and the current flush
+            modified_access_conditions.if_match = response['etag']
+            modified_access_conditions.if_none_match = None
+            modified_access_conditions.if_modified_since = None
+            modified_access_conditions.if_unmodified_since = None
+
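+        # Choose the buffered chunk uploader when the stream is not seekable,
+        # content validation is requested, or the chunk size is below the large
+        # chunk upload threshold; otherwise upload seekable sub-stream blocks.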
+        use_original_upload_path = file_settings.use_byte_buffer or \
+            validate_content or chunk_size < file_settings.min_large_chunk_upload_threshold or \
+            hasattr(stream, 'seekable') and not stream.seekable() or \
+            not hasattr(stream, 'seek') or not hasattr(stream, 'tell')
+
+        if use_original_upload_path:
+            await upload_data_chunks(
+                service=client,
+                uploader_class=DataLakeFileChunkUploader,
+                total_size=length,
+                chunk_size=chunk_size,
+                stream=stream,
+                max_concurrency=max_concurrency,
+                validate_content=validate_content,
+                **kwargs)
+        else:
+            await upload_substream_blocks(
+                service=client,
+                uploader_class=DataLakeFileChunkUploader,
+                total_size=length,
+                chunk_size=chunk_size,
+                max_concurrency=max_concurrency,
+                stream=stream,
+                validate_content=validate_content,
+                **kwargs
+            )
+
+        return await client.flush_data(position=length,
+                                       path_http_headers=path_http_headers,
+                                       modified_access_conditions=modified_access_conditions,
+                                       close=True,
+                                       cls=return_response_headers,
+                                       **kwargs)
+    except HttpResponseError as error:
+        process_storage_error(error)
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/py.typed b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/py.typed
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/py.typed