Diffstat (limited to '.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/aio')
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/aio/__init__.py | 29
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/aio/_azure_data_lake_storage_restapi.py | 114
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/aio/_configuration.py | 61
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/aio/_patch.py | 20
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/aio/operations/__init__.py | 29
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/aio/operations/_file_system_operations.py | 628
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/aio/operations/_patch.py | 20
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/aio/operations/_path_operations.py | 1968
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/aio/operations/_service_operations.py | 161
9 files changed, 3030 insertions(+), 0 deletions(-)
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/aio/__init__.py b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/aio/__init__.py
new file mode 100644
index 00000000..8a9f7149
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/aio/__init__.py
@@ -0,0 +1,29 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+# pylint: disable=wrong-import-position
+
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from ._patch import *  # pylint: disable=unused-wildcard-import
+
+from ._azure_data_lake_storage_restapi import AzureDataLakeStorageRESTAPI  # type: ignore
+
+try:
+    from ._patch import __all__ as _patch_all
+    from ._patch import *
+except ImportError:
+    _patch_all = []
+from ._patch import patch_sdk as _patch_sdk
+
+__all__ = [
+    "AzureDataLakeStorageRESTAPI",
+]
+__all__.extend([p for p in _patch_all if p not in __all__])  # pyright: ignore
+
+_patch_sdk()
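
For orientation, the only name this generated package guarantees to export is the async client class, plus whatever a local _patch module adds to __all__; a minimal import sketch, assuming the azure-storage-file-datalake wheel is installed in the active environment:

import azure.storage.filedatalake._generated.aio as aio_gen

# AzureDataLakeStorageRESTAPI is always listed; _patch may extend __all__.
print(aio_gen.__all__)
print(aio_gen.AzureDataLakeStorageRESTAPI)
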
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/aio/_azure_data_lake_storage_restapi.py b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/aio/_azure_data_lake_storage_restapi.py
new file mode 100644
index 00000000..ecfcec9b
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/aio/_azure_data_lake_storage_restapi.py
@@ -0,0 +1,114 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+from copy import deepcopy
+from typing import Any, Awaitable, Optional
+from typing_extensions import Self
+
+from azure.core import AsyncPipelineClient
+from azure.core.pipeline import policies
+from azure.core.rest import AsyncHttpResponse, HttpRequest
+
+from .. import models as _models
+from .._serialization import Deserializer, Serializer
+from ._configuration import AzureDataLakeStorageRESTAPIConfiguration
+from .operations import FileSystemOperations, PathOperations, ServiceOperations
+
+
+class AzureDataLakeStorageRESTAPI:  # pylint: disable=client-accepts-api-version-keyword
+    """Azure Data Lake Storage provides storage for Hadoop and other big data workloads.
+
+    :ivar service: ServiceOperations operations
+    :vartype service: azure.storage.filedatalake.aio.operations.ServiceOperations
+    :ivar file_system: FileSystemOperations operations
+    :vartype file_system: azure.storage.filedatalake.aio.operations.FileSystemOperations
+    :ivar path: PathOperations operations
+    :vartype path: azure.storage.filedatalake.aio.operations.PathOperations
+    :param url: The URL of the service account, container, or blob that is the target of the
+     desired operation. Required.
+    :type url: str
+    :param base_url: Service URL. Default value is "".
+    :type base_url: str
+    :param x_ms_lease_duration: The lease duration is required to acquire a lease, and specifies
+     the duration of the lease in seconds.  The lease duration must be between 15 and 60 seconds or
+     -1 for infinite lease. Default value is None.
+    :type x_ms_lease_duration: int
+    :keyword resource: The value must be "filesystem" for all filesystem operations. Default value
+     is "filesystem". Note that overriding this default value may result in unsupported behavior.
+    :paramtype resource: str
+    :keyword version: Specifies the version of the operation to use for this request. Default value
+     is "2025-01-05". Note that overriding this default value may result in unsupported behavior.
+    :paramtype version: str
+    """
+
+    def __init__(  # pylint: disable=missing-client-constructor-parameter-credential
+        self, url: str, base_url: str = "", x_ms_lease_duration: Optional[int] = None, **kwargs: Any
+    ) -> None:
+        self._config = AzureDataLakeStorageRESTAPIConfiguration(
+            url=url, x_ms_lease_duration=x_ms_lease_duration, **kwargs
+        )
+        _policies = kwargs.pop("policies", None)
+        if _policies is None:
+            _policies = [
+                policies.RequestIdPolicy(**kwargs),
+                self._config.headers_policy,
+                self._config.user_agent_policy,
+                self._config.proxy_policy,
+                policies.ContentDecodePolicy(**kwargs),
+                self._config.redirect_policy,
+                self._config.retry_policy,
+                self._config.authentication_policy,
+                self._config.custom_hook_policy,
+                self._config.logging_policy,
+                policies.DistributedTracingPolicy(**kwargs),
+                policies.SensitiveHeaderCleanupPolicy(**kwargs) if self._config.redirect_policy else None,
+                self._config.http_logging_policy,
+            ]
+        self._client: AsyncPipelineClient = AsyncPipelineClient(base_url=base_url, policies=_policies, **kwargs)
+
+        client_models = {k: v for k, v in _models.__dict__.items() if isinstance(v, type)}
+        self._serialize = Serializer(client_models)
+        self._deserialize = Deserializer(client_models)
+        self._serialize.client_side_validation = False
+        self.service = ServiceOperations(self._client, self._config, self._serialize, self._deserialize)
+        self.file_system = FileSystemOperations(self._client, self._config, self._serialize, self._deserialize)
+        self.path = PathOperations(self._client, self._config, self._serialize, self._deserialize)
+
+    def _send_request(
+        self, request: HttpRequest, *, stream: bool = False, **kwargs: Any
+    ) -> Awaitable[AsyncHttpResponse]:
+        """Runs the network request through the client's chained policies.
+
+        >>> from azure.core.rest import HttpRequest
+        >>> request = HttpRequest("GET", "https://www.example.org/")
+        <HttpRequest [GET], url: 'https://www.example.org/'>
+        >>> response = await client._send_request(request)
+        <AsyncHttpResponse: 200 OK>
+
+        For more information on this code flow, see https://aka.ms/azsdk/dpcodegen/python/send_request
+
+        :param request: The network request you want to make. Required.
+        :type request: ~azure.core.rest.HttpRequest
+        :keyword bool stream: Whether the response payload will be streamed. Defaults to False.
+        :return: The response of your network call. Does not do error handling on your response.
+        :rtype: ~azure.core.rest.AsyncHttpResponse
+        """
+
+        request_copy = deepcopy(request)
+        request_copy.url = self._client.format_url(request_copy.url)
+        return self._client.send_request(request_copy, stream=stream, **kwargs)  # type: ignore
+
+    async def close(self) -> None:
+        await self._client.close()
+
+    async def __aenter__(self) -> Self:
+        await self._client.__aenter__()
+        return self
+
+    async def __aexit__(self, *exc_details: Any) -> None:
+        await self._client.__aexit__(*exc_details)
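
A minimal end-to-end sketch of the client above, assuming a placeholder account URL and that authorization is handled out of band (for example a SAS appended to the URL); in normal use this generated class sits behind the public azure.storage.filedatalake aio clients rather than being constructed directly:

import asyncio

from azure.core.rest import HttpRequest
from azure.storage.filedatalake._generated.aio import AzureDataLakeStorageRESTAPI


async def main() -> None:
    url = "https://myaccount.dfs.core.windows.net/myfilesystem"  # placeholder
    async with AzureDataLakeStorageRESTAPI(url=url) as client:
        # _send_request pushes a raw request through the chained policies,
        # as documented above; it performs no error handling on the response.
        request = HttpRequest("GET", url)
        response = await client._send_request(request)
        print(response.status_code)


asyncio.run(main())
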
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/aio/_configuration.py b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/aio/_configuration.py
new file mode 100644
index 00000000..57b28d3b
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/aio/_configuration.py
@@ -0,0 +1,61 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+from typing import Any, Literal, Optional
+
+from azure.core.pipeline import policies
+
+VERSION = "unknown"
+
+
+class AzureDataLakeStorageRESTAPIConfiguration:  # pylint: disable=too-many-instance-attributes
+    """Configuration for AzureDataLakeStorageRESTAPI.
+
+    Note that all parameters used to create this instance are saved as instance
+    attributes.
+
+    :param url: The URL of the service account, container, or blob that is the target of the
+     desired operation. Required.
+    :type url: str
+    :param x_ms_lease_duration: The lease duration is required to acquire a lease, and specifies
+     the duration of the lease in seconds.  The lease duration must be between 15 and 60 seconds or
+     -1 for infinite lease. Default value is None.
+    :type x_ms_lease_duration: int
+    :keyword resource: The value must be "filesystem" for all filesystem operations. Default value
+     is "filesystem". Note that overriding this default value may result in unsupported behavior.
+    :paramtype resource: str
+    :keyword version: Specifies the version of the operation to use for this request. Default value
+     is "2025-01-05". Note that overriding this default value may result in unsupported behavior.
+    :paramtype version: str
+    """
+
+    def __init__(self, url: str, x_ms_lease_duration: Optional[int] = None, **kwargs: Any) -> None:
+        resource: Literal["filesystem"] = kwargs.pop("resource", "filesystem")
+        version: Literal["2025-01-05"] = kwargs.pop("version", "2025-01-05")
+
+        if url is None:
+            raise ValueError("Parameter 'url' must not be None.")
+
+        self.url = url
+        self.x_ms_lease_duration = x_ms_lease_duration
+        self.resource = resource
+        self.version = version
+        kwargs.setdefault("sdk_moniker", "azuredatalakestoragerestapi/{}".format(VERSION))
+        self.polling_interval = kwargs.get("polling_interval", 30)
+        self._configure(**kwargs)
+
+    def _configure(self, **kwargs: Any) -> None:
+        self.user_agent_policy = kwargs.get("user_agent_policy") or policies.UserAgentPolicy(**kwargs)
+        self.headers_policy = kwargs.get("headers_policy") or policies.HeadersPolicy(**kwargs)
+        self.proxy_policy = kwargs.get("proxy_policy") or policies.ProxyPolicy(**kwargs)
+        self.logging_policy = kwargs.get("logging_policy") or policies.NetworkTraceLoggingPolicy(**kwargs)
+        self.http_logging_policy = kwargs.get("http_logging_policy") or policies.HttpLoggingPolicy(**kwargs)
+        self.custom_hook_policy = kwargs.get("custom_hook_policy") or policies.CustomHookPolicy(**kwargs)
+        self.redirect_policy = kwargs.get("redirect_policy") or policies.AsyncRedirectPolicy(**kwargs)
+        self.retry_policy = kwargs.get("retry_policy") or policies.AsyncRetryPolicy(**kwargs)
+        self.authentication_policy = kwargs.get("authentication_policy")
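
Because _configure resolves each policy from kwargs before falling back to the azure-core defaults, individual pipeline policies can be overridden at construction time; a hedged sketch (the retry count and account URL are illustrative only):

from azure.core.pipeline import policies

from azure.storage.filedatalake._generated.aio._configuration import (
    AzureDataLakeStorageRESTAPIConfiguration,
)

config = AzureDataLakeStorageRESTAPIConfiguration(
    url="https://myaccount.dfs.core.windows.net/myfilesystem",  # placeholder
    retry_policy=policies.AsyncRetryPolicy(retry_total=3),
    logging_enable=True,  # forwarded to NetworkTraceLoggingPolicy via **kwargs
)
print(config.version, config.resource)  # "2025-01-05" "filesystem"
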
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/aio/_patch.py b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/aio/_patch.py
new file mode 100644
index 00000000..f7dd3251
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/aio/_patch.py
@@ -0,0 +1,20 @@
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------
+"""Customize generated code here.
+
+Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize
+"""
+from typing import List
+
+__all__: List[str] = []  # Add all objects you want publicly available to users at this package level
+
+
+def patch_sdk():
+    """Do not remove from this file.
+
+    `patch_sdk` is a last resort escape hatch that allows you to do customizations
+    you can't accomplish using the techniques described in
+    https://aka.ms/azsdk/python/dpcodegen/python/customize
+    """
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/aio/operations/__init__.py b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/aio/operations/__init__.py
new file mode 100644
index 00000000..56a7ece3
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/aio/operations/__init__.py
@@ -0,0 +1,29 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+# pylint: disable=wrong-import-position
+
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from ._patch import *  # pylint: disable=unused-wildcard-import
+
+from ._service_operations import ServiceOperations  # type: ignore
+from ._file_system_operations import FileSystemOperations  # type: ignore
+from ._path_operations import PathOperations  # type: ignore
+
+from ._patch import __all__ as _patch_all
+from ._patch import *
+from ._patch import patch_sdk as _patch_sdk
+
+__all__ = [
+    "ServiceOperations",
+    "FileSystemOperations",
+    "PathOperations",
+]
+__all__.extend([p for p in _patch_all if p not in __all__])  # pyright: ignore
+_patch_sdk()
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/aio/operations/_file_system_operations.py b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/aio/operations/_file_system_operations.py
new file mode 100644
index 00000000..ee562931
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/aio/operations/_file_system_operations.py
@@ -0,0 +1,628 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+import sys
+from typing import Any, Callable, Dict, List, Literal, Optional, TypeVar, Union
+
+from azure.core import AsyncPipelineClient
+from azure.core.exceptions import (
+    ClientAuthenticationError,
+    HttpResponseError,
+    ResourceExistsError,
+    ResourceNotFoundError,
+    ResourceNotModifiedError,
+    map_error,
+)
+from azure.core.pipeline import PipelineResponse
+from azure.core.rest import AsyncHttpResponse, HttpRequest
+from azure.core.tracing.decorator_async import distributed_trace_async
+from azure.core.utils import case_insensitive_dict
+
+from ... import models as _models
+from ..._serialization import Deserializer, Serializer
+from ...operations._file_system_operations import (
+    build_create_request,
+    build_delete_request,
+    build_get_properties_request,
+    build_list_blob_hierarchy_segment_request,
+    build_list_paths_request,
+    build_set_properties_request,
+)
+from .._configuration import AzureDataLakeStorageRESTAPIConfiguration
+
+if sys.version_info >= (3, 9):
+    from collections.abc import MutableMapping
+else:
+    from typing import MutableMapping  # type: ignore
+T = TypeVar("T")
+ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
+
+
+class FileSystemOperations:
+    """
+    .. warning::
+        **DO NOT** instantiate this class directly.
+
+        Instead, you should access the following operations through
+        :class:`~azure.storage.filedatalake.aio.AzureDataLakeStorageRESTAPI`'s
+        :attr:`file_system` attribute.
+    """
+
+    models = _models
+
+    def __init__(self, *args, **kwargs) -> None:
+        input_args = list(args)
+        self._client: AsyncPipelineClient = input_args.pop(0) if input_args else kwargs.pop("client")
+        self._config: AzureDataLakeStorageRESTAPIConfiguration = (
+            input_args.pop(0) if input_args else kwargs.pop("config")
+        )
+        self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer")
+        self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer")
+
+    @distributed_trace_async
+    async def create(
+        self,
+        request_id_parameter: Optional[str] = None,
+        timeout: Optional[int] = None,
+        properties: Optional[str] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """Create FileSystem.
+
+        Create a FileSystem rooted at the specified location. If the FileSystem already exists, the
+        operation fails.  This operation does not support conditional HTTP requests.
+
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param properties: Optional. User-defined properties to be stored with the filesystem, in the
+         format of a comma-separated list of name and value pairs "n1=v1, n2=v2, ...", where each value
+         is a base64 encoded string. Note that the string may only contain ASCII characters in the
+         ISO-8859-1 character set.  If the filesystem exists, any properties not included in the list
+         will be removed.  All properties are removed if the header is omitted.  To merge new and
+         existing properties, first get all existing properties and the current E-Tag, then make a
+         conditional request with the E-Tag and include values for all properties. Default value is
+         None.
+        :type properties: str
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = kwargs.pop("params", {}) or {}
+
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _request = build_create_request(
+            url=self._config.url,
+            request_id_parameter=request_id_parameter,
+            timeout=timeout,
+            properties=properties,
+            resource=self._config.resource,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [201]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["x-ms-namespace-enabled"] = self._deserialize(
+            "str", response.headers.get("x-ms-namespace-enabled")
+        )
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
+    @distributed_trace_async
+    async def set_properties(
+        self,
+        request_id_parameter: Optional[str] = None,
+        timeout: Optional[int] = None,
+        properties: Optional[str] = None,
+        modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """Set FileSystem Properties.
+
+        Set properties for the FileSystem.  This operation supports conditional HTTP requests.  For
+        more information, see `Specifying Conditional Headers for Blob Service Operations
+        <https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations>`_.
+
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param properties: Optional. User-defined properties to be stored with the filesystem, in the
+         format of a comma-separated list of name and value pairs "n1=v1, n2=v2, ...", where each value
+         is a base64 encoded string. Note that the string may only contain ASCII characters in the
+         ISO-8859-1 character set.  If the filesystem exists, any properties not included in the list
+         will be removed.  All properties are removed if the header is omitted.  To merge new and
+         existing properties, first get all existing properties and the current E-Tag, then make a
+         conditional request with the E-Tag and include values for all properties. Default value is
+         None.
+        :type properties: str
+        :param modified_access_conditions: Parameter group. Default value is None.
+        :type modified_access_conditions: ~azure.storage.filedatalake.models.ModifiedAccessConditions
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = kwargs.pop("params", {}) or {}
+
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _if_modified_since = None
+        _if_unmodified_since = None
+        if modified_access_conditions is not None:
+            _if_modified_since = modified_access_conditions.if_modified_since
+            _if_unmodified_since = modified_access_conditions.if_unmodified_since
+
+        _request = build_set_properties_request(
+            url=self._config.url,
+            request_id_parameter=request_id_parameter,
+            timeout=timeout,
+            properties=properties,
+            if_modified_since=_if_modified_since,
+            if_unmodified_since=_if_unmodified_since,
+            resource=self._config.resource,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
+    @distributed_trace_async
+    async def get_properties(
+        self, request_id_parameter: Optional[str] = None, timeout: Optional[int] = None, **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """Get FileSystem Properties.
+
+        All system and user-defined filesystem properties are specified in the response headers.
+
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = kwargs.pop("params", {}) or {}
+
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _request = build_get_properties_request(
+            url=self._config.url,
+            request_id_parameter=request_id_parameter,
+            timeout=timeout,
+            resource=self._config.resource,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["x-ms-properties"] = self._deserialize("str", response.headers.get("x-ms-properties"))
+        response_headers["x-ms-namespace-enabled"] = self._deserialize(
+            "str", response.headers.get("x-ms-namespace-enabled")
+        )
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
+    @distributed_trace_async
+    async def delete(
+        self,
+        request_id_parameter: Optional[str] = None,
+        timeout: Optional[int] = None,
+        modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """Delete FileSystem.
+
+        Marks the FileSystem for deletion.  When a FileSystem is deleted, a FileSystem with the same
+        identifier cannot be created for at least 30 seconds. While the filesystem is being deleted,
+        attempts to create a filesystem with the same identifier will fail with status code 409
+        (Conflict), with the service returning additional error information indicating that the
+        filesystem is being deleted. All other operations, including operations on any files or
+        directories within the filesystem, will fail with status code 404 (Not Found) while the
+        filesystem is being deleted. This operation supports conditional HTTP requests.  For more
+        information, see `Specifying Conditional Headers for Blob Service Operations
+        <https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations>`_.
+
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param modified_access_conditions: Parameter group. Default value is None.
+        :type modified_access_conditions: ~azure.storage.filedatalake.models.ModifiedAccessConditions
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = kwargs.pop("params", {}) or {}
+
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _if_modified_since = None
+        _if_unmodified_since = None
+        if modified_access_conditions is not None:
+            _if_modified_since = modified_access_conditions.if_modified_since
+            _if_unmodified_since = modified_access_conditions.if_unmodified_since
+
+        _request = build_delete_request(
+            url=self._config.url,
+            request_id_parameter=request_id_parameter,
+            timeout=timeout,
+            if_modified_since=_if_modified_since,
+            if_unmodified_since=_if_unmodified_since,
+            resource=self._config.resource,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [202]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
+    @distributed_trace_async
+    async def list_paths(
+        self,
+        recursive: bool,
+        request_id_parameter: Optional[str] = None,
+        timeout: Optional[int] = None,
+        continuation: Optional[str] = None,
+        path: Optional[str] = None,
+        max_results: Optional[int] = None,
+        upn: Optional[bool] = None,
+        **kwargs: Any
+    ) -> _models.PathList:
+        # pylint: disable=line-too-long
+        """List Paths.
+
+        List FileSystem paths and their properties.
+
+        :param recursive: If "true", all paths are listed; otherwise, only paths at the root of the filesystem are listed. Required.
+        :type recursive: bool
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param continuation: Optional.  The number of paths returned with each invocation is
+         limited.  If the number of paths to be returned exceeds this limit, a continuation token is
+         returned in the response header x-ms-continuation.  When a continuation token is returned in
+         the response, it must be specified in a subsequent invocation of the list operation to
+         continue listing the paths. Default value is None.
+        :type continuation: str
+        :param path: Optional.  Filters results to paths within the specified directory. An error
+         occurs if the directory does not exist. Default value is None.
+        :type path: str
+        :param max_results: An optional value that specifies the maximum number of items to return. If
+         omitted or greater than 5,000, the response will include up to 5,000 items. Default value is
+         None.
+        :type max_results: int
+        :param upn: Optional. Valid only when Hierarchical Namespace is enabled for the account. If
+         "true", the user identity values returned in the x-ms-owner, x-ms-group, and x-ms-acl response
+         headers will be transformed from Azure Active Directory Object IDs to User Principal Names.  If
+         "false", the values will be returned as Azure Active Directory Object IDs. The default value is
+         false. Note that group and application Object IDs are not translated because they do not have
+         unique friendly names. Default value is None.
+        :type upn: bool
+        :return: PathList or the result of cls(response)
+        :rtype: ~azure.storage.filedatalake.models.PathList
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = kwargs.pop("params", {}) or {}
+
+        cls: ClsType[_models.PathList] = kwargs.pop("cls", None)
+
+        _request = build_list_paths_request(
+            url=self._config.url,
+            recursive=recursive,
+            request_id_parameter=request_id_parameter,
+            timeout=timeout,
+            continuation=continuation,
+            path=path,
+            max_results=max_results,
+            upn=upn,
+            resource=self._config.resource,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["x-ms-continuation"] = self._deserialize("str", response.headers.get("x-ms-continuation"))
+
+        deserialized = self._deserialize("PathList", pipeline_response.http_response)
+
+        if cls:
+            return cls(pipeline_response, deserialized, response_headers)  # type: ignore
+
+        return deserialized  # type: ignore
+
+    @distributed_trace_async
+    async def list_blob_hierarchy_segment(
+        self,
+        prefix: Optional[str] = None,
+        delimiter: Optional[str] = None,
+        marker: Optional[str] = None,
+        max_results: Optional[int] = None,
+        include: Optional[List[Union[str, _models.ListBlobsIncludeItem]]] = None,
+        showonly: Literal["deleted"] = "deleted",
+        timeout: Optional[int] = None,
+        request_id_parameter: Optional[str] = None,
+        **kwargs: Any
+    ) -> _models.ListBlobsHierarchySegmentResponse:
+        # pylint: disable=line-too-long
+        """The List Blobs operation returns a list of the blobs under the specified container.
+
+        :param prefix: Filters results to blobs whose names begin with the specified prefix. Default
+         value is None.
+        :type prefix: str
+        :param delimiter: When the request includes this parameter, the operation returns a BlobPrefix
+         element in the response body that acts as a placeholder for all blobs whose names begin with
+         the same substring up to the appearance of the delimiter character. The delimiter may be a
+         single character or a string. Default value is None.
+        :type delimiter: str
+        :param marker: A string value that identifies the portion of the list of containers to be
+         returned with the next listing operation. The operation returns the NextMarker value within the
+         response body if the listing operation did not return all containers remaining to be listed
+         with the current page. The NextMarker value can be used as the value for the marker parameter
+         in a subsequent call to request the next page of list items. The marker value is opaque to the
+         client. Default value is None.
+        :type marker: str
+        :param max_results: An optional value that specifies the maximum number of items to return. If
+         omitted or greater than 5,000, the response will include up to 5,000 items. Default value is
+         None.
+        :type max_results: int
+        :param include: Include this parameter to specify one or more datasets to include in the
+         response. Default value is None.
+        :type include: list[str or ~azure.storage.filedatalake.models.ListBlobsIncludeItem]
+        :param showonly: Include this parameter to restrict the listing to soft-deleted blobs only.
+         Known values are "deleted" and None. Default value is "deleted".
+        :type showonly: str
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :return: ListBlobsHierarchySegmentResponse or the result of cls(response)
+        :rtype: ~azure.storage.filedatalake.models.ListBlobsHierarchySegmentResponse
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        restype: Literal["container"] = kwargs.pop("restype", _params.pop("restype", "container"))
+        comp: Literal["list"] = kwargs.pop("comp", _params.pop("comp", "list"))
+        cls: ClsType[_models.ListBlobsHierarchySegmentResponse] = kwargs.pop("cls", None)
+
+        _request = build_list_blob_hierarchy_segment_request(
+            url=self._config.url,
+            prefix=prefix,
+            delimiter=delimiter,
+            marker=marker,
+            max_results=max_results,
+            include=include,
+            showonly=showonly,
+            timeout=timeout,
+            request_id_parameter=request_id_parameter,
+            restype=restype,
+            comp=comp,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["Content-Type"] = self._deserialize("str", response.headers.get("Content-Type"))
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+
+        deserialized = self._deserialize("ListBlobsHierarchySegmentResponse", pipeline_response.http_response)
+
+        if cls:
+            return cls(pipeline_response, deserialized, response_headers)  # type: ignore
+
+        return deserialized  # type: ignore
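
Pulling the filesystem operations above together, a hedged usage sketch (placeholder URL; real calls additionally need authorization, e.g. a SAS token appended to the URL or an authentication_policy):

import asyncio

from azure.storage.filedatalake._generated.aio import AzureDataLakeStorageRESTAPI


async def demo() -> None:
    url = "https://myaccount.dfs.core.windows.net/myfilesystem"  # placeholder
    async with AzureDataLakeStorageRESTAPI(url=url) as client:
        await client.file_system.create()             # expects HTTP 201
        await client.file_system.get_properties()     # returns None; headers via the cls callback
        listing = await client.file_system.list_paths(recursive=True)
        print([p.name for p in listing.paths or []])  # PathList.paths -> list of Path models
        await client.file_system.delete()             # expects HTTP 202


asyncio.run(demo())
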
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/aio/operations/_patch.py b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/aio/operations/_patch.py
new file mode 100644
index 00000000..f7dd3251
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/aio/operations/_patch.py
@@ -0,0 +1,20 @@
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------
+"""Customize generated code here.
+
+Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize
+"""
+from typing import List
+
+__all__: List[str] = []  # Add all objects you want publicly available to users at this package level
+
+
+def patch_sdk():
+    """Do not remove from this file.
+
+    `patch_sdk` is a last resort escape hatch that allows you to do customizations
+    you can't accomplish using the techniques described in
+    https://aka.ms/azsdk/python/dpcodegen/python/customize
+    """
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/aio/operations/_path_operations.py b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/aio/operations/_path_operations.py
new file mode 100644
index 00000000..d3ed5c3c
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/aio/operations/_path_operations.py
@@ -0,0 +1,1968 @@
+# pylint: disable=too-many-lines
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+import sys
+from typing import Any, AsyncIterator, Callable, Dict, IO, Literal, Optional, TypeVar, Union
+
+from azure.core import AsyncPipelineClient
+from azure.core.exceptions import (
+    ClientAuthenticationError,
+    HttpResponseError,
+    ResourceExistsError,
+    ResourceNotFoundError,
+    ResourceNotModifiedError,
+    StreamClosedError,
+    StreamConsumedError,
+    map_error,
+)
+from azure.core.pipeline import PipelineResponse
+from azure.core.rest import AsyncHttpResponse, HttpRequest
+from azure.core.tracing.decorator_async import distributed_trace_async
+from azure.core.utils import case_insensitive_dict
+
+from ... import models as _models
+from ..._serialization import Deserializer, Serializer
+from ...operations._path_operations import (
+    build_append_data_request,
+    build_create_request,
+    build_delete_request,
+    build_flush_data_request,
+    build_get_properties_request,
+    build_lease_request,
+    build_read_request,
+    build_set_access_control_recursive_request,
+    build_set_access_control_request,
+    build_set_expiry_request,
+    build_undelete_request,
+    build_update_request,
+)
+from .._configuration import AzureDataLakeStorageRESTAPIConfiguration
+
+if sys.version_info >= (3, 9):
+    from collections.abc import MutableMapping
+else:
+    from typing import MutableMapping  # type: ignore
+T = TypeVar("T")
+ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
+
+
+class PathOperations:
+    """
+    .. warning::
+        **DO NOT** instantiate this class directly.
+
+        Instead, you should access the following operations through
+        :class:`~azure.storage.filedatalake.aio.AzureDataLakeStorageRESTAPI`'s
+        :attr:`path` attribute.
+    """
+
+    models = _models
+
+    def __init__(self, *args, **kwargs) -> None:
+        input_args = list(args)
+        self._client: AsyncPipelineClient = input_args.pop(0) if input_args else kwargs.pop("client")
+        self._config: AzureDataLakeStorageRESTAPIConfiguration = (
+            input_args.pop(0) if input_args else kwargs.pop("config")
+        )
+        self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer")
+        self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer")
+
+    @distributed_trace_async
+    async def create(
+        self,
+        request_id_parameter: Optional[str] = None,
+        timeout: Optional[int] = None,
+        resource: Optional[Union[str, _models.PathResourceType]] = None,
+        continuation: Optional[str] = None,
+        mode: Optional[Union[str, _models.PathRenameMode]] = None,
+        rename_source: Optional[str] = None,
+        source_lease_id: Optional[str] = None,
+        properties: Optional[str] = None,
+        permissions: Optional[str] = None,
+        umask: Optional[str] = None,
+        owner: Optional[str] = None,
+        group: Optional[str] = None,
+        acl: Optional[str] = None,
+        proposed_lease_id: Optional[str] = None,
+        lease_duration: Optional[int] = None,
+        expiry_options: Optional[Union[str, _models.PathExpiryOptions]] = None,
+        expires_on: Optional[str] = None,
+        encryption_context: Optional[str] = None,
+        path_http_headers: Optional[_models.PathHTTPHeaders] = None,
+        lease_access_conditions: Optional[_models.LeaseAccessConditions] = None,
+        modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None,
+        source_modified_access_conditions: Optional[_models.SourceModifiedAccessConditions] = None,
+        cpk_info: Optional[_models.CpkInfo] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """Create File | Create Directory | Rename File | Rename Directory.
+
+        Create or rename a file or directory.    By default, the destination is overwritten and if the
+        destination already exists and has a lease the lease is broken.  This operation supports
+        conditional HTTP requests.  For more information, see `Specifying Conditional Headers for Blob
+        Service Operations
+        <https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations>`_.
+        To fail if the destination already exists, use a conditional request with If-None-Match: "*".
+
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param resource: Required only for Create File and Create Directory. The value must be "file"
+         or "directory". Known values are: "directory" and "file". Default value is None.
+        :type resource: str or ~azure.storage.filedatalake.models.PathResourceType
+        :param continuation: Optional.  When renaming a directory, the number of paths that are
+         renamed with each invocation is limited.  If the number of paths to be renamed exceeds this
+         limit, a continuation token is returned in this response header.  When a continuation token
+         is returned in the response, it must be specified in a subsequent invocation of the rename
+         operation to continue renaming the directory. Default value is None.
+        :type continuation: str
+        :param mode: Optional. Valid only when namespace is enabled. This parameter determines the
+         behavior of the rename operation. The value must be "legacy" or "posix", and the default value
+         will be "posix". Known values are: "legacy" and "posix". Default value is None.
+        :type mode: str or ~azure.storage.filedatalake.models.PathRenameMode
+        :param rename_source: An optional file or directory to be renamed.  The value must have the
+         following format: "/{filesystem}/{path}".  If "x-ms-properties" is specified, the properties
+         will overwrite the existing properties; otherwise, the existing properties will be preserved.
+         This value must be a URL percent-encoded string. Note that the string may only contain ASCII
+         characters in the ISO-8859-1 character set. Default value is None.
+        :type rename_source: str
+        :param source_lease_id: A lease ID for the source path. If specified, the source path must have
+         an active lease and the lease ID must match. Default value is None.
+        :type source_lease_id: str
+        :param properties: Optional. User-defined properties to be stored with the filesystem, in the
+         format of a comma-separated list of name and value pairs "n1=v1, n2=v2, ...", where each value
+         is a base64 encoded string. Note that the string may only contain ASCII characters in the
+         ISO-8859-1 character set.  If the filesystem exists, any properties not included in the list
+         will be removed.  All properties are removed if the header is omitted.  To merge new and
+         existing properties, first get all existing properties and the current E-Tag, then make a
+         conditional request with the E-Tag and include values for all properties. Default value is
+         None.
+        :type properties: str
+        :param permissions: Optional and only valid if Hierarchical Namespace is enabled for the
+         account. Sets POSIX access permissions for the file owner, the file owning group, and others.
+         Each class may be granted read, write, or execute permission.  The sticky bit is also
+         supported.  Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are supported.
+         Default value is None.
+        :type permissions: str
+        :param umask: Optional and only valid if Hierarchical Namespace is enabled for the account.
+         When creating a file or directory and the parent folder does not have a default ACL, the umask
+         restricts the permissions of the file or directory to be created.  The resulting permission is
+         given by p AND NOT u (that is, p & ~u), where p is the permission and u is the umask.  For example, if p
+         is 0777 and u is 0057, then the resulting permission is 0720.  The default permission is 0777
+         for a directory and 0666 for a file.  The default umask is 0027.  The umask must be specified
+         in 4-digit octal notation (e.g. 0766). Default value is None.
+        :type umask: str
+        :param owner: Optional. The owner of the blob or directory. Default value is None.
+        :type owner: str
+        :param group: Optional. The owning group of the blob or directory. Default value is None.
+        :type group: str
+        :param acl: Sets POSIX access control rights on files and directories. The value is a
+         comma-separated list of access control entries. Each access control entry (ACE) consists of a
+         scope, a type, a user or group identifier, and permissions in the format
+         "[scope:][type]:[id]:[permissions]". Default value is None.
+        :type acl: str
+        :param proposed_lease_id: Proposed lease ID, in a GUID string format. The Blob service returns
+         400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid
+         Constructor (String) for a list of valid GUID string formats. Default value is None.
+        :type proposed_lease_id: str
+        :param lease_duration: The lease duration is required to acquire a lease, and specifies the
+         duration of the lease in seconds.  The lease duration must be between 15 and 60 seconds or -1
+         for infinite lease. Default value is None.
+        :type lease_duration: int
+        :param expiry_options: Required. Indicates mode of the expiry time. Known values are:
+         "NeverExpire", "RelativeToCreation", "RelativeToNow", and "Absolute". Default value is None.
+        :type expiry_options: str or ~azure.storage.filedatalake.models.PathExpiryOptions
+        :param expires_on: The time at which the blob is set to expire. Default value is None.
+        :type expires_on: str
+        :param encryption_context: Specifies the encryption context to set on the file. Default value
+         is None.
+        :type encryption_context: str
+        :param path_http_headers: Parameter group. Default value is None.
+        :type path_http_headers: ~azure.storage.filedatalake.models.PathHTTPHeaders
+        :param lease_access_conditions: Parameter group. Default value is None.
+        :type lease_access_conditions: ~azure.storage.filedatalake.models.LeaseAccessConditions
+        :param modified_access_conditions: Parameter group. Default value is None.
+        :type modified_access_conditions: ~azure.storage.filedatalake.models.ModifiedAccessConditions
+        :param source_modified_access_conditions: Parameter group. Default value is None.
+        :type source_modified_access_conditions:
+         ~azure.storage.filedatalake.models.SourceModifiedAccessConditions
+        :param cpk_info: Parameter group. Default value is None.
+        :type cpk_info: ~azure.storage.filedatalake.models.CpkInfo
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
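+        # This mapping routes well-known HTTP status codes to azure-core exception types;
+        # callers can extend or override it by passing ``error_map`` in ``kwargs``.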
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = kwargs.pop("params", {}) or {}
+
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _cache_control = None
+        _content_encoding = None
+        _content_language = None
+        _content_disposition = None
+        _content_type_parameter = None
+        _lease_id = None
+        _if_match = None
+        _if_none_match = None
+        _if_modified_since = None
+        _if_unmodified_since = None
+        _source_if_match = None
+        _source_if_none_match = None
+        _source_if_modified_since = None
+        _source_if_unmodified_since = None
+        _encryption_key = None
+        _encryption_key_sha256 = None
+        _encryption_algorithm = None
+        if path_http_headers is not None:
+            _cache_control = path_http_headers.cache_control
+            _content_disposition = path_http_headers.content_disposition
+            _content_encoding = path_http_headers.content_encoding
+            _content_language = path_http_headers.content_language
+            _content_type_parameter = path_http_headers.content_type
+        if lease_access_conditions is not None:
+            _lease_id = lease_access_conditions.lease_id
+        if modified_access_conditions is not None:
+            _if_match = modified_access_conditions.if_match
+            _if_modified_since = modified_access_conditions.if_modified_since
+            _if_none_match = modified_access_conditions.if_none_match
+            _if_unmodified_since = modified_access_conditions.if_unmodified_since
+        if source_modified_access_conditions is not None:
+            _source_if_match = source_modified_access_conditions.source_if_match
+            _source_if_modified_since = source_modified_access_conditions.source_if_modified_since
+            _source_if_none_match = source_modified_access_conditions.source_if_none_match
+            _source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since
+        if cpk_info is not None:
+            _encryption_algorithm = cpk_info.encryption_algorithm
+            _encryption_key = cpk_info.encryption_key
+            _encryption_key_sha256 = cpk_info.encryption_key_sha256
+
+        _request = build_create_request(
+            url=self._config.url,
+            request_id_parameter=request_id_parameter,
+            timeout=timeout,
+            resource=resource,
+            continuation=continuation,
+            mode=mode,
+            cache_control=_cache_control,
+            content_encoding=_content_encoding,
+            content_language=_content_language,
+            content_disposition=_content_disposition,
+            content_type_parameter=_content_type_parameter,
+            rename_source=rename_source,
+            lease_id=_lease_id,
+            source_lease_id=source_lease_id,
+            properties=properties,
+            permissions=permissions,
+            umask=umask,
+            if_match=_if_match,
+            if_none_match=_if_none_match,
+            if_modified_since=_if_modified_since,
+            if_unmodified_since=_if_unmodified_since,
+            source_if_match=_source_if_match,
+            source_if_none_match=_source_if_none_match,
+            source_if_modified_since=_source_if_modified_since,
+            source_if_unmodified_since=_source_if_unmodified_since,
+            encryption_key=_encryption_key,
+            encryption_key_sha256=_encryption_key_sha256,
+            encryption_algorithm=_encryption_algorithm,  # type: ignore
+            owner=owner,
+            group=group,
+            acl=acl,
+            proposed_lease_id=proposed_lease_id,
+            lease_duration=lease_duration,
+            expiry_options=expiry_options,
+            expires_on=expires_on,
+            encryption_context=encryption_context,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [201]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["x-ms-continuation"] = self._deserialize("str", response.headers.get("x-ms-continuation"))
+        response_headers["Content-Length"] = self._deserialize("int", response.headers.get("Content-Length"))
+        response_headers["x-ms-request-server-encrypted"] = self._deserialize(
+            "bool", response.headers.get("x-ms-request-server-encrypted")
+        )
+        response_headers["x-ms-encryption-key-sha256"] = self._deserialize(
+            "str", response.headers.get("x-ms-encryption-key-sha256")
+        )
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
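+    # Usage sketch (not part of the generated code): a minimal example of calling the
+    # create operation directly, assuming ``client`` is an already constructed aio
+    # ``AzureDataLakeStorageRESTAPI`` whose URL points at the target path and that the
+    # operation group is exposed as ``client.path``; the ``ModifiedAccessConditions``
+    # import path follows this file's docstring cross-references. Most applications use
+    # the public ``DataLakeFileClient`` / ``DataLakeDirectoryClient`` instead of this layer.
+    #
+    #     from azure.core.exceptions import HttpResponseError
+    #     from azure.storage.filedatalake.models import ModifiedAccessConditions
+    #
+    #     async def create_file_if_absent(client) -> None:
+    #         # If-None-Match: "*" makes the request fail when the path already exists.
+    #         conditions = ModifiedAccessConditions(if_none_match="*")
+    #         try:
+    #             await client.path.create(resource="file", modified_access_conditions=conditions)
+    #         except HttpResponseError as exc:
+    #             print(f"create failed with status {exc.status_code}")
+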
+    @distributed_trace_async
+    async def update(
+        self,
+        action: Union[str, _models.PathUpdateAction],
+        mode: Union[str, _models.PathSetAccessControlRecursiveMode],
+        body: IO[bytes],
+        request_id_parameter: Optional[str] = None,
+        timeout: Optional[int] = None,
+        max_records: Optional[int] = None,
+        continuation: Optional[str] = None,
+        force_flag: Optional[bool] = None,
+        position: Optional[int] = None,
+        retain_uncommitted_data: Optional[bool] = None,
+        close: Optional[bool] = None,
+        content_length: Optional[int] = None,
+        properties: Optional[str] = None,
+        owner: Optional[str] = None,
+        group: Optional[str] = None,
+        permissions: Optional[str] = None,
+        acl: Optional[str] = None,
+        structured_body_type: Optional[str] = None,
+        structured_content_length: Optional[int] = None,
+        path_http_headers: Optional[_models.PathHTTPHeaders] = None,
+        lease_access_conditions: Optional[_models.LeaseAccessConditions] = None,
+        modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None,
+        **kwargs: Any
+    ) -> Optional[_models.SetAccessControlRecursiveResponse]:
+        # pylint: disable=line-too-long
+        """Append Data | Flush Data | Set Properties | Set Access Control.
+
+        Uploads data to be appended to a file, flushes (writes) previously uploaded data to a file,
+        sets properties for a file or directory, or sets access control for a file or directory. Data
+        can only be appended to a file. Concurrent writes to the same file using multiple clients are
+        not supported. This operation supports conditional HTTP requests. For more information, see
+        `Specifying Conditional Headers for Blob Service Operations
+        <https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations>`_.
+
+        :param action: The action must be "append" to upload data to be appended to a file, "flush" to
+         flush previously uploaded data to a file, "setProperties" to set the properties of a file or
+         directory, "setAccessControl" to set the owner, group, permissions, or access control list for
+         a file or directory, or  "setAccessControlRecursive" to set the access control list for a
+         directory recursively. Note that Hierarchical Namespace must be enabled for the account in
+         order to use access control.  Also note that the Access Control List (ACL) includes permissions
+         for the owner, owning group, and others, so the x-ms-permissions and x-ms-acl request headers
+         are mutually exclusive. Known values are: "append", "flush", "setProperties",
+         "setAccessControl", and "setAccessControlRecursive". Required.
+        :type action: str or ~azure.storage.filedatalake.models.PathUpdateAction
+        :param mode: Mode "set" sets POSIX access control rights on files and directories, "modify"
+         modifies one or more POSIX access control rights  that pre-exist on files and directories,
+         "remove" removes one or more POSIX access control rights  that were present earlier on files
+         and directories. Known values are: "set", "modify", and "remove". Required.
+        :type mode: str or ~azure.storage.filedatalake.models.PathSetAccessControlRecursiveMode
+        :param body: Initial data. Required.
+        :type body: IO[bytes]
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param max_records: Optional. Valid for "SetAccessControlRecursive" operation. It specifies the
+         maximum number of files or directories on which the acl change will be applied. If omitted or
+         greater than 2,000, the request will process up to 2,000 items. Default value is None.
+        :type max_records: int
+        :param continuation: Optional. The number of paths processed with each invocation is limited.
+         If the number of paths to be processed exceeds this limit, a continuation token is returned in
+         the response header x-ms-continuation. When a continuation token is  returned in the response,
+         it must be percent-encoded and specified in a subsequent invocation of
+         setAccessControlRecursive operation. Default value is None.
+        :type continuation: str
+        :param force_flag: Optional. Valid for "SetAccessControlRecursive" operation. If set to false,
+         the operation will terminate quickly on encountering user errors (4XX). If true, the operation
+         will ignore user errors and proceed with the operation on other sub-entities of the directory.
+         Continuation token will only be returned when forceFlag is true in case of user errors. If not
+         set, the default value is false. Default value is None.
+        :type force_flag: bool
+        :param position: This parameter allows the caller to upload data in parallel and control the
+         order in which it is appended to the file.  It is required when uploading data to be appended
+         to the file and when flushing previously uploaded data to the file.  The value must be the
+         position where the data is to be appended.  Uploaded data is not immediately flushed, or
+         written, to the file.  To flush, the previously uploaded data must be contiguous, the position
+         parameter must be specified and equal to the length of the file after all data has been
+         written, and there must not be a request entity body included with the request. Default value
+         is None.
+        :type position: int
+        :param retain_uncommitted_data: Valid only for flush operations.  If "true", uncommitted data
+         is retained after the flush operation completes; otherwise, the uncommitted data is deleted
+         after the flush operation.  The default is false.  Data at offsets less than the specified
+         position are written to the file when flush succeeds, but this optional parameter allows data
+         after the flush position to be retained for a future flush operation. Default value is None.
+        :type retain_uncommitted_data: bool
+        :param close: Azure Storage Events allow applications to receive notifications when files
+         change. When Azure Storage Events are enabled, a file changed event is raised. This event has a
+         property indicating whether this is the final change to distinguish the difference between an
+         intermediate flush to a file stream and the final close of a file stream. The close query
+         parameter is valid only when the action is "flush" and change notifications are enabled. If the
+         value of close is "true" and the flush operation completes successfully, the service raises a
+         file change notification with a property indicating that this is the final update (the file
+         stream has been closed). If "false" a change notification is raised indicating the file has
+         changed. The default is false. This query parameter is set to true by the Hadoop ABFS driver to
+         indicate that the file stream has been closed. Default value is None.
+        :type close: bool
+        :param content_length: Required for "Append Data" and "Flush Data".  Must be 0 for "Flush
+         Data".  Must be the length of the request content in bytes for "Append Data". Default value is
+         None.
+        :type content_length: int
+        :param properties: Optional. User-defined properties to be stored with the filesystem, in the
+         format of a comma-separated list of name and value pairs "n1=v1, n2=v2, ...", where each value
+         is a base64 encoded string. Note that the string may only contain ASCII characters in the
+         ISO-8859-1 character set.  If the filesystem exists, any properties not included in the list
+         will be removed.  All properties are removed if the header is omitted.  To merge new and
+         existing properties, first get all existing properties and the current E-Tag, then make a
+         conditional request with the E-Tag and include values for all properties. Default value is
+         None.
+        :type properties: str
+        :param owner: Optional. The owner of the blob or directory. Default value is None.
+        :type owner: str
+        :param group: Optional. The owning group of the blob or directory. Default value is None.
+        :type group: str
+        :param permissions: Optional and only valid if Hierarchical Namespace is enabled for the
+         account. Sets POSIX access permissions for the file owner, the file owning group, and others.
+         Each class may be granted read, write, or execute permission.  The sticky bit is also
+         supported.  Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are supported.
+         Default value is None.
+        :type permissions: str
+        :param acl: Sets POSIX access control rights on files and directories. The value is a
+         comma-separated list of access control entries. Each access control entry (ACE) consists of a
+         scope, a type, a user or group identifier, and permissions in the format
+         "[scope:][type]:[id]:[permissions]". Default value is None.
+        :type acl: str
+        :param structured_body_type: Required if the request body is a structured message. Specifies
+         the message schema version and properties. Default value is None.
+        :type structured_body_type: str
+        :param structured_content_length: Required if the request body is a structured message.
+         Specifies the length of the blob/file content inside the message body. Will always be smaller
+         than Content-Length. Default value is None.
+        :type structured_content_length: int
+        :param path_http_headers: Parameter group. Default value is None.
+        :type path_http_headers: ~azure.storage.filedatalake.models.PathHTTPHeaders
+        :param lease_access_conditions: Parameter group. Default value is None.
+        :type lease_access_conditions: ~azure.storage.filedatalake.models.LeaseAccessConditions
+        :param modified_access_conditions: Parameter group. Default value is None.
+        :type modified_access_conditions: ~azure.storage.filedatalake.models.ModifiedAccessConditions
+        :return: SetAccessControlRecursiveResponse or None or the result of cls(response)
+        :rtype: ~azure.storage.filedatalake.models.SetAccessControlRecursiveResponse or None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+        _params = kwargs.pop("params", {}) or {}
+
+        content_type: str = kwargs.pop("content_type", _headers.pop("Content-Type", "application/octet-stream"))
+        cls: ClsType[Optional[_models.SetAccessControlRecursiveResponse]] = kwargs.pop("cls", None)
+
+        _content_md5 = None
+        _lease_id = None
+        _cache_control = None
+        _content_type_parameter = None
+        _content_disposition = None
+        _content_encoding = None
+        _content_language = None
+        _if_match = None
+        _if_none_match = None
+        _if_modified_since = None
+        _if_unmodified_since = None
+        if path_http_headers is not None:
+            _cache_control = path_http_headers.cache_control
+            _content_disposition = path_http_headers.content_disposition
+            _content_encoding = path_http_headers.content_encoding
+            _content_language = path_http_headers.content_language
+            _content_md5 = path_http_headers.content_md5
+            _content_type_parameter = path_http_headers.content_type
+        if lease_access_conditions is not None:
+            _lease_id = lease_access_conditions.lease_id
+        if modified_access_conditions is not None:
+            _if_match = modified_access_conditions.if_match
+            _if_modified_since = modified_access_conditions.if_modified_since
+            _if_none_match = modified_access_conditions.if_none_match
+            _if_unmodified_since = modified_access_conditions.if_unmodified_since
+        _content = body
+
+        _request = build_update_request(
+            url=self._config.url,
+            action=action,
+            mode=mode,
+            request_id_parameter=request_id_parameter,
+            timeout=timeout,
+            max_records=max_records,
+            continuation=continuation,
+            force_flag=force_flag,
+            position=position,
+            retain_uncommitted_data=retain_uncommitted_data,
+            close=close,
+            content_length=content_length,
+            content_md5=_content_md5,
+            lease_id=_lease_id,
+            cache_control=_cache_control,
+            content_type_parameter=_content_type_parameter,
+            content_disposition=_content_disposition,
+            content_encoding=_content_encoding,
+            content_language=_content_language,
+            properties=properties,
+            owner=owner,
+            group=group,
+            permissions=permissions,
+            acl=acl,
+            if_match=_if_match,
+            if_none_match=_if_none_match,
+            if_modified_since=_if_modified_since,
+            if_unmodified_since=_if_unmodified_since,
+            structured_body_type=structured_body_type,
+            structured_content_length=structured_content_length,
+            content_type=content_type,
+            version=self._config.version,
+            content=_content,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200, 202]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        deserialized = None
+        response_headers = {}
+        if response.status_code == 200:
+            response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+            response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+            response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+            response_headers["Accept-Ranges"] = self._deserialize("str", response.headers.get("Accept-Ranges"))
+            response_headers["Cache-Control"] = self._deserialize("str", response.headers.get("Cache-Control"))
+            response_headers["Content-Disposition"] = self._deserialize(
+                "str", response.headers.get("Content-Disposition")
+            )
+            response_headers["Content-Encoding"] = self._deserialize("str", response.headers.get("Content-Encoding"))
+            response_headers["Content-Language"] = self._deserialize("str", response.headers.get("Content-Language"))
+            response_headers["Content-Length"] = self._deserialize("int", response.headers.get("Content-Length"))
+            response_headers["Content-Range"] = self._deserialize("str", response.headers.get("Content-Range"))
+            response_headers["Content-Type"] = self._deserialize("str", response.headers.get("Content-Type"))
+            response_headers["Content-MD5"] = self._deserialize("str", response.headers.get("Content-MD5"))
+            response_headers["x-ms-properties"] = self._deserialize("str", response.headers.get("x-ms-properties"))
+            response_headers["x-ms-continuation"] = self._deserialize("str", response.headers.get("x-ms-continuation"))
+            response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+            response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+
+            deserialized = self._deserialize("SetAccessControlRecursiveResponse", pipeline_response.http_response)
+
+        if response.status_code == 202:
+            response_headers["Content-MD5"] = self._deserialize("str", response.headers.get("Content-MD5"))
+            response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+            response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+            response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+            response_headers["x-ms-structured-body"] = self._deserialize(
+                "str", response.headers.get("x-ms-structured-body")
+            )
+
+        if cls:
+            return cls(pipeline_response, deserialized, response_headers)  # type: ignore
+
+        return deserialized  # type: ignore
+
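+    # Usage sketch (not part of the generated code): appending data and then flushing it
+    # via the update operation, assuming ``client`` is an already constructed aio
+    # ``AzureDataLakeStorageRESTAPI`` whose URL points at an existing file and that the
+    # operation group is exposed as ``client.path``. Note that ``mode`` is required by
+    # the signature even though it only applies to "setAccessControlRecursive".
+    #
+    #     import io
+    #
+    #     async def append_and_flush(client, data: bytes) -> None:
+    #         # Upload the bytes at offset 0; appended data is not visible until flushed.
+    #         await client.path.update(
+    #             action="append", mode="set", body=io.BytesIO(data),
+    #             position=0, content_length=len(data),
+    #         )
+    #         # Flush commits everything written up to ``position``; content_length must be 0.
+    #         await client.path.update(
+    #             action="flush", mode="set", body=io.BytesIO(b""),
+    #             position=len(data), content_length=0,
+    #         )
+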
+    @distributed_trace_async
+    async def lease(
+        self,
+        x_ms_lease_action: Union[str, _models.PathLeaseAction],
+        request_id_parameter: Optional[str] = None,
+        timeout: Optional[int] = None,
+        x_ms_lease_break_period: Optional[int] = None,
+        proposed_lease_id: Optional[str] = None,
+        lease_access_conditions: Optional[_models.LeaseAccessConditions] = None,
+        modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """Lease Path.
+
+        Create and manage a lease to restrict write and delete access to the path. This operation
+        supports conditional HTTP requests.  For more information, see `Specifying Conditional Headers
+        for Blob Service Operations
+        <https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations>`_.
+
+        :param x_ms_lease_action: There are five lease actions: "acquire", "break", "change", "renew",
+         and "release". Use "acquire" and specify the "x-ms-proposed-lease-id" and "x-ms-lease-duration"
+         to acquire a new lease. Use "break" to break an existing lease. When a lease is broken, the
+         lease break period is allowed to elapse, during which time no lease operation except break and
+         release can be performed on the file. When a lease is successfully broken, the response
+         indicates the interval in seconds until a new lease can be acquired. Use "change" and specify
+         the current lease ID in "x-ms-lease-id" and the new lease ID in "x-ms-proposed-lease-id" to
+         change the lease ID of an active lease. Use "renew" and specify the "x-ms-lease-id" to renew an
+         existing lease. Use "release" and specify the "x-ms-lease-id" to release a lease. Known values
+         are: "acquire", "break", "change", "renew", "release", and "break". Required.
+        :type x_ms_lease_action: str or ~azure.storage.filedatalake.models.PathLeaseAction
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param x_ms_lease_break_period: The lease break period is optional when breaking a lease, and
+         specifies the break period of the lease in seconds.  The lease break duration must be
+         between 0 and 60 seconds. Default value is None.
+        :type x_ms_lease_break_period: int
+        :param proposed_lease_id: Proposed lease ID, in a GUID string format. The Blob service returns
+         400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid
+         Constructor (String) for a list of valid GUID string formats. Default value is None.
+        :type proposed_lease_id: str
+        :param lease_access_conditions: Parameter group. Default value is None.
+        :type lease_access_conditions: ~azure.storage.filedatalake.models.LeaseAccessConditions
+        :param modified_access_conditions: Parameter group. Default value is None.
+        :type modified_access_conditions: ~azure.storage.filedatalake.models.ModifiedAccessConditions
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = kwargs.pop("params", {}) or {}
+
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _lease_id = None
+        _if_match = None
+        _if_none_match = None
+        _if_modified_since = None
+        _if_unmodified_since = None
+        if lease_access_conditions is not None:
+            _lease_id = lease_access_conditions.lease_id
+        if modified_access_conditions is not None:
+            _if_match = modified_access_conditions.if_match
+            _if_modified_since = modified_access_conditions.if_modified_since
+            _if_none_match = modified_access_conditions.if_none_match
+            _if_unmodified_since = modified_access_conditions.if_unmodified_since
+
+        _request = build_lease_request(
+            url=self._config.url,
+            x_ms_lease_action=x_ms_lease_action,
+            request_id_parameter=request_id_parameter,
+            timeout=timeout,
+            x_ms_lease_break_period=x_ms_lease_break_period,
+            lease_id=_lease_id,
+            proposed_lease_id=proposed_lease_id,
+            if_match=_if_match,
+            if_none_match=_if_none_match,
+            if_modified_since=_if_modified_since,
+            if_unmodified_since=_if_unmodified_since,
+            x_ms_lease_duration=self._config.x_ms_lease_duration,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200, 201, 202]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        if response.status_code == 200:
+            response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+            response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+            response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+            response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+            response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+            response_headers["x-ms-lease-id"] = self._deserialize("str", response.headers.get("x-ms-lease-id"))
+
+        if response.status_code == 201:
+            response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+            response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+            response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+            response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+            response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+            response_headers["x-ms-lease-id"] = self._deserialize("str", response.headers.get("x-ms-lease-id"))
+
+        if response.status_code == 202:
+            response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+            response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+            response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+            response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+            response_headers["x-ms-lease-time"] = self._deserialize("str", response.headers.get("x-ms-lease-time"))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
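+    # Usage sketch (not part of the generated code): acquiring and releasing a lease,
+    # assuming ``client`` is an already constructed aio ``AzureDataLakeStorageRESTAPI``
+    # for the target path and ``client.path`` is the operation group. The lease duration
+    # itself comes from the client configuration (``x_ms_lease_duration``), as the request
+    # builder call above shows; ``LeaseAccessConditions`` is imported per this file's
+    # docstring cross-references, and the lease IDs are illustrative GUIDs.
+    #
+    #     import uuid
+    #     from azure.storage.filedatalake.models import LeaseAccessConditions
+    #
+    #     async def with_lease(client) -> None:
+    #         proposed = str(uuid.uuid4())
+    #         await client.path.lease(x_ms_lease_action="acquire", proposed_lease_id=proposed)
+    #         try:
+    #             ...  # work on the path while holding the lease
+    #         finally:
+    #             conditions = LeaseAccessConditions(lease_id=proposed)
+    #             await client.path.lease(x_ms_lease_action="release", lease_access_conditions=conditions)
+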
+    @distributed_trace_async
+    async def read(
+        self,
+        request_id_parameter: Optional[str] = None,
+        timeout: Optional[int] = None,
+        range: Optional[str] = None,
+        x_ms_range_get_content_md5: Optional[bool] = None,
+        lease_access_conditions: Optional[_models.LeaseAccessConditions] = None,
+        modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None,
+        cpk_info: Optional[_models.CpkInfo] = None,
+        **kwargs: Any
+    ) -> AsyncIterator[bytes]:
+        # pylint: disable=line-too-long
+        """Read File.
+
+        Read the contents of a file.  For read operations, range requests are supported. This operation
+        supports conditional HTTP requests.  For more information, see `Specifying Conditional Headers
+        for Blob Service Operations
+        <https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations>`_.
+
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param range: The HTTP Range request header specifies one or more byte ranges of the resource
+         to be retrieved. Default value is None.
+        :type range: str
+        :param x_ms_range_get_content_md5: Optional. When this header is set to "true" and specified
+         together with the Range header, the service returns the MD5 hash for the range, as long as the
+         range is less than or equal to 4MB in size. If this header is specified without the Range
+         header, the service returns status code 400 (Bad Request). If this header is set to true when
+         the range exceeds 4 MB in size, the service returns status code 400 (Bad Request). Default
+         value is None.
+        :type x_ms_range_get_content_md5: bool
+        :param lease_access_conditions: Parameter group. Default value is None.
+        :type lease_access_conditions: ~azure.storage.filedatalake.models.LeaseAccessConditions
+        :param modified_access_conditions: Parameter group. Default value is None.
+        :type modified_access_conditions: ~azure.storage.filedatalake.models.ModifiedAccessConditions
+        :param cpk_info: Parameter group. Default value is None.
+        :type cpk_info: ~azure.storage.filedatalake.models.CpkInfo
+        :return: AsyncIterator[bytes] or the result of cls(response)
+        :rtype: AsyncIterator[bytes]
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = kwargs.pop("params", {}) or {}
+
+        cls: ClsType[AsyncIterator[bytes]] = kwargs.pop("cls", None)
+
+        _lease_id = None
+        _if_match = None
+        _if_none_match = None
+        _if_modified_since = None
+        _if_unmodified_since = None
+        _encryption_key = None
+        _encryption_key_sha256 = None
+        _encryption_algorithm = None
+        if lease_access_conditions is not None:
+            _lease_id = lease_access_conditions.lease_id
+        if modified_access_conditions is not None:
+            _if_match = modified_access_conditions.if_match
+            _if_modified_since = modified_access_conditions.if_modified_since
+            _if_none_match = modified_access_conditions.if_none_match
+            _if_unmodified_since = modified_access_conditions.if_unmodified_since
+        if cpk_info is not None:
+            _encryption_algorithm = cpk_info.encryption_algorithm
+            _encryption_key = cpk_info.encryption_key
+            _encryption_key_sha256 = cpk_info.encryption_key_sha256
+
+        _request = build_read_request(
+            url=self._config.url,
+            request_id_parameter=request_id_parameter,
+            timeout=timeout,
+            range=range,
+            lease_id=_lease_id,
+            x_ms_range_get_content_md5=x_ms_range_get_content_md5,
+            if_match=_if_match,
+            if_none_match=_if_none_match,
+            if_modified_since=_if_modified_since,
+            if_unmodified_since=_if_unmodified_since,
+            encryption_key=_encryption_key,
+            encryption_key_sha256=_encryption_key_sha256,
+            encryption_algorithm=_encryption_algorithm,  # type: ignore
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _decompress = kwargs.pop("decompress", True)
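+        # ``decompress`` (default True) is forwarded to stream_download below and controls
+        # whether the body is decompressed according to its Content-Encoding.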
+        _stream = True
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200, 206]:
+            try:
+                await response.read()  # Load the body in memory and close the socket
+            except (StreamConsumedError, StreamClosedError):
+                pass
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        if response.status_code == 200:
+            response_headers["Accept-Ranges"] = self._deserialize("str", response.headers.get("Accept-Ranges"))
+            response_headers["Cache-Control"] = self._deserialize("str", response.headers.get("Cache-Control"))
+            response_headers["Content-Disposition"] = self._deserialize(
+                "str", response.headers.get("Content-Disposition")
+            )
+            response_headers["Content-Encoding"] = self._deserialize("str", response.headers.get("Content-Encoding"))
+            response_headers["Content-Language"] = self._deserialize("str", response.headers.get("Content-Language"))
+            response_headers["Content-Length"] = self._deserialize("int", response.headers.get("Content-Length"))
+            response_headers["Content-Range"] = self._deserialize("str", response.headers.get("Content-Range"))
+            response_headers["Content-Type"] = self._deserialize("str", response.headers.get("Content-Type"))
+            response_headers["Content-MD5"] = self._deserialize("str", response.headers.get("Content-MD5"))
+            response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+            response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+            response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+            response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+            response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+            response_headers["x-ms-resource-type"] = self._deserialize(
+                "str", response.headers.get("x-ms-resource-type")
+            )
+            response_headers["x-ms-properties"] = self._deserialize("str", response.headers.get("x-ms-properties"))
+            response_headers["x-ms-lease-duration"] = self._deserialize(
+                "str", response.headers.get("x-ms-lease-duration")
+            )
+            response_headers["x-ms-lease-state"] = self._deserialize("str", response.headers.get("x-ms-lease-state"))
+            response_headers["x-ms-lease-status"] = self._deserialize("str", response.headers.get("x-ms-lease-status"))
+            response_headers["x-ms-request-server-encrypted"] = self._deserialize(
+                "bool", response.headers.get("x-ms-request-server-encrypted")
+            )
+            response_headers["x-ms-encryption-key-sha256"] = self._deserialize(
+                "str", response.headers.get("x-ms-encryption-key-sha256")
+            )
+
+        if response.status_code == 206:
+            response_headers["Accept-Ranges"] = self._deserialize("str", response.headers.get("Accept-Ranges"))
+            response_headers["Cache-Control"] = self._deserialize("str", response.headers.get("Cache-Control"))
+            response_headers["Content-Disposition"] = self._deserialize(
+                "str", response.headers.get("Content-Disposition")
+            )
+            response_headers["Content-Encoding"] = self._deserialize("str", response.headers.get("Content-Encoding"))
+            response_headers["Content-Language"] = self._deserialize("str", response.headers.get("Content-Language"))
+            response_headers["Content-Length"] = self._deserialize("int", response.headers.get("Content-Length"))
+            response_headers["Content-Range"] = self._deserialize("str", response.headers.get("Content-Range"))
+            response_headers["Content-Type"] = self._deserialize("str", response.headers.get("Content-Type"))
+            response_headers["Content-MD5"] = self._deserialize("str", response.headers.get("Content-MD5"))
+            response_headers["x-ms-content-md5"] = self._deserialize("str", response.headers.get("x-ms-content-md5"))
+            response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+            response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+            response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+            response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+            response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+            response_headers["x-ms-resource-type"] = self._deserialize(
+                "str", response.headers.get("x-ms-resource-type")
+            )
+            response_headers["x-ms-properties"] = self._deserialize("str", response.headers.get("x-ms-properties"))
+            response_headers["x-ms-lease-duration"] = self._deserialize(
+                "str", response.headers.get("x-ms-lease-duration")
+            )
+            response_headers["x-ms-lease-state"] = self._deserialize("str", response.headers.get("x-ms-lease-state"))
+            response_headers["x-ms-lease-status"] = self._deserialize("str", response.headers.get("x-ms-lease-status"))
+            response_headers["x-ms-request-server-encrypted"] = self._deserialize(
+                "bool", response.headers.get("x-ms-request-server-encrypted")
+            )
+            response_headers["x-ms-encryption-key-sha256"] = self._deserialize(
+                "str", response.headers.get("x-ms-encryption-key-sha256")
+            )
+
+        deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
+
+        if cls:
+            return cls(pipeline_response, deserialized, response_headers)  # type: ignore
+
+        return deserialized  # type: ignore
+
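+    # Usage sketch (not part of the generated code): streaming the first kilobyte of a
+    # file via the read operation, assuming ``client`` is an already constructed aio
+    # ``AzureDataLakeStorageRESTAPI`` for the target file and ``client.path`` is the
+    # operation group. The return value is an async iterator over the response body.
+    #
+    #     async def read_first_kilobyte(client) -> bytes:
+    #         stream = await client.path.read(range="bytes=0-1023", x_ms_range_get_content_md5=True)
+    #         chunks = []
+    #         async for chunk in stream:
+    #             chunks.append(chunk)
+    #         return b"".join(chunks)
+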
+    @distributed_trace_async
+    async def get_properties(
+        self,
+        request_id_parameter: Optional[str] = None,
+        timeout: Optional[int] = None,
+        action: Optional[Union[str, _models.PathGetPropertiesAction]] = None,
+        upn: Optional[bool] = None,
+        lease_access_conditions: Optional[_models.LeaseAccessConditions] = None,
+        modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """Get Properties | Get Status | Get Access Control List.
+
+        Get Properties returns all system and user defined properties for a path. Get Status returns
+        all system defined properties for a path. Get Access Control List returns the access control
+        list for a path. This operation supports conditional HTTP requests.  For more information, see
+        `Specifying Conditional Headers for Blob Service Operations
+        <https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations>`_.
+
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param action: Optional. If the value is "getStatus" only the system defined properties for the
+         path are returned. If the value is "getAccessControl" the access control list is returned in
+         the response headers (Hierarchical Namespace must be enabled for the account), otherwise the
+         properties are returned. Known values are: "getAccessControl" and "getStatus". Default value is
+         None.
+        :type action: str or ~azure.storage.filedatalake.models.PathGetPropertiesAction
+        :param upn: Optional. Valid only when Hierarchical Namespace is enabled for the account. If
+         "true", the user identity values returned in the x-ms-owner, x-ms-group, and x-ms-acl response
+         headers will be transformed from Azure Active Directory Object IDs to User Principal Names.  If
+         "false", the values will be returned as Azure Active Directory Object IDs. The default value is
+         false. Note that group and application Object IDs are not translated because they do not have
+         unique friendly names. Default value is None.
+        :type upn: bool
+        :param lease_access_conditions: Parameter group. Default value is None.
+        :type lease_access_conditions: ~azure.storage.filedatalake.models.LeaseAccessConditions
+        :param modified_access_conditions: Parameter group. Default value is None.
+        :type modified_access_conditions: ~azure.storage.filedatalake.models.ModifiedAccessConditions
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = kwargs.pop("params", {}) or {}
+
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _lease_id = None
+        _if_match = None
+        _if_none_match = None
+        _if_modified_since = None
+        _if_unmodified_since = None
+        if lease_access_conditions is not None:
+            _lease_id = lease_access_conditions.lease_id
+        if modified_access_conditions is not None:
+            _if_match = modified_access_conditions.if_match
+            _if_modified_since = modified_access_conditions.if_modified_since
+            _if_none_match = modified_access_conditions.if_none_match
+            _if_unmodified_since = modified_access_conditions.if_unmodified_since
+
+        _request = build_get_properties_request(
+            url=self._config.url,
+            request_id_parameter=request_id_parameter,
+            timeout=timeout,
+            action=action,
+            upn=upn,
+            lease_id=_lease_id,
+            if_match=_if_match,
+            if_none_match=_if_none_match,
+            if_modified_since=_if_modified_since,
+            if_unmodified_since=_if_unmodified_since,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["Accept-Ranges"] = self._deserialize("str", response.headers.get("Accept-Ranges"))
+        response_headers["Cache-Control"] = self._deserialize("str", response.headers.get("Cache-Control"))
+        response_headers["Content-Disposition"] = self._deserialize("str", response.headers.get("Content-Disposition"))
+        response_headers["Content-Encoding"] = self._deserialize("str", response.headers.get("Content-Encoding"))
+        response_headers["Content-Language"] = self._deserialize("str", response.headers.get("Content-Language"))
+        response_headers["Content-Length"] = self._deserialize("int", response.headers.get("Content-Length"))
+        response_headers["Content-Range"] = self._deserialize("str", response.headers.get("Content-Range"))
+        response_headers["Content-Type"] = self._deserialize("str", response.headers.get("Content-Type"))
+        response_headers["Content-MD5"] = self._deserialize("str", response.headers.get("Content-MD5"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["x-ms-resource-type"] = self._deserialize("str", response.headers.get("x-ms-resource-type"))
+        response_headers["x-ms-properties"] = self._deserialize("str", response.headers.get("x-ms-properties"))
+        response_headers["x-ms-owner"] = self._deserialize("str", response.headers.get("x-ms-owner"))
+        response_headers["x-ms-group"] = self._deserialize("str", response.headers.get("x-ms-group"))
+        response_headers["x-ms-permissions"] = self._deserialize("str", response.headers.get("x-ms-permissions"))
+        response_headers["x-ms-acl"] = self._deserialize("str", response.headers.get("x-ms-acl"))
+        response_headers["x-ms-lease-duration"] = self._deserialize("str", response.headers.get("x-ms-lease-duration"))
+        response_headers["x-ms-lease-state"] = self._deserialize("str", response.headers.get("x-ms-lease-state"))
+        response_headers["x-ms-lease-status"] = self._deserialize("str", response.headers.get("x-ms-lease-status"))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
+    @distributed_trace_async
+    async def delete(
+        self,
+        request_id_parameter: Optional[str] = None,
+        timeout: Optional[int] = None,
+        recursive: Optional[bool] = None,
+        continuation: Optional[str] = None,
+        paginated: Optional[bool] = None,
+        lease_access_conditions: Optional[_models.LeaseAccessConditions] = None,
+        modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """Delete File | Delete Directory.
+
+        Delete the file or directory. This operation supports conditional HTTP requests.  For more
+        information, see `Specifying Conditional Headers for Blob Service Operations
+        <https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations>`_.
+
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param recursive: Required and valid only when the resource is a directory. If "true", all
+         paths beneath the directory will be deleted. If "false" and the directory is non-empty, an
+         error occurs. Default value is None.
+        :type recursive: bool
+        :param continuation: Optional.  When deleting a directory, the number of paths that are deleted
+         with each invocation is limited.  If the number of paths to be deleted exceeds this limit, a
+         continuation token is returned in this response header.  When a continuation token is returned
+         in the response, it must be specified in a subsequent invocation of the delete operation to
+         continue deleting the directory. Default value is None.
+        :type continuation: str
+        :param paginated: If true, the delete is paginated: the recursive ACL checks (a POSIX
+         requirement enforced by the server) are performed in pages, and the delete completes as an
+         atomic operation once those checks finish. If false or omitted, the default behavior applies,
+         which may time out on very large directories because of the recursive ACL checks. The
+         parameter is optional to preserve backward compatibility. Default value is None.
+        :type paginated: bool
+        :param lease_access_conditions: Parameter group. Default value is None.
+        :type lease_access_conditions: ~azure.storage.filedatalake.models.LeaseAccessConditions
+        :param modified_access_conditions: Parameter group. Default value is None.
+        :type modified_access_conditions: ~azure.storage.filedatalake.models.ModifiedAccessConditions
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = kwargs.pop("params", {}) or {}
+
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _lease_id = None
+        _if_match = None
+        _if_none_match = None
+        _if_modified_since = None
+        _if_unmodified_since = None
+        if lease_access_conditions is not None:
+            _lease_id = lease_access_conditions.lease_id
+        if modified_access_conditions is not None:
+            _if_match = modified_access_conditions.if_match
+            _if_modified_since = modified_access_conditions.if_modified_since
+            _if_none_match = modified_access_conditions.if_none_match
+            _if_unmodified_since = modified_access_conditions.if_unmodified_since
+
+        _request = build_delete_request(
+            url=self._config.url,
+            request_id_parameter=request_id_parameter,
+            timeout=timeout,
+            recursive=recursive,
+            continuation=continuation,
+            lease_id=_lease_id,
+            if_match=_if_match,
+            if_none_match=_if_none_match,
+            if_modified_since=_if_modified_since,
+            if_unmodified_since=_if_unmodified_since,
+            paginated=paginated,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200, 202]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        if response.status_code == 200:
+            response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+            response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+            response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+            response_headers["x-ms-continuation"] = self._deserialize("str", response.headers.get("x-ms-continuation"))
+            response_headers["x-ms-deletion-id"] = self._deserialize("str", response.headers.get("x-ms-deletion-id"))
+
+        if response.status_code == 202:
+            response_headers["Date"] = self._deserialize("str", response.headers.get("Date"))
+            response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+            response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+            response_headers["x-ms-continuation"] = self._deserialize("str", response.headers.get("x-ms-continuation"))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
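+
+    # Usage sketch (illustrative, not part of the generated code): the public aio
+    # wrappers in azure.storage.filedatalake.aio (assumed here, not defined in this
+    # module) drive this generated delete operation roughly as follows; account URL,
+    # credential and names are placeholders.
+    #
+    #     from azure.storage.filedatalake.aio import DataLakeDirectoryClient
+    #
+    #     async def remove_directory(account_url: str, credential) -> None:
+    #         async with DataLakeDirectoryClient(
+    #             account_url, file_system_name="my-fs",
+    #             directory_name="old-data", credential=credential,
+    #         ) as directory_client:
+    #             await directory_client.delete_directory()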
+
+    @distributed_trace_async
+    async def set_access_control(
+        self,
+        timeout: Optional[int] = None,
+        owner: Optional[str] = None,
+        group: Optional[str] = None,
+        permissions: Optional[str] = None,
+        acl: Optional[str] = None,
+        request_id_parameter: Optional[str] = None,
+        lease_access_conditions: Optional[_models.LeaseAccessConditions] = None,
+        modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """Set the owner, group, permissions, or access control list for a path.
+
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param owner: Optional. The owner of the blob or directory. Default value is None.
+        :type owner: str
+        :param group: Optional. The owning group of the blob or directory. Default value is None.
+        :type group: str
+        :param permissions: Optional and only valid if Hierarchical Namespace is enabled for the
+         account. Sets POSIX access permissions for the file owner, the file owning group, and others.
+         Each class may be granted read, write, or execute permission.  The sticky bit is also
+         supported.  Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are supported.
+         Default value is None.
+        :type permissions: str
+        :param acl: Sets POSIX access control rights on files and directories. The value is a
+         comma-separated list of access control entries. Each access control entry (ACE) consists of a
+         scope, a type, a user or group identifier, and permissions in the format
+         "[scope:][type]:[id]:[permissions]". Default value is None.
+        :type acl: str
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param lease_access_conditions: Parameter group. Default value is None.
+        :type lease_access_conditions: ~azure.storage.filedatalake.models.LeaseAccessConditions
+        :param modified_access_conditions: Parameter group. Default value is None.
+        :type modified_access_conditions: ~azure.storage.filedatalake.models.ModifiedAccessConditions
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        action: Literal["setAccessControl"] = kwargs.pop("action", _params.pop("action", "setAccessControl"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _lease_id = None
+        _if_match = None
+        _if_none_match = None
+        _if_modified_since = None
+        _if_unmodified_since = None
+        if lease_access_conditions is not None:
+            _lease_id = lease_access_conditions.lease_id
+        if modified_access_conditions is not None:
+            _if_match = modified_access_conditions.if_match
+            _if_modified_since = modified_access_conditions.if_modified_since
+            _if_none_match = modified_access_conditions.if_none_match
+            _if_unmodified_since = modified_access_conditions.if_unmodified_since
+
+        _request = build_set_access_control_request(
+            url=self._config.url,
+            timeout=timeout,
+            lease_id=_lease_id,
+            owner=owner,
+            group=group,
+            permissions=permissions,
+            acl=acl,
+            if_match=_if_match,
+            if_none_match=_if_none_match,
+            if_modified_since=_if_modified_since,
+            if_unmodified_since=_if_unmodified_since,
+            request_id_parameter=request_id_parameter,
+            action=action,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
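+
+    # Usage sketch (illustrative, assumes the public aio DataLakeFileClient wrapper,
+    # which is not part of this generated module): either explicit owner/group/
+    # permissions or a full ACL string can be applied to an existing path.
+    #
+    #     await file_client.set_access_control(
+    #         owner="$superuser", group="$superuser", permissions="rwxr-x---"
+    #     )
+    #     # or, equivalently, as an ACL list:
+    #     await file_client.set_access_control(acl="user::rwx,group::r-x,other::---")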
+
+    @distributed_trace_async
+    async def set_access_control_recursive(
+        self,
+        mode: Union[str, _models.PathSetAccessControlRecursiveMode],
+        timeout: Optional[int] = None,
+        continuation: Optional[str] = None,
+        force_flag: Optional[bool] = None,
+        max_records: Optional[int] = None,
+        acl: Optional[str] = None,
+        request_id_parameter: Optional[str] = None,
+        **kwargs: Any
+    ) -> _models.SetAccessControlRecursiveResponse:
+        # pylint: disable=line-too-long
+        """Set the access control list for a path and sub-paths.
+
+        :param mode: Mode "set" sets POSIX access control rights on files and directories, "modify"
+         modifies one or more POSIX access control rights  that pre-exist on files and directories,
+         "remove" removes one or more POSIX access control rights  that were present earlier on files
+         and directories. Known values are: "set", "modify", and "remove". Required.
+        :type mode: str or ~azure.storage.filedatalake.models.PathSetAccessControlRecursiveMode
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param continuation: Optional. The number of paths processed with each invocation is limited.
+         If the number of paths to be processed exceeds this limit, a continuation token is returned in
+         the response header x-ms-continuation. When a continuation token is returned in the response,
+         it must be specified in a subsequent invocation of the setAccessControlRecursive operation to
+         continue the operation on the directory. Default value is None.
+        :type continuation: str
+        :param force_flag: Optional. Valid for "SetAccessControlRecursive" operation. If set to false,
+         the operation will terminate quickly on encountering user errors (4XX). If true, the operation
+         will ignore user errors and proceed with the operation on other sub-entities of the directory.
+         A continuation token is returned only when forceFlag is true and user errors were encountered.
+         If not set, the service defaults to false. Default value is None.
+        :type force_flag: bool
+        :param max_records: Optional. It specifies the maximum number of files or directories on which
+         the acl change will be applied. If omitted or greater than 2,000, the request will process up
+         to 2,000 items. Default value is None.
+        :type max_records: int
+        :param acl: Sets POSIX access control rights on files and directories. The value is a
+         comma-separated list of access control entries. Each access control entry (ACE) consists of a
+         scope, a type, a user or group identifier, and permissions in the format
+         "[scope:][type]:[id]:[permissions]". Default value is None.
+        :type acl: str
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :return: SetAccessControlRecursiveResponse or the result of cls(response)
+        :rtype: ~azure.storage.filedatalake.models.SetAccessControlRecursiveResponse
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        action: Literal["setAccessControlRecursive"] = kwargs.pop(
+            "action", _params.pop("action", "setAccessControlRecursive")
+        )
+        cls: ClsType[_models.SetAccessControlRecursiveResponse] = kwargs.pop("cls", None)
+
+        _request = build_set_access_control_recursive_request(
+            url=self._config.url,
+            mode=mode,
+            timeout=timeout,
+            continuation=continuation,
+            force_flag=force_flag,
+            max_records=max_records,
+            acl=acl,
+            request_id_parameter=request_id_parameter,
+            action=action,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-continuation"] = self._deserialize("str", response.headers.get("x-ms-continuation"))
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+
+        deserialized = self._deserialize("SetAccessControlRecursiveResponse", pipeline_response.http_response)
+
+        if cls:
+            return cls(pipeline_response, deserialized, response_headers)  # type: ignore
+
+        return deserialized  # type: ignore
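+
+    # Usage sketch (illustrative, assumes the public aio DataLakeDirectoryClient
+    # wrapper and its set_access_control_recursive helper, not defined here): the
+    # helper keeps following x-ms-continuation tokens and reports change counters.
+    #
+    #     result = await directory_client.set_access_control_recursive(
+    #         acl="user::rwx,group::r-x,other::---"
+    #     )
+    #     print(result.counters.files_successful, result.counters.failure_count)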
+
+    @distributed_trace_async
+    async def flush_data(
+        self,
+        timeout: Optional[int] = None,
+        position: Optional[int] = None,
+        retain_uncommitted_data: Optional[bool] = None,
+        close: Optional[bool] = None,
+        content_length: Optional[int] = None,
+        lease_action: Optional[Union[str, _models.LeaseAction]] = None,
+        lease_duration: Optional[int] = None,
+        proposed_lease_id: Optional[str] = None,
+        request_id_parameter: Optional[str] = None,
+        path_http_headers: Optional[_models.PathHTTPHeaders] = None,
+        lease_access_conditions: Optional[_models.LeaseAccessConditions] = None,
+        modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None,
+        cpk_info: Optional[_models.CpkInfo] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """Set the owner, group, permissions, or access control list for a path.
+
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param position: This parameter allows the caller to upload data in parallel and control the
+         order in which it is appended to the file.  It is required when uploading data to be appended
+         to the file and when flushing previously uploaded data to the file.  The value must be the
+         position where the data is to be appended.  Uploaded data is not immediately flushed, or
+         written, to the file.  To flush, the previously uploaded data must be contiguous, the position
+         parameter must be specified and equal to the length of the file after all data has been
+         written, and there must not be a request entity body included with the request. Default value
+         is None.
+        :type position: int
+        :param retain_uncommitted_data: Valid only for flush operations.  If "true", uncommitted data
+         is retained after the flush operation completes; otherwise, the uncommitted data is deleted
+         after the flush operation.  The default is false.  Data at offsets less than the specified
+         position are written to the file when flush succeeds, but this optional parameter allows data
+         after the flush position to be retained for a future flush operation. Default value is None.
+        :type retain_uncommitted_data: bool
+        :param close: Azure Storage Events allow applications to receive notifications when files
+         change. When Azure Storage Events are enabled, a file changed event is raised. This event has a
+         property indicating whether this is the final change to distinguish the difference between an
+         intermediate flush to a file stream and the final close of a file stream. The close query
+         parameter is valid only when the action is "flush" and change notifications are enabled. If the
+         value of close is "true" and the flush operation completes successfully, the service raises a
+         file change notification with a property indicating that this is the final update (the file
+         stream has been closed). If "false" a change notification is raised indicating the file has
+         changed. The default is false. This query parameter is set to true by the Hadoop ABFS driver to
+         indicate that the file stream has been closed.". Default value is None.
+        :type close: bool
+        :param content_length: Required for "Append Data" and "Flush Data".  Must be 0 for "Flush
+         Data".  Must be the length of the request content in bytes for "Append Data". Default value is
+         None.
+        :type content_length: int
+        :param lease_action: Optional. If "acquire" it will acquire the lease. If "auto-renew" it will
+         renew the lease. If "release" it will release the lease only on flush. If "acquire-release" it
+         will acquire & complete the operation & release the lease once operation is done. Known values
+         are: "acquire", "auto-renew", "release", and "acquire-release". Default value is None.
+        :type lease_action: str or ~azure.storage.filedatalake.models.LeaseAction
+        :param lease_duration: The lease duration is required to acquire a lease, and specifies the
+         duration of the lease in seconds.  The lease duration must be between 15 and 60 seconds or -1
+         for infinite lease. Default value is None.
+        :type lease_duration: int
+        :param proposed_lease_id: Proposed lease ID, in a GUID string format. The Blob service returns
+         400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid
+         Constructor (String) for a list of valid GUID string formats. Default value is None.
+        :type proposed_lease_id: str
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param path_http_headers: Parameter group. Default value is None.
+        :type path_http_headers: ~azure.storage.filedatalake.models.PathHTTPHeaders
+        :param lease_access_conditions: Parameter group. Default value is None.
+        :type lease_access_conditions: ~azure.storage.filedatalake.models.LeaseAccessConditions
+        :param modified_access_conditions: Parameter group. Default value is None.
+        :type modified_access_conditions: ~azure.storage.filedatalake.models.ModifiedAccessConditions
+        :param cpk_info: Parameter group. Default value is None.
+        :type cpk_info: ~azure.storage.filedatalake.models.CpkInfo
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        action: Literal["flush"] = kwargs.pop("action", _params.pop("action", "flush"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _content_md5 = None
+        _lease_id = None
+        _cache_control = None
+        _content_type_parameter = None
+        _content_disposition = None
+        _content_encoding = None
+        _content_language = None
+        _if_match = None
+        _if_none_match = None
+        _if_modified_since = None
+        _if_unmodified_since = None
+        _encryption_key = None
+        _encryption_key_sha256 = None
+        _encryption_algorithm = None
+        if path_http_headers is not None:
+            _cache_control = path_http_headers.cache_control
+            _content_disposition = path_http_headers.content_disposition
+            _content_encoding = path_http_headers.content_encoding
+            _content_language = path_http_headers.content_language
+            _content_md5 = path_http_headers.content_md5
+            _content_type_parameter = path_http_headers.content_type
+        if lease_access_conditions is not None:
+            _lease_id = lease_access_conditions.lease_id
+        if modified_access_conditions is not None:
+            _if_match = modified_access_conditions.if_match
+            _if_modified_since = modified_access_conditions.if_modified_since
+            _if_none_match = modified_access_conditions.if_none_match
+            _if_unmodified_since = modified_access_conditions.if_unmodified_since
+        if cpk_info is not None:
+            _encryption_algorithm = cpk_info.encryption_algorithm
+            _encryption_key = cpk_info.encryption_key
+            _encryption_key_sha256 = cpk_info.encryption_key_sha256
+
+        _request = build_flush_data_request(
+            url=self._config.url,
+            timeout=timeout,
+            position=position,
+            retain_uncommitted_data=retain_uncommitted_data,
+            close=close,
+            content_length=content_length,
+            content_md5=_content_md5,
+            lease_id=_lease_id,
+            lease_action=lease_action,
+            lease_duration=lease_duration,
+            proposed_lease_id=proposed_lease_id,
+            cache_control=_cache_control,
+            content_type_parameter=_content_type_parameter,
+            content_disposition=_content_disposition,
+            content_encoding=_content_encoding,
+            content_language=_content_language,
+            if_match=_if_match,
+            if_none_match=_if_none_match,
+            if_modified_since=_if_modified_since,
+            if_unmodified_since=_if_unmodified_since,
+            request_id_parameter=request_id_parameter,
+            encryption_key=_encryption_key,
+            encryption_key_sha256=_encryption_key_sha256,
+            encryption_algorithm=_encryption_algorithm,  # type: ignore
+            action=action,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["Content-Length"] = self._deserialize("int", response.headers.get("Content-Length"))
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["x-ms-request-server-encrypted"] = self._deserialize(
+            "bool", response.headers.get("x-ms-request-server-encrypted")
+        )
+        response_headers["x-ms-encryption-key-sha256"] = self._deserialize(
+            "str", response.headers.get("x-ms-encryption-key-sha256")
+        )
+        response_headers["x-ms-lease-renewed"] = self._deserialize("bool", response.headers.get("x-ms-lease-renewed"))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
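+
+    # Usage sketch (illustrative, assumes the public aio DataLakeFileClient wrapper,
+    # not defined in this module): data appended earlier becomes readable only after
+    # a flush at a position equal to the total number of bytes written.
+    #
+    #     data = b"hello, data lake"
+    #     await file_client.append_data(data, offset=0, length=len(data))
+    #     await file_client.flush_data(len(data))  # position == total bytes appended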
+
+    @distributed_trace_async
+    async def append_data(
+        self,
+        body: IO[bytes],
+        position: Optional[int] = None,
+        timeout: Optional[int] = None,
+        content_length: Optional[int] = None,
+        transactional_content_crc64: Optional[bytes] = None,
+        lease_action: Optional[Union[str, _models.LeaseAction]] = None,
+        lease_duration: Optional[int] = None,
+        proposed_lease_id: Optional[str] = None,
+        request_id_parameter: Optional[str] = None,
+        flush: Optional[bool] = None,
+        structured_body_type: Optional[str] = None,
+        structured_content_length: Optional[int] = None,
+        path_http_headers: Optional[_models.PathHTTPHeaders] = None,
+        lease_access_conditions: Optional[_models.LeaseAccessConditions] = None,
+        cpk_info: Optional[_models.CpkInfo] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """Append data to the file.
+
+        :param body: Initial data. Required.
+        :type body: IO[bytes]
+        :param position: This parameter allows the caller to upload data in parallel and control the
+         order in which it is appended to the file.  It is required when uploading data to be appended
+         to the file and when flushing previously uploaded data to the file.  The value must be the
+         position where the data is to be appended.  Uploaded data is not immediately flushed, or
+         written, to the file.  To flush, the previously uploaded data must be contiguous, the position
+         parameter must be specified and equal to the length of the file after all data has been
+         written, and there must not be a request entity body included with the request. Default value
+         is None.
+        :type position: int
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param content_length: Required for "Append Data" and "Flush Data".  Must be 0 for "Flush
+         Data".  Must be the length of the request content in bytes for "Append Data". Default value is
+         None.
+        :type content_length: int
+        :param transactional_content_crc64: Specify the transactional crc64 for the body, to be
+         validated by the service. Default value is None.
+        :type transactional_content_crc64: bytes
+        :param lease_action: Optional. If "acquire" it will acquire the lease. If "auto-renew" it will
+         renew the lease. If "release" it will release the lease only on flush. If "acquire-release" it
+         will acquire & complete the operation & release the lease once operation is done. Known values
+         are: "acquire", "auto-renew", "release", and "acquire-release". Default value is None.
+        :type lease_action: str or ~azure.storage.filedatalake.models.LeaseAction
+        :param lease_duration: The lease duration is required to acquire a lease, and specifies the
+         duration of the lease in seconds.  The lease duration must be between 15 and 60 seconds or -1
+         for infinite lease. Default value is None.
+        :type lease_duration: int
+        :param proposed_lease_id: Proposed lease ID, in a GUID string format. The Blob service returns
+         400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid
+         Constructor (String) for a list of valid GUID string formats. Default value is None.
+        :type proposed_lease_id: str
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param flush: If file should be flushed after the append. Default value is None.
+        :type flush: bool
+        :param structured_body_type: Required if the request body is a structured message. Specifies
+         the message schema version and properties. Default value is None.
+        :type structured_body_type: str
+        :param structured_content_length: Required if the request body is a structured message.
+         Specifies the length of the blob/file content inside the message body. Will always be smaller
+         than Content-Length. Default value is None.
+        :type structured_content_length: int
+        :param path_http_headers: Parameter group. Default value is None.
+        :type path_http_headers: ~azure.storage.filedatalake.models.PathHTTPHeaders
+        :param lease_access_conditions: Parameter group. Default value is None.
+        :type lease_access_conditions: ~azure.storage.filedatalake.models.LeaseAccessConditions
+        :param cpk_info: Parameter group. Default value is None.
+        :type cpk_info: ~azure.storage.filedatalake.models.CpkInfo
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        action: Literal["append"] = kwargs.pop("action", _params.pop("action", "append"))
+        content_type: str = kwargs.pop("content_type", _headers.pop("Content-Type", "application/json"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _transactional_content_hash = None
+        _lease_id = None
+        _encryption_key = None
+        _encryption_key_sha256 = None
+        _encryption_algorithm = None
+        if path_http_headers is not None:
+            _transactional_content_hash = path_http_headers.transactional_content_hash
+        if lease_access_conditions is not None:
+            _lease_id = lease_access_conditions.lease_id
+        if cpk_info is not None:
+            _encryption_algorithm = cpk_info.encryption_algorithm
+            _encryption_key = cpk_info.encryption_key
+            _encryption_key_sha256 = cpk_info.encryption_key_sha256
+        _content = body
+
+        _request = build_append_data_request(
+            url=self._config.url,
+            position=position,
+            timeout=timeout,
+            content_length=content_length,
+            transactional_content_hash=_transactional_content_hash,
+            transactional_content_crc64=transactional_content_crc64,
+            lease_id=_lease_id,
+            lease_action=lease_action,
+            lease_duration=lease_duration,
+            proposed_lease_id=proposed_lease_id,
+            request_id_parameter=request_id_parameter,
+            encryption_key=_encryption_key,
+            encryption_key_sha256=_encryption_key_sha256,
+            encryption_algorithm=_encryption_algorithm,  # type: ignore
+            flush=flush,
+            structured_body_type=structured_body_type,
+            structured_content_length=structured_content_length,
+            action=action,
+            content_type=content_type,
+            version=self._config.version,
+            content=_content,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [202]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Content-MD5"] = self._deserialize("bytearray", response.headers.get("Content-MD5"))
+        response_headers["x-ms-content-crc64"] = self._deserialize(
+            "bytearray", response.headers.get("x-ms-content-crc64")
+        )
+        response_headers["x-ms-request-server-encrypted"] = self._deserialize(
+            "bool", response.headers.get("x-ms-request-server-encrypted")
+        )
+        response_headers["x-ms-encryption-key-sha256"] = self._deserialize(
+            "str", response.headers.get("x-ms-encryption-key-sha256")
+        )
+        response_headers["x-ms-lease-renewed"] = self._deserialize("bool", response.headers.get("x-ms-lease-renewed"))
+        response_headers["x-ms-structured-body"] = self._deserialize(
+            "str", response.headers.get("x-ms-structured-body")
+        )
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
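+
+    # Usage sketch (illustrative, assumes the public aio DataLakeFileClient wrapper,
+    # not defined in this module): chunks are appended at increasing offsets and then
+    # committed with a single flush at the final offset.
+    #
+    #     offset = 0
+    #     for chunk in (b"part-1;", b"part-2;", b"part-3;"):
+    #         await file_client.append_data(chunk, offset=offset, length=len(chunk))
+    #         offset += len(chunk)
+    #     await file_client.flush_data(offset)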
+
+    @distributed_trace_async
+    async def set_expiry(
+        self,
+        expiry_options: Union[str, _models.PathExpiryOptions],
+        timeout: Optional[int] = None,
+        request_id_parameter: Optional[str] = None,
+        expires_on: Optional[str] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """Sets the time a blob will expire and be deleted.
+
+        :param expiry_options: Indicates mode of the expiry time. Known values are:
+         "NeverExpire", "RelativeToCreation", "RelativeToNow", and "Absolute". Required.
+        :type expiry_options: str or ~azure.storage.filedatalake.models.PathExpiryOptions
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param expires_on: The time when the blob is set to expire. Default value is None.
+        :type expires_on: str
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        comp: Literal["expiry"] = kwargs.pop("comp", _params.pop("comp", "expiry"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _request = build_set_expiry_request(
+            url=self._config.url,
+            expiry_options=expiry_options,
+            timeout=timeout,
+            request_id_parameter=request_id_parameter,
+            expires_on=expires_on,
+            comp=comp,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
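+
+    # Usage sketch (illustrative, assumes the public aio DataLakeFileClient wrapper
+    # and its set_file_expiry helper, not defined in this module): for the relative
+    # modes the expiry is given in milliseconds, for "Absolute" as a datetime.
+    #
+    #     # expire the file 24 hours from now (milliseconds; an assumption per service docs)
+    #     await file_client.set_file_expiry("RelativeToNow", expires_on=24 * 60 * 60 * 1000)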
+
+    @distributed_trace_async
+    async def undelete(
+        self,
+        timeout: Optional[int] = None,
+        undelete_source: Optional[str] = None,
+        request_id_parameter: Optional[str] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """Undelete a path that was previously soft deleted.
+
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param undelete_source: Only for hierarchical namespace enabled accounts. Optional. The path of
+         the soft deleted blob to undelete. Default value is None.
+        :type undelete_source: str
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        comp: Literal["undelete"] = kwargs.pop("comp", _params.pop("comp", "undelete"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _request = build_undelete_request(
+            url=self._config.url,
+            timeout=timeout,
+            undelete_source=undelete_source,
+            request_id_parameter=request_id_parameter,
+            comp=comp,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-resource-type"] = self._deserialize("str", response.headers.get("x-ms-resource-type"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
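+
+    # Usage sketch (illustrative): a FileSystemClient-level undelete_path helper is
+    # assumed to wrap this operation in recent SDK versions; the deleted path name
+    # and deletion id would normally come from a soft-delete listing call.
+    #
+    #     restored_client = await file_system_client.undelete_path(
+    #         deleted_path_name="logs/app.log", deletion_id="132416851402053817"
+    #     )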
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/aio/operations/_service_operations.py b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/aio/operations/_service_operations.py
new file mode 100644
index 00000000..0e0243e9
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/aio/operations/_service_operations.py
@@ -0,0 +1,161 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+import sys
+from typing import Any, AsyncIterable, Callable, Dict, Literal, Optional, TypeVar
+
+from azure.core import AsyncPipelineClient
+from azure.core.async_paging import AsyncItemPaged, AsyncList
+from azure.core.exceptions import (
+    ClientAuthenticationError,
+    HttpResponseError,
+    ResourceExistsError,
+    ResourceNotFoundError,
+    ResourceNotModifiedError,
+    map_error,
+)
+from azure.core.pipeline import PipelineResponse
+from azure.core.rest import AsyncHttpResponse, HttpRequest
+from azure.core.tracing.decorator import distributed_trace
+from azure.core.utils import case_insensitive_dict
+
+from ... import models as _models
+from ..._serialization import Deserializer, Serializer
+from ...operations._service_operations import build_list_file_systems_request
+from .._configuration import AzureDataLakeStorageRESTAPIConfiguration
+
+if sys.version_info >= (3, 9):
+    from collections.abc import MutableMapping
+else:
+    from typing import MutableMapping  # type: ignore
+T = TypeVar("T")
+ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
+
+
+class ServiceOperations:
+    """
+    .. warning::
+        **DO NOT** instantiate this class directly.
+
+        Instead, you should access the following operations through
+        :class:`~azure.storage.filedatalake.aio.AzureDataLakeStorageRESTAPI`'s
+        :attr:`service` attribute.
+    """
+
+    models = _models
+
+    def __init__(self, *args, **kwargs) -> None:
+        input_args = list(args)
+        self._client: AsyncPipelineClient = input_args.pop(0) if input_args else kwargs.pop("client")
+        self._config: AzureDataLakeStorageRESTAPIConfiguration = (
+            input_args.pop(0) if input_args else kwargs.pop("config")
+        )
+        self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer")
+        self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer")
+
+    @distributed_trace
+    def list_file_systems(
+        self,
+        prefix: Optional[str] = None,
+        continuation: Optional[str] = None,
+        max_results: Optional[int] = None,
+        request_id_parameter: Optional[str] = None,
+        timeout: Optional[int] = None,
+        **kwargs: Any
+    ) -> AsyncIterable["_models.FileSystem"]:
+        # pylint: disable=line-too-long
+        """List FileSystems.
+
+        List filesystems and their properties in given account.
+
+        :param prefix: Filters results to filesystems within the specified prefix. Default value is
+         None.
+        :type prefix: str
+        :param continuation: Optional. The number of filesystems returned with each invocation is
+         limited. If the number of filesystems to be returned exceeds this limit, a continuation token
+         is returned in the response header x-ms-continuation. When a continuation token is returned in
+         the response, it must be specified in a subsequent invocation of the list operation to
+         continue listing the filesystems. Default value is None.
+        :type continuation: str
+        :param max_results: An optional value that specifies the maximum number of items to return. If
+         omitted or greater than 5,000, the response will include up to 5,000 items. Default value is
+         None.
+        :type max_results: int
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :return: An iterator like instance of either FileSystem or the result of cls(response)
+        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.storage.filedatalake.models.FileSystem]
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        resource: Literal["account"] = kwargs.pop("resource", _params.pop("resource", "account"))
+        cls: ClsType[_models.FileSystemList] = kwargs.pop("cls", None)
+
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        def prepare_request(next_link=None):
+            if not next_link:
+
+                _request = build_list_file_systems_request(
+                    url=self._config.url,
+                    prefix=prefix,
+                    continuation=continuation,
+                    max_results=max_results,
+                    request_id_parameter=request_id_parameter,
+                    timeout=timeout,
+                    resource=resource,
+                    version=self._config.version,
+                    headers=_headers,
+                    params=_params,
+                )
+                _request.url = self._client.format_url(_request.url)
+
+            else:
+                _request = HttpRequest("GET", next_link)
+                _request.url = self._client.format_url(_request.url)
+                _request.method = "GET"
+            return _request
+
+        async def extract_data(pipeline_response):
+            deserialized = self._deserialize("FileSystemList", pipeline_response)
+            list_of_elem = deserialized.filesystems
+            if cls:
+                list_of_elem = cls(list_of_elem)  # type: ignore
+            return None, AsyncList(list_of_elem)
+
+        async def get_next(next_link=None):
+            _request = prepare_request(next_link)
+
+            _stream = False
+            pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+                _request, stream=_stream, **kwargs
+            )
+            response = pipeline_response.http_response
+
+            if response.status_code not in [200]:
+                map_error(status_code=response.status_code, response=response, error_map=error_map)
+                error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+                raise HttpResponseError(response=response, model=error)
+
+            return pipeline_response
+
+        return AsyncItemPaged(get_next, extract_data)
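+
+    # Usage sketch (illustrative, assumes the public aio DataLakeServiceClient wrapper,
+    # which is not part of this generated module): the async pager hides the
+    # x-ms-continuation handling performed above.
+    #
+    #     from azure.storage.filedatalake.aio import DataLakeServiceClient
+    #
+    #     async def show_file_systems(conn_str: str) -> None:
+    #         async with DataLakeServiceClient.from_connection_string(conn_str) as service:
+    #             async for fs in service.list_file_systems(name_starts_with="logs"):
+    #                 print(fs.name)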