author:    S. Solomon Darnell  2025-03-28 21:52:21 -0500
committer: S. Solomon Darnell  2025-03-28 21:52:21 -0500
commit:    4a52a71956a8d46fcb7294ac71734504bb09bcc2 (patch)
tree:      ee3dc5af3b6313e921cd920906356f5d4febc4ed /.venv/lib/python3.12/site-packages/azure/storage
parent:    cc961e04ba734dd72309fb548a2f97d67d578813 (diff)
download:  gn-ai-master.tar.gz
message:   two versions of R2R are here (HEAD, master)
Diffstat (limited to '.venv/lib/python3.12/site-packages/azure/storage')
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/blob/__init__.py | 263
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/blob/_blob_client.py | 3314
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/blob/_blob_client_helpers.py | 1246
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/blob/_blob_service_client.py | 788
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/blob/_blob_service_client_helpers.py | 27
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/blob/_container_client.py | 1620
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/blob/_container_client_helpers.py | 266
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/blob/_deserialize.py | 234
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/blob/_download.py | 933
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/blob/_encryption.py | 1127
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/blob/_generated/__init__.py | 29
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/blob/_generated/_azure_blob_storage.py | 119
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/blob/_generated/_configuration.py | 51
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/blob/_generated/_patch.py | 20
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/blob/_generated/_serialization.py | 2050
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/blob/_generated/aio/__init__.py | 29
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/blob/_generated/aio/_azure_blob_storage.py | 121
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/blob/_generated/aio/_configuration.py | 51
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/blob/_generated/aio/_patch.py | 20
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/blob/_generated/aio/operations/__init__.py | 35
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/blob/_generated/aio/operations/_append_blob_operations.py | 740
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/blob/_generated/aio/operations/_blob_operations.py | 3211
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/blob/_generated/aio/operations/_block_blob_operations.py | 1167
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/blob/_generated/aio/operations/_container_operations.py | 1818
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/blob/_generated/aio/operations/_page_blob_operations.py | 1459
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/blob/_generated/aio/operations/_patch.py | 20
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/blob/_generated/aio/operations/_service_operations.py | 755
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/blob/_generated/models/__init__.py | 184
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/blob/_generated/models/_azure_blob_storage_enums.py | 392
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/blob/_generated/models/_models_py3.py | 2771
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/blob/_generated/models/_patch.py | 20
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/blob/_generated/operations/__init__.py | 35
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/blob/_generated/operations/_append_blob_operations.py | 1118
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/blob/_generated/operations/_blob_operations.py | 4642
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/blob/_generated/operations/_block_blob_operations.py | 1790
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/blob/_generated/operations/_container_operations.py | 2648
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/blob/_generated/operations/_page_blob_operations.py | 2218
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/blob/_generated/operations/_patch.py | 20
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/blob/_generated/operations/_service_operations.py | 1063
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/blob/_generated/py.typed | 1
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/blob/_lease.py | 341
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/blob/_list_blobs_helper.py | 328
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/blob/_models.py | 1507
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/blob/_quick_query_helper.py | 194
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/blob/_serialize.py | 214
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/blob/_shared/__init__.py | 54
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/blob/_shared/authentication.py | 245
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/blob/_shared/avro/__init__.py | 5
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/blob/_shared/avro/avro_io.py | 435
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/blob/_shared/avro/avro_io_async.py | 419
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/blob/_shared/avro/datafile.py | 257
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/blob/_shared/avro/datafile_async.py | 210
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/blob/_shared/avro/schema.py | 1178
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/blob/_shared/base_client.py | 458
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/blob/_shared/base_client_async.py | 280
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/blob/_shared/constants.py | 19
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/blob/_shared/models.py | 585
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/blob/_shared/parser.py | 53
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/blob/_shared/policies.py | 694
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/blob/_shared/policies_async.py | 296
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/blob/_shared/request_handlers.py | 270
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/blob/_shared/response_handlers.py | 200
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/blob/_shared/shared_access_signature.py | 252
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/blob/_shared/uploads.py | 604
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/blob/_shared/uploads_async.py | 460
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/blob/_shared_access_signature.py | 699
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/blob/_upload_helpers.py | 354
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/blob/_version.py | 7
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/blob/aio/__init__.py | 166
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/blob/aio/_blob_client_async.py | 3215
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/blob/aio/_blob_service_client_async.py | 799
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/blob/aio/_container_client_async.py | 1611
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/blob/aio/_download_async.py | 872
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/blob/aio/_encryption_async.py | 72
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/blob/aio/_lease_async.py | 346
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/blob/aio/_list_blobs_helper.py | 249
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/blob/aio/_models.py | 199
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/blob/aio/_upload_helpers.py | 334
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/blob/py.typed | 0
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/filedatalake/__init__.py | 110
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/filedatalake/_data_lake_directory_client.py | 759
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/filedatalake/_data_lake_file_client.py | 983
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/filedatalake/_data_lake_lease.py | 273
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/filedatalake/_data_lake_service_client.py | 633
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/filedatalake/_deserialize.py | 241
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/filedatalake/_download.py | 82
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/filedatalake/_file_system_client.py | 1074
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/__init__.py | 29
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/_azure_data_lake_storage_restapi.py | 112
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/_configuration.py | 61
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/_patch.py | 20
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/_serialization.py | 2050
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/aio/__init__.py | 29
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/aio/_azure_data_lake_storage_restapi.py | 114
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/aio/_configuration.py | 61
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/aio/_patch.py | 20
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/aio/operations/__init__.py | 29
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/aio/operations/_file_system_operations.py | 628
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/aio/operations/_patch.py | 20
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/aio/operations/_path_operations.py | 1968
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/aio/operations/_service_operations.py | 161
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/models/__init__.py | 82
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/models/_azure_data_lake_storage_restapi_enums.py | 90
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/models/_models_py3.py | 1041
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/models/_patch.py | 20
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/operations/__init__.py | 29
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/operations/_file_system_operations.py | 888
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/operations/_patch.py | 20
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/operations/_path_operations.py | 2845
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/operations/_service_operations.py | 208
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/py.typed | 1
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/filedatalake/_list_paths_helper.py | 173
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/filedatalake/_models.py | 1158
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/filedatalake/_path_client.py | 1118
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/filedatalake/_quick_query_helper.py | 73
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/filedatalake/_serialize.py | 185
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/filedatalake/_shared/__init__.py | 54
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/filedatalake/_shared/authentication.py | 245
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/filedatalake/_shared/base_client.py | 458
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/filedatalake/_shared/base_client_async.py | 280
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/filedatalake/_shared/constants.py | 19
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/filedatalake/_shared/models.py | 585
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/filedatalake/_shared/parser.py | 53
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/filedatalake/_shared/policies.py | 694
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/filedatalake/_shared/policies_async.py | 296
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/filedatalake/_shared/request_handlers.py | 270
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/filedatalake/_shared/response_handlers.py | 200
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/filedatalake/_shared/shared_access_signature.py | 252
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/filedatalake/_shared/uploads.py | 604
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/filedatalake/_shared/uploads_async.py | 460
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/filedatalake/_shared_access_signature.py | 462
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/filedatalake/_upload_helper.py | 105
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/filedatalake/_version.py | 7
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/filedatalake/aio/__init__.py | 24
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/filedatalake/aio/_data_lake_directory_client_async.py | 721
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/filedatalake/aio/_data_lake_file_client_async.py | 735
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/filedatalake/aio/_data_lake_lease_async.py | 269
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/filedatalake/aio/_data_lake_service_client_async.py | 570
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/filedatalake/aio/_download_async.py | 82
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/filedatalake/aio/_file_system_client_async.py | 1004
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/filedatalake/aio/_list_paths_helper.py | 176
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/filedatalake/aio/_models.py | 40
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/filedatalake/aio/_path_client_async.py | 901
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/filedatalake/aio/_upload_helper.py | 104
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/filedatalake/py.typed | 0
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/fileshare/__init__.py | 99
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/fileshare/_deserialize.py | 85
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/fileshare/_directory_client.py | 983
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/fileshare/_directory_client_helpers.py | 61
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/fileshare/_download.py | 524
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/fileshare/_file_client.py | 1739
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/fileshare/_file_client_helpers.py | 145
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/fileshare/_generated/__init__.py | 29
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/fileshare/_generated/_azure_file_storage.py | 130
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/fileshare/_generated/_configuration.py | 77
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/fileshare/_generated/_patch.py | 20
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/fileshare/_generated/_serialization.py | 2050
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/fileshare/_generated/aio/__init__.py | 29
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/fileshare/_generated/aio/_azure_file_storage.py | 132
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/fileshare/_generated/aio/_configuration.py | 77
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/fileshare/_generated/aio/_patch.py | 20
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/fileshare/_generated/aio/operations/__init__.py | 31
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/fileshare/_generated/aio/operations/_directory_operations.py | 1056
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/fileshare/_generated/aio/operations/_file_operations.py | 2518
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/fileshare/_generated/aio/operations/_patch.py | 20
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/fileshare/_generated/aio/operations/_service_operations.py | 284
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/fileshare/_generated/aio/operations/_share_operations.py | 1765
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/fileshare/_generated/models/__init__.py | 130
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/fileshare/_generated/models/_azure_file_storage_enums.py | 222
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/fileshare/_generated/models/_models_py3.py | 1711
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/fileshare/_generated/models/_patch.py | 20
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/fileshare/_generated/operations/__init__.py | 31
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/fileshare/_generated/operations/_directory_operations.py | 1570
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/fileshare/_generated/operations/_file_operations.py | 3755
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/fileshare/_generated/operations/_patch.py | 20
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/fileshare/_generated/operations/_service_operations.py | 410
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/fileshare/_generated/operations/_share_operations.py | 2595
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/fileshare/_generated/py.typed | 1
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/fileshare/_lease.py | 251
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/fileshare/_models.py | 1294
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/fileshare/_parser.py | 60
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/fileshare/_serialize.py | 195
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/fileshare/_share_client.py | 995
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/fileshare/_share_client_helpers.py | 75
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/fileshare/_share_service_client.py | 489
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/fileshare/_share_service_client_helpers.py | 23
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/fileshare/_shared/__init__.py | 54
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/fileshare/_shared/authentication.py | 244
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/fileshare/_shared/base_client.py | 458
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/fileshare/_shared/base_client_async.py | 280
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/fileshare/_shared/constants.py | 19
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/fileshare/_shared/models.py | 585
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/fileshare/_shared/parser.py | 53
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/fileshare/_shared/policies.py | 694
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/fileshare/_shared/policies_async.py | 296
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/fileshare/_shared/request_handlers.py | 270
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/fileshare/_shared/response_handlers.py | 200
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/fileshare/_shared/shared_access_signature.py | 243
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/fileshare/_shared/uploads.py | 604
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/fileshare/_shared/uploads_async.py | 460
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/fileshare/_shared_access_signature.py | 574
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/fileshare/_version.py | 7
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/fileshare/aio/__init__.py | 20
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/fileshare/aio/_directory_client_async.py | 988
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/fileshare/aio/_download_async.py | 502
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/fileshare/aio/_file_client_async.py | 1740
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/fileshare/aio/_lease_async.py | 249
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/fileshare/aio/_models.py | 208
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/fileshare/aio/_share_client_async.py | 991
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/fileshare/aio/_share_service_client_async.py | 490
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/fileshare/py.typed | 0
211 files changed, 119904 insertions, 0 deletions
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/blob/__init__.py b/.venv/lib/python3.12/site-packages/azure/storage/blob/__init__.py
new file mode 100644
index 00000000..23865956
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/blob/__init__.py
@@ -0,0 +1,263 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+# pylint: disable=docstring-keyword-should-match-keyword-only
+
+import os
+
+from typing import Any, AnyStr, cast, Dict, IO, Iterable, Optional, Union, TYPE_CHECKING
+from ._version import VERSION
+from ._blob_client import BlobClient
+from ._container_client import ContainerClient
+from ._blob_service_client import BlobServiceClient
+from ._lease import BlobLeaseClient
+from ._download import StorageStreamDownloader
+from ._quick_query_helper import BlobQueryReader
+from ._shared_access_signature import generate_account_sas, generate_container_sas, generate_blob_sas
+from ._shared.policies import ExponentialRetry, LinearRetry
+from ._shared.response_handlers import PartialBatchErrorException
+from ._shared.models import (
+    LocationMode,
+    ResourceTypes,
+    AccountSasPermissions,
+    StorageErrorCode,
+    UserDelegationKey,
+    Services
+)
+from ._generated.models import RehydratePriority
+from ._models import (
+    BlobType,
+    BlockState,
+    StandardBlobTier,
+    PremiumPageBlobTier,
+    BlobImmutabilityPolicyMode,
+    SequenceNumberAction,
+    PublicAccess,
+    BlobAnalyticsLogging,
+    Metrics,
+    RetentionPolicy,
+    StaticWebsite,
+    CorsRule,
+    ContainerProperties,
+    BlobProperties,
+    FilteredBlob,
+    LeaseProperties,
+    ContentSettings,
+    CopyProperties,
+    BlobBlock,
+    PageRange,
+    AccessPolicy,
+    ContainerSasPermissions,
+    BlobSasPermissions,
+    CustomerProvidedEncryptionKey,
+    ContainerEncryptionScope,
+    BlobQueryError,
+    DelimitedJsonDialect,
+    DelimitedTextDialect,
+    QuickQueryDialect,
+    ArrowDialect,
+    ArrowType,
+    ObjectReplicationPolicy,
+    ObjectReplicationRule,
+    ImmutabilityPolicy,
+)
+from ._list_blobs_helper import BlobPrefix
+
+if TYPE_CHECKING:
+    from azure.core.credentials import AzureNamedKeyCredential, AzureSasCredential, TokenCredential
+
+__version__ = VERSION
+
+
+def upload_blob_to_url(
+    blob_url: str,
+    data: Union[Iterable[AnyStr], IO[AnyStr]],
+    credential: Optional[Union[str, Dict[str, str], "AzureNamedKeyCredential", "AzureSasCredential", "TokenCredential"]] = None,  # pylint: disable=line-too-long
+    **kwargs: Any
+) -> Dict[str, Any]:
+    """Upload data to a given URL
+
+    The data will be uploaded as a block blob.
+
+    :param str blob_url:
+        The full URI to the blob. This can also include a SAS token.
+    :param data:
+        The data to upload. This can be bytes, text, an iterable or a file-like object.
+    :type data: bytes or str or Iterable
+    :param credential:
+        The credentials with which to authenticate. This is optional if the
+        blob URL already has a SAS token. The value can be a SAS token string,
+        an instance of an AzureSasCredential or AzureNamedKeyCredential from azure.core.credentials,
+        an account shared access key, or an instance of a TokenCredential class from azure.identity.
+        If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential
+        - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError.
+        If using an instance of AzureNamedKeyCredential, "name" should be the storage account name, and "key"
+        should be the storage account key.
+    :type credential:
+        ~azure.core.credentials.AzureNamedKeyCredential or
+        ~azure.core.credentials.AzureSasCredential or
+        ~azure.core.credentials.TokenCredential or
+        str or dict[str, str] or None
+    :keyword bool overwrite:
+        Whether the blob to be uploaded should overwrite the current data.
+        If True, upload_blob_to_url will overwrite any existing data. If set to False, the
+        operation will fail with a ResourceExistsError.
+    :keyword int max_concurrency:
+        The number of parallel connections with which to upload.
+    :keyword int length:
+        Number of bytes to read from the stream. This is optional, but
+        should be supplied for optimal performance.
+    :keyword dict(str,str) metadata:
+        Name-value pairs associated with the blob as metadata.
+    :keyword bool validate_content:
+        If true, calculates an MD5 hash for each chunk of the blob. The storage
+        service checks the hash of the content that has arrived with the hash
+        that was sent. This is primarily valuable for detecting bitflips on
+        the wire if using http instead of https, as https (the default) will
+        already validate. Note that this MD5 hash is not stored with the
+        blob. Also note that if enabled, the memory-efficient upload algorithm
+        will not be used, because computing the MD5 hash requires buffering
+        entire blocks, and doing so defeats the purpose of the memory-efficient algorithm.
+    :keyword str encoding:
+        Encoding to use if text is supplied as input. Defaults to UTF-8.
+    :returns: Blob-updated property dict (Etag and last modified)
+    :rtype: dict(str, Any)
+    """
+    with BlobClient.from_blob_url(blob_url, credential=credential) as client:
+        return cast(BlobClient, client).upload_blob(data=data, blob_type=BlobType.BLOCKBLOB, **kwargs)
+
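+# Editor's note: a minimal usage sketch, not part of the SDK source. The SAS
+# URL below is a placeholder, and overwrite=True is an assumption for the demo.
+#
+#     from azure.storage.blob import upload_blob_to_url
+#
+#     sas_url = "https://<account>.blob.core.windows.net/<container>/<blob>?<sas-token>"
+#     result = upload_blob_to_url(sas_url, b"hello, blob", overwrite=True)
+#     print(result["etag"], result["last_modified"])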
+
+def _download_to_stream(client: BlobClient, handle: IO[bytes], **kwargs: Any) -> None:
+    """
+    Download data to specified open file-handle.
+
+    :param BlobClient client: The BlobClient to download with.
+    :param Stream handle: A Stream to download the data into.
+    """
+    stream = client.download_blob(**kwargs)
+    stream.readinto(handle)
+
+
+def download_blob_from_url(
+    blob_url: str,
+    output: Union[str, IO[bytes]],
+    credential: Optional[Union[str, Dict[str, str], "AzureNamedKeyCredential", "AzureSasCredential", "TokenCredential"]] = None,  # pylint: disable=line-too-long
+    **kwargs: Any
+) -> None:
+    """Download the contents of a blob to a local file or stream.
+
+    :param str blob_url:
+        The full URI to the blob. This can also include a SAS token.
+    :param output:
+        Where the data should be downloaded to. This could be either a file path to write to,
+        or an open IO handle to write to.
+    :type output: str or writable stream.
+    :param credential:
+        The credentials with which to authenticate. This is optional if the
+        blob URL already has a SAS token or the blob is public. The value can be a SAS token string,
+        an instance of an AzureSasCredential or AzureNamedKeyCredential from azure.core.credentials,
+        an account shared access key, or an instance of a TokenCredential class from azure.identity.
+        If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential
+        - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError.
+        If using an instance of AzureNamedKeyCredential, "name" should be the storage account name, and "key"
+        should be the storage account key.
+    :type credential:
+        ~azure.core.credentials.AzureNamedKeyCredential or
+        ~azure.core.credentials.AzureSasCredential or
+        ~azure.core.credentials.TokenCredential or
+        str or dict[str, str] or None
+    :keyword bool overwrite:
+        Whether the local file should be overwritten if it already exists. The default value is
+        `False` - in which case a ValueError will be raised if the file already exists. If set to
+        `True`, an attempt will be made to write to the existing file. If a stream handle is passed
+        in, this value is ignored.
+    :keyword int max_concurrency:
+        The number of parallel connections with which to download.
+    :keyword int offset:
+        Start of byte range to use for downloading a section of the blob.
+        Must be set if length is provided.
+    :keyword int length:
+        Number of bytes to read from the stream. This is optional, but
+        should be supplied for optimal performance.
+    :keyword bool validate_content:
+        If true, calculates an MD5 hash for each chunk of the blob. The storage
+        service checks the hash of the content that has arrived with the hash
+        that was sent. This is primarily valuable for detecting bitflips on
+        the wire if using http instead of https, as https (the default) will
+        already validate. Note that this MD5 hash is not stored with the
+        blob. Also note that if enabled, the memory-efficient download algorithm
+        will not be used, because computing the MD5 hash requires buffering
+        entire blocks, and doing so defeats the purpose of the memory-efficient algorithm.
+    :rtype: None
+    """
+    overwrite = kwargs.pop('overwrite', False)
+    with BlobClient.from_blob_url(blob_url, credential=credential) as client:
+        if hasattr(output, 'write'):
+            _download_to_stream(client, cast(IO[bytes], output), **kwargs)
+        else:
+            if not overwrite and os.path.isfile(output):
+                raise ValueError(f"The file '{output}' already exists.")
+            with open(output, 'wb') as file_handle:
+                _download_to_stream(client, file_handle, **kwargs)
+
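+# Editor's note: a minimal usage sketch, not part of the SDK source. The URL
+# and local path are placeholders; max_concurrency=4 is an arbitrary choice.
+#
+#     from azure.storage.blob import download_blob_from_url
+#
+#     download_blob_from_url(
+#         "https://<account>.blob.core.windows.net/<container>/<blob>?<sas-token>",
+#         "local_copy.bin",
+#         overwrite=True,
+#         max_concurrency=4,
+#     )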
+
+__all__ = [
+    'upload_blob_to_url',
+    'download_blob_from_url',
+    'BlobServiceClient',
+    'ContainerClient',
+    'BlobClient',
+    'BlobType',
+    'BlobLeaseClient',
+    'StorageErrorCode',
+    'UserDelegationKey',
+    'ExponentialRetry',
+    'LinearRetry',
+    'LocationMode',
+    'BlockState',
+    'StandardBlobTier',
+    'PremiumPageBlobTier',
+    'SequenceNumberAction',
+    'BlobImmutabilityPolicyMode',
+    'ImmutabilityPolicy',
+    'PublicAccess',
+    'BlobAnalyticsLogging',
+    'Metrics',
+    'RetentionPolicy',
+    'StaticWebsite',
+    'CorsRule',
+    'ContainerProperties',
+    'BlobProperties',
+    'BlobPrefix',
+    'FilteredBlob',
+    'LeaseProperties',
+    'ContentSettings',
+    'CopyProperties',
+    'BlobBlock',
+    'PageRange',
+    'AccessPolicy',
+    'QuickQueryDialect',
+    'ContainerSasPermissions',
+    'BlobSasPermissions',
+    'ResourceTypes',
+    'AccountSasPermissions',
+    'StorageStreamDownloader',
+    'CustomerProvidedEncryptionKey',
+    'RehydratePriority',
+    'generate_account_sas',
+    'generate_container_sas',
+    'generate_blob_sas',
+    'PartialBatchErrorException',
+    'ContainerEncryptionScope',
+    'BlobQueryError',
+    'DelimitedJsonDialect',
+    'DelimitedTextDialect',
+    'ArrowDialect',
+    'ArrowType',
+    'BlobQueryReader',
+    'ObjectReplicationPolicy',
+    'ObjectReplicationRule',
+    'Services',
+]
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/blob/_blob_client.py b/.venv/lib/python3.12/site-packages/azure/storage/blob/_blob_client.py
new file mode 100644
index 00000000..90049ff8
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/blob/_blob_client.py
@@ -0,0 +1,3314 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+# pylint: disable=too-many-lines, docstring-keyword-should-match-keyword-only
+
+import warnings
+from datetime import datetime
+from functools import partial
+from typing import (
+    Any, AnyStr, cast, Dict, IO, Iterable, List, Optional, overload, Tuple, Union,
+    TYPE_CHECKING
+)
+from typing_extensions import Self
+
+from azure.core.exceptions import HttpResponseError, ResourceExistsError, ResourceNotFoundError
+from azure.core.paging import ItemPaged
+from azure.core.pipeline import Pipeline
+from azure.core.tracing.decorator import distributed_trace
+from ._blob_client_helpers import (
+    _abort_copy_options,
+    _append_block_from_url_options,
+    _append_block_options,
+    _clear_page_options,
+    _commit_block_list_options,
+    _create_append_blob_options,
+    _create_page_blob_options,
+    _create_snapshot_options,
+    _delete_blob_options,
+    _download_blob_options,
+    _format_url,
+    _from_blob_url,
+    _get_blob_tags_options,
+    _get_block_list_result,
+    _get_page_ranges_options,
+    _parse_url,
+    _quick_query_options,
+    _resize_blob_options,
+    _seal_append_blob_options,
+    _set_blob_metadata_options,
+    _set_blob_tags_options,
+    _set_http_headers_options,
+    _set_sequence_number_options,
+    _stage_block_from_url_options,
+    _stage_block_options,
+    _start_copy_from_url_options,
+    _upload_blob_from_url_options,
+    _upload_blob_options,
+    _upload_page_options,
+    _upload_pages_from_url_options
+)
+from ._deserialize import (
+    deserialize_blob_properties,
+    deserialize_pipeline_response_into_cls,
+    get_page_ranges_result,
+    parse_tags
+)
+from ._download import StorageStreamDownloader
+from ._encryption import StorageEncryptionMixin, _ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION
+from ._generated import AzureBlobStorage
+from ._generated.models import CpkInfo
+from ._lease import BlobLeaseClient
+from ._models import BlobBlock, BlobProperties, BlobQueryError, BlobType, PageRange, PageRangePaged
+from ._quick_query_helper import BlobQueryReader
+from ._shared.base_client import parse_connection_str, StorageAccountHostsMixin, TransportWrapper
+from ._shared.response_handlers import process_storage_error, return_response_headers
+from ._serialize import (
+    get_access_conditions,
+    get_api_version,
+    get_modify_conditions,
+    get_version_id
+)
+from ._upload_helpers import (
+    upload_append_blob,
+    upload_block_blob,
+    upload_page_blob
+)
+
+if TYPE_CHECKING:
+    from azure.core.credentials import AzureNamedKeyCredential, AzureSasCredential, TokenCredential
+    from azure.storage.blob import ContainerClient
+    from ._models import (
+        ContentSettings,
+        ImmutabilityPolicy,
+        PremiumPageBlobTier,
+        SequenceNumberAction,
+        StandardBlobTier
+    )
+
+
+class BlobClient(StorageAccountHostsMixin, StorageEncryptionMixin):  # pylint: disable=too-many-public-methods
+    """A client to interact with a specific blob, although that blob may not yet exist.
+
+    For more optional configuration, please click
+    `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+    #optional-configuration>`__.
+
+    :param str account_url:
+        The URI to the storage account. In order to create a client given the full URI to the blob,
+        use the :func:`from_blob_url` classmethod.
+    :param container_name: The container name for the blob.
+    :type container_name: str
+    :param blob_name: The name of the blob with which to interact. If specified, this value will override
+        a blob value specified in the blob URL.
+    :type blob_name: str
+    :param str snapshot:
+        The optional blob snapshot on which to operate. This can be the snapshot ID string
+        or the response returned from :func:`create_snapshot`.
+    :param credential:
+        The credentials with which to authenticate. This is optional if the
+        account URL already has a SAS token. The value can be a SAS token string,
+        an instance of an AzureSasCredential or AzureNamedKeyCredential from azure.core.credentials,
+        an account shared access key, or an instance of a TokenCredential class from azure.identity.
+        If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential
+        - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError.
+        If using an instance of AzureNamedKeyCredential, "name" should be the storage account name, and "key"
+        should be the storage account key.
+    :keyword str api_version:
+        The Storage API version to use for requests. Default value is the most recent service version that is
+        compatible with the current SDK. Setting to an older version may result in reduced feature compatibility.
+
+        .. versionadded:: 12.2.0
+
+    :keyword str secondary_hostname:
+        The hostname of the secondary endpoint.
+    :keyword int max_block_size: The maximum chunk size for uploading a block blob in chunks.
+        Defaults to 4*1024*1024, or 4MB.
+    :keyword int max_single_put_size: If the blob size is less than or equal to max_single_put_size, then the blob will be
+        uploaded with only one http PUT request. If the blob size is larger than max_single_put_size,
+        the blob will be uploaded in chunks. Defaults to 64*1024*1024, or 64MB.
+    :keyword int min_large_block_upload_threshold: The minimum chunk size required to use the memory efficient
+        algorithm when uploading a block blob. Defaults to 4*1024*1024+1.
+    :keyword bool use_byte_buffer: Use a byte buffer for block blob uploads. Defaults to False.
+    :keyword int max_page_size: The maximum chunk size for uploading a page blob. Defaults to 4*1024*1024, or 4MB.
+    :keyword int max_single_get_size: The maximum size for a blob to be downloaded in a single call;
+        any part exceeding it will be downloaded in chunks (potentially in parallel). Defaults to 32*1024*1024, or 32MB.
+    :keyword int max_chunk_get_size: The maximum chunk size used for downloading a blob. Defaults to 4*1024*1024,
+        or 4MB.
+    :keyword str version_id: The version id parameter is an opaque DateTime value that, when present,
+        specifies the version of the blob to operate on.
+    :keyword str audience: The audience to use when requesting tokens for Azure Active Directory
+        authentication. Only has an effect when credential is of type TokenCredential. The value could be
+        https://storage.azure.com/ (default) or https://<account>.blob.core.windows.net.
+
+    .. admonition:: Example:
+
+        .. literalinclude:: ../samples/blob_samples_authentication.py
+            :start-after: [START create_blob_client]
+            :end-before: [END create_blob_client]
+            :language: python
+            :dedent: 8
+            :caption: Creating the BlobClient from a URL to a public blob (no auth needed).
+
+        .. literalinclude:: ../samples/blob_samples_authentication.py
+            :start-after: [START create_blob_client_sas_url]
+            :end-before: [END create_blob_client_sas_url]
+            :language: python
+            :dedent: 8
+            :caption: Creating the BlobClient from a SAS URL to a blob.
+    """
+    def __init__(
+        self, account_url: str,
+        container_name: str,
+        blob_name: str,
+        snapshot: Optional[Union[str, Dict[str, Any]]] = None,
+        credential: Optional[Union[str, Dict[str, str], "AzureNamedKeyCredential", "AzureSasCredential", "TokenCredential"]] = None,  # pylint: disable=line-too-long
+        **kwargs: Any
+    ) -> None:
+        parsed_url, sas_token, path_snapshot = _parse_url(
+            account_url=account_url,
+            container_name=container_name,
+            blob_name=blob_name)
+        self.container_name = container_name
+        self.blob_name = blob_name
+
+        if snapshot is not None and hasattr(snapshot, 'snapshot'):
+            self.snapshot = snapshot.snapshot
+        elif isinstance(snapshot, dict):
+            self.snapshot = snapshot['snapshot']
+        else:
+            self.snapshot = snapshot or path_snapshot
+        self.version_id = kwargs.pop('version_id', None)
+
+        # This parameter is used for the hierarchy traversal. Give precedence to credential.
+        self._raw_credential = credential if credential else sas_token
+        self._query_str, credential = self._format_query_string(sas_token, credential, snapshot=self.snapshot)
+        super(BlobClient, self).__init__(parsed_url, service='blob', credential=credential, **kwargs)
+        self._client = AzureBlobStorage(self.url, base_url=self.url, pipeline=self._pipeline)
+        self._client._config.version = get_api_version(kwargs)  # type: ignore [assignment]
+        self._configure_encryption(kwargs)
+
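+    # Editor's note: a minimal construction sketch, not part of the SDK source.
+    # The account URL and credential are placeholders; any azure.identity
+    # credential implementing TokenCredential could be passed instead of a key.
+    #
+    #     client = BlobClient(
+    #         account_url="https://<account>.blob.core.windows.net",
+    #         container_name="mycontainer",
+    #         blob_name="myblob.txt",
+    #         credential="<account-key-or-sas-token>",
+    #     )
+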
+    def _format_url(self, hostname: str) -> str:
+        return _format_url(
+            container_name=self.container_name,
+            scheme=self.scheme,
+            blob_name=self.blob_name,
+            query_str=self._query_str,
+            hostname=hostname
+        )
+
+    @classmethod
+    def from_blob_url(
+        cls, blob_url: str,
+        credential: Optional[Union[str, Dict[str, str], "AzureNamedKeyCredential", "AzureSasCredential", "TokenCredential"]] = None,  # pylint: disable=line-too-long
+        snapshot: Optional[Union[str, Dict[str, Any]]] = None,
+        **kwargs: Any
+    ) -> Self:
+        """Create BlobClient from a blob url. This doesn't support customized blob url with '/' in blob name.
+
+        :param str blob_url:
+            The full endpoint URL to the Blob, including SAS token and snapshot if used. This could be
+            either the primary endpoint, or the secondary endpoint depending on the current `location_mode`.
+        :type blob_url: str
+        :param credential:
+            The credentials with which to authenticate. This is optional if the
+            account URL already has a SAS token, or the connection string already has shared
+            access key values. The value can be a SAS token string,
+            an instance of an AzureSasCredential or AzureNamedKeyCredential from azure.core.credentials,
+            an account shared access key, or an instance of a TokenCredential class from azure.identity.
+            If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential
+            - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError.
+            If using an instance of AzureNamedKeyCredential, "name" should be the storage account name, and "key"
+            should be the storage account key.
+        :type credential:
+            ~azure.core.credentials.AzureNamedKeyCredential or
+            ~azure.core.credentials.AzureSasCredential or
+            ~azure.core.credentials.TokenCredential or
+            str or dict[str, str] or None
+        :param str snapshot:
+            The optional blob snapshot on which to operate. This can be the snapshot ID string
+            or the response returned from :func:`create_snapshot`. If specified, this will override
+            the snapshot in the url.
+        :keyword str version_id: The version id parameter is an opaque DateTime value that, when present,
+            specifies the version of the blob to operate on.
+        :keyword str audience: The audience to use when requesting tokens for Azure Active Directory
+            authentication. Only has an effect when credential is of type TokenCredential. The value could be
+            https://storage.azure.com/ (default) or https://<account>.blob.core.windows.net.
+        :returns: A Blob client.
+        :rtype: ~azure.storage.blob.BlobClient
+        """
+        account_url, container_name, blob_name, path_snapshot = _from_blob_url(blob_url=blob_url, snapshot=snapshot)
+        return cls(
+            account_url, container_name=container_name, blob_name=blob_name,
+            snapshot=path_snapshot, credential=credential, **kwargs
+        )
+
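+    # Editor's note: a minimal usage sketch, not part of the SDK source; the SAS
+    # URL is a placeholder. A ?snapshot=<id> query parameter in the URL is parsed
+    # out automatically, and an explicit `snapshot` argument overrides it.
+    #
+    #     client = BlobClient.from_blob_url(
+    #         "https://<account>.blob.core.windows.net/<container>/<blob>?<sas-token>"
+    #     )
+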
+    @classmethod
+    def from_connection_string(
+        cls, conn_str: str,
+        container_name: str,
+        blob_name: str,
+        snapshot: Optional[Union[str, Dict[str, Any]]] = None,
+        credential: Optional[Union[str, Dict[str, str], "AzureNamedKeyCredential", "AzureSasCredential", "TokenCredential"]] = None,  # pylint: disable=line-too-long
+        **kwargs: Any
+    ) -> Self:
+        """Create BlobClient from a Connection String.
+
+        :param str conn_str:
+            A connection string to an Azure Storage account.
+        :param container_name: The container name for the blob.
+        :type container_name: str
+        :param blob_name: The name of the blob with which to interact.
+        :type blob_name: str
+        :param str snapshot:
+            The optional blob snapshot on which to operate. This can be the snapshot ID string
+            or the response returned from :func:`create_snapshot`.
+        :param credential:
+            The credentials with which to authenticate. This is optional if the
+            account URL already has a SAS token, or the connection string already has shared
+            access key values. The value can be a SAS token string,
+            an instance of an AzureSasCredential or AzureNamedKeyCredential from azure.core.credentials,
+            an account shared access key, or an instance of a TokenCredential class from azure.identity.
+            Credentials provided here will take precedence over those in the connection string.
+            If using an instance of AzureNamedKeyCredential, "name" should be the storage account name, and "key"
+            should be the storage account key.
+        :type credential:
+            ~azure.core.credentials.AzureNamedKeyCredential or
+            ~azure.core.credentials.AzureSasCredential or
+            ~azure.core.credentials.TokenCredential or
+            str or dict[str, str] or None
+        :keyword str version_id: The version id parameter is an opaque DateTime value that, when present,
+            specifies the version of the blob to operate on.
+        :keyword str audience: The audience to use when requesting tokens for Azure Active Directory
+            authentication. Only has an effect when credential is of type TokenCredential. The value could be
+            https://storage.azure.com/ (default) or https://<account>.blob.core.windows.net.
+        :returns: A Blob client.
+        :rtype: ~azure.storage.blob.BlobClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/blob_samples_authentication.py
+                :start-after: [START auth_from_connection_string_blob]
+                :end-before: [END auth_from_connection_string_blob]
+                :language: python
+                :dedent: 8
+                :caption: Creating the BlobClient from a connection string.
+        """
+        account_url, secondary, credential = parse_connection_str(conn_str, credential, 'blob')
+        if 'secondary_hostname' not in kwargs:
+            kwargs['secondary_hostname'] = secondary
+        return cls(
+            account_url, container_name=container_name, blob_name=blob_name,
+            snapshot=snapshot, credential=credential, **kwargs
+        )
+
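+    # Editor's note: a minimal usage sketch, not part of the SDK source; the
+    # connection string fields are placeholders in the usual key=value;... form.
+    #
+    #     conn_str = (
+    #         "DefaultEndpointsProtocol=https;AccountName=<account>;"
+    #         "AccountKey=<key>;EndpointSuffix=core.windows.net"
+    #     )
+    #     client = BlobClient.from_connection_string(
+    #         conn_str, container_name="mycontainer", blob_name="myblob.txt"
+    #     )
+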
+    @distributed_trace
+    def get_account_information(self, **kwargs: Any) -> Dict[str, str]:
+        """Gets information related to the storage account in which the blob resides.
+
+        The information can also be retrieved if the user has a SAS to a container or blob.
+        The keys in the returned dictionary include 'sku_name' and 'account_kind'.
+
+        :returns: A dict of account information (SKU and account type).
+        :rtype: dict(str, str)
+        """
+        try:
+            return cast(Dict[str, str], self._client.blob.get_account_info(cls=return_response_headers, **kwargs))
+        except HttpResponseError as error:
+            process_storage_error(error)
+
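+    # Editor's note: a minimal usage sketch, not part of the SDK source; the
+    # printed values are merely illustrative of a general-purpose v2 account.
+    #
+    #     info = client.get_account_information()
+    #     print(info["sku_name"], info["account_kind"])  # e.g. Standard_LRS StorageV2
+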
+    @distributed_trace
+    def upload_blob_from_url(
+        self, source_url: str,
+        *,
+        metadata: Optional[Dict[str, str]] = None,
+        **kwargs: Any
+    ) -> Dict[str, Any]:
+        """
+        Creates a new Block Blob where the content of the blob is read from a given URL.
+        The content of an existing blob is overwritten with the new blob.
+
+        :param str source_url:
+            A URL of up to 2 KB in length that specifies a file or blob.
+            The value should be URL-encoded as it would appear in a request URI.
+            The source must either be public or must be authenticated via a shared
+            access signature as part of the url or using the source_authorization keyword.
+            If the source is public, no authentication is required.
+            Examples:
+            https://myaccount.blob.core.windows.net/mycontainer/myblob
+
+            https://myaccount.blob.core.windows.net/mycontainer/myblob?snapshot=<DateTime>
+
+            https://otheraccount.blob.core.windows.net/mycontainer/myblob?sastoken
+        :keyword dict(str, str) metadata:
+            Name-value pairs associated with the blob as metadata.
+        :keyword bool overwrite: Whether the blob to be uploaded should overwrite the current data.
+            If True, upload_blob will overwrite the existing data. If set to False, the
+            operation will fail with ResourceExistsError.
+        :keyword bool include_source_blob_properties:
+            Indicates if properties from the source blob should be copied. Defaults to True.
+        :keyword tags:
+            Name-value pairs associated with the blob as tags. Tags are case-sensitive.
+            The tag set may contain at most 10 tags.  Tag keys must be between 1 and 128 characters,
+            and tag values must be between 0 and 256 characters.
+            Valid tag key and value characters include: lowercase and uppercase letters, digits (0-9),
+            space (' '), plus (+), minus (-), period (.), solidus (/), colon (:), equals (=), underscore (_)
+        :paramtype tags: dict(str, str)
+        :keyword bytearray source_content_md5:
+            Specify the md5 that is used to verify the integrity of the source bytes.
+        :keyword ~datetime.datetime source_if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the source resource has been modified since the specified time.
+        :keyword ~datetime.datetime source_if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the source resource has not been modified since the specified date/time.
+        :keyword str source_etag:
+            The source ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions source_match_condition:
+            The source match condition to use upon the etag.
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            The destination ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The destination match condition to use upon the etag.
+        :keyword destination_lease:
+            The lease ID specified for this header must match the lease ID of the
+            destination blob. If the request does not include the lease ID or it is not
+            valid, the operation fails with status code 412 (Precondition Failed).
+        :paramtype destination_lease: ~azure.storage.blob.BlobLeaseClient or str
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :keyword ~azure.storage.blob.ContentSettings content_settings:
+            ContentSettings object used to set blob properties. Used to set content type, encoding,
+            language, disposition, md5, and cache control.
+        :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk:
+            Encrypts the data on the service-side with the given key.
+            Use of customer-provided keys must be done over HTTPS.
+            As the encryption key itself is provided in the request,
+            a secure connection must be established to transfer the key.
+        :keyword str encryption_scope:
+            A predefined encryption scope used to encrypt the data on the service. An encryption
+            scope can be created using the Management API and referenced here by name. If a default
+            encryption scope has been defined at the container, this value will override it if the
+            container-level scope is configured to allow overrides. Otherwise an error will be raised.
+        :keyword ~azure.storage.blob.StandardBlobTier standard_blob_tier:
+            A standard blob tier value to set the blob to. For this version of the library,
+            this is only applicable to block blobs on standard storage accounts.
+        :keyword str source_authorization:
+            Authenticate as a service principal using a client secret to access a source blob. Ensure "bearer " is
+            the prefix of the source_authorization string.
+        :returns: Blob-updated property Dict (Etag and last modified)
+        :rtype: Dict[str, Any]
+        """
+        if kwargs.get('cpk') and self.scheme.lower() != 'https':
+            raise ValueError("Customer provided encryption key must be used over HTTPS.")
+        options = _upload_blob_from_url_options(
+            source_url=source_url,
+            metadata=metadata,
+            **kwargs)
+        try:
+            return cast(Dict[str, Any], self._client.block_blob.put_blob_from_url(**options))
+        except HttpResponseError as error:
+            process_storage_error(error)
+
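+    # A minimal usage sketch (illustrative only): creating a blob from a source
+    # URL. `conn_str` and the container/blob names here are hypothetical.
+    #
+    #     from azure.storage.blob import BlobClient
+    #     blob = BlobClient.from_connection_string(conn_str, "mycontainer", "myblob")
+    #     blob.upload_blob_from_url("https://example.com/source.bin", overwrite=True)
+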
+    @distributed_trace
+    def upload_blob(
+        self, data: Union[bytes, str, Iterable[AnyStr], IO[bytes]],
+        blob_type: Union[str, BlobType] = BlobType.BLOCKBLOB,
+        length: Optional[int] = None,
+        metadata: Optional[Dict[str, str]] = None,
+        **kwargs: Any
+    ) -> Dict[str, Any]:
+        """Creates a new blob from a data source with automatic chunking.
+
+        :param data: The blob data to upload.
+        :type data: Union[bytes, str, Iterable[AnyStr], IO[bytes]]
+        :param ~azure.storage.blob.BlobType blob_type: The type of the blob. This can be
+            either BlockBlob, PageBlob or AppendBlob. The default value is BlockBlob.
+        :param int length:
+            Number of bytes to read from the stream. This is optional, but
+            should be supplied for optimal performance.
+        :param metadata:
+            Name-value pairs associated with the blob as metadata.
+        :type metadata: dict(str, str)
+        :keyword tags:
+            Name-value pairs associated with the blob as tags. Tags are case-sensitive.
+            The tag set may contain at most 10 tags. Tag keys must be between 1 and 128 characters,
+            and tag values must be between 0 and 256 characters.
+            Valid tag key and value characters include: lowercase and uppercase letters, digits (0-9),
+            space (' '), plus (+), minus (-), period (.), solidus (/), colon (:), equals (=), underscore (_)
+
+            .. versionadded:: 12.4.0
+
+        :paramtype tags: dict(str, str)
+        :keyword bool overwrite: Whether the blob to be uploaded should overwrite the current data.
+            If True, upload_blob will overwrite the existing data. If set to False, the
+            operation will fail with ResourceExistsError. The exception to the above is with Append
+            blob types: if set to False and the data already exists, an error will not be raised
+            and the data will be appended to the existing blob. If overwrite is set to True, the existing
+            append blob will be deleted, and a new one created. Defaults to False.
+        :keyword ~azure.storage.blob.ContentSettings content_settings:
+            ContentSettings object used to set blob properties. Used to set content type, encoding,
+            language, disposition, md5, and cache control.
+        :keyword bool validate_content:
+            If true, calculates an MD5 hash for each chunk of the blob. The storage
+            service checks the hash of the content that has arrived with the hash
+            that was sent. This is primarily valuable for detecting bitflips on
+            the wire if using http instead of https, as https (the default) will
+            already validate. Note that this MD5 hash is not stored with the
+            blob. Also note that if enabled, the memory-efficient upload algorithm
+            will not be used because computing the MD5 hash requires buffering
+            entire blocks, and doing so defeats the purpose of the memory-efficient algorithm.
+        :keyword lease:
+            Required if the blob has an active lease. If specified, upload_blob only succeeds if the
+            blob's lease is active and matches this ID. Value can be a BlobLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword str if_tags_match_condition:
+            Specify a SQL where clause on blob tags to operate only on blobs with a matching value.
+            eg. ``\"\\\"tagname\\\"='my tag'\"``
+
+            .. versionadded:: 12.4.0
+
+        :keyword ~azure.storage.blob.PremiumPageBlobTier premium_page_blob_tier:
+            A page blob tier value to set the blob to. The tier correlates to the size of the
+            blob and number of allowed IOPS. This is only applicable to page blobs on
+            premium storage accounts.
+        :keyword ~azure.storage.blob.StandardBlobTier standard_blob_tier:
+            A standard blob tier value to set the blob to. For this version of the library,
+            this is only applicable to block blobs on standard storage accounts.
+        :keyword ~azure.storage.blob.ImmutabilityPolicy immutability_policy:
+            Specifies the immutability policy of a blob, blob snapshot or blob version.
+            Currently this parameter of the upload_blob() API is for BlockBlob only.
+
+            .. versionadded:: 12.10.0
+                This was introduced in API version '2020-10-02'.
+
+        :keyword bool legal_hold:
+            Specified if a legal hold should be set on the blob.
+            Currently this parameter of the upload_blob() API is for BlockBlob only.
+
+            .. versionadded:: 12.10.0
+                This was introduced in API version '2020-10-02'.
+
+        :keyword int maxsize_condition:
+            Optional conditional header. The max length in bytes permitted for
+            the append blob. If the Append Block operation would cause the blob
+            to exceed that limit or if the blob size is already greater than the
+            value specified in this header, the request will fail with
+            MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed).
+        :keyword int max_concurrency:
+            Maximum number of parallel connections to use when transferring the blob in chunks.
+            This option does not affect the underlying connection pool, and may
+            require a separate configuration of the connection pool.
+        :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk:
+            Encrypts the data on the service-side with the given key.
+            Use of customer-provided keys must be done over HTTPS.
+            As the encryption key itself is provided in the request,
+            a secure connection must be established to transfer the key.
+        :keyword str encryption_scope:
+            A predefined encryption scope used to encrypt the data on the service. An encryption
+            scope can be created using the Management API and referenced here by name. If a default
+            encryption scope has been defined at the container, this value will override it if the
+            container-level scope is configured to allow overrides. Otherwise an error will be raised.
+
+            .. versionadded:: 12.2.0
+
+        :keyword str encoding:
+            Defaults to UTF-8.
+        :keyword progress_hook:
+            A callback to track the progress of a long running upload. The signature is
+            function(current: int, total: Optional[int]) where current is the number of bytes transferred
+            so far, and total is the size of the blob or None if the size is unknown.
+        :paramtype progress_hook: Callable[[int, Optional[int]], None]
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__. This method may make multiple calls to the service and
+            the timeout will apply to each call individually.
+        :returns: Blob-updated property Dict (Etag and last modified)
+        :rtype: Dict[str, Any]
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/blob_samples_hello_world.py
+                :start-after: [START upload_a_blob]
+                :end-before: [END upload_a_blob]
+                :language: python
+                :dedent: 12
+                :caption: Upload a blob to the container.
+        """
+        if self.require_encryption and not self.key_encryption_key:
+            raise ValueError("Encryption required but no key was provided.")
+        if kwargs.get('cpk') and self.scheme.lower() != 'https':
+            raise ValueError("Customer provided encryption key must be used over HTTPS.")
+        options = _upload_blob_options(
+            data=data,
+            blob_type=blob_type,
+            length=length,
+            metadata=metadata,
+            encryption_options={
+                'required': self.require_encryption,
+                'version': self.encryption_version,
+                'key': self.key_encryption_key,
+                'resolver': self.key_resolver_function
+            },
+            config=self._config,
+            sdk_moniker=self._sdk_moniker,
+            client=self._client,
+            **kwargs)
+        if blob_type == BlobType.BlockBlob:
+            return upload_block_blob(**options)
+        if blob_type == BlobType.PageBlob:
+            return upload_page_blob(**options)
+        return upload_append_blob(**options)
+
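+    # A minimal usage sketch (illustrative only): uploading a local file as a
+    # block blob with automatic chunking. `blob` is an assumed, configured BlobClient.
+    #
+    #     with open("data.bin", "rb") as stream:
+    #         blob.upload_blob(stream, overwrite=True, max_concurrency=4)
+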
+    @overload
+    def download_blob(
+        self, offset: Optional[int] = None,
+        length: Optional[int] = None,
+        *,
+        encoding: str,
+        **kwargs: Any
+    ) -> StorageStreamDownloader[str]:
+        ...
+
+    @overload
+    def download_blob(
+        self, offset: Optional[int] = None,
+        length: Optional[int] = None,
+        *,
+        encoding: None = None,
+        **kwargs: Any
+    ) -> StorageStreamDownloader[bytes]:
+        ...
+
+    @distributed_trace
+    def download_blob(
+        self, offset: Optional[int] = None,
+        length: Optional[int] = None,
+        *,
+        encoding: Union[str, None] = None,
+        **kwargs: Any
+    ) -> Union[StorageStreamDownloader[str], StorageStreamDownloader[bytes]]:
+        """Downloads a blob to the StorageStreamDownloader. The readall() method must
+        be used to read all the content or readinto() must be used to download the blob into
+        a stream. Using chunks() returns an iterator which allows the user to iterate over the content in chunks.
+
+        :param int offset:
+            Start of byte range to use for downloading a section of the blob.
+            Must be set if length is provided.
+        :param int length:
+            Number of bytes to read from the stream. This is optional, but
+            should be supplied for optimal performance.
+        :keyword str version_id:
+            The version id parameter is an opaque DateTime
+            value that, when present, specifies the version of the blob to download.
+
+            .. versionadded:: 12.4.0
+
+            This keyword argument was introduced in API version '2019-12-12'.
+
+        :keyword bool validate_content:
+            If true, calculates an MD5 hash for each chunk of the blob. The storage
+            service checks the hash of the content that has arrived with the hash
+            that was sent. This is primarily valuable for detecting bitflips on
+            the wire if using http instead of https, as https (the default) will
+            already validate. Note that this MD5 hash is not stored with the
+            blob. Also note that if enabled, the memory-efficient download algorithm
+            will not be used because computing the MD5 hash requires buffering
+            entire blocks, and doing so defeats the purpose of the memory-efficient algorithm.
+        :keyword lease:
+            Required if the blob has an active lease. If specified, download_blob only
+            succeeds if the blob's lease is active and matches this ID. Value can be a
+            BlobLeaseClient object or the lease ID as a string.
+        :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword str if_tags_match_condition:
+            Specify a SQL where clause on blob tags to operate only on blobs with a matching value.
+            eg. ``\"\\\"tagname\\\"='my tag'\"``
+
+            .. versionadded:: 12.4.0
+
+        :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk:
+            Encrypts the data on the service-side with the given key.
+            Use of customer-provided keys must be done over HTTPS.
+            As the encryption key itself is provided in the request,
+            a secure connection must be established to transfer the key.
+        :keyword int max_concurrency:
+            Maximum number of parallel connections to use when transferring the blob in chunks.
+            This option does not affect the underlying connection pool, and may
+            require a separate configuration of the connection pool.
+        :keyword Optional[str] encoding:
+            Encoding to decode the downloaded bytes. Default is None, i.e. no decoding.
+        :keyword progress_hook:
+            A callback to track the progress of a long running download. The signature is
+            function(current: int, total: int) where current is the number of bytes transferred
+            so far, and total is the total size of the download.
+        :paramtype progress_hook: Callable[[int, int], None]
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__. This method may make multiple calls to the service and
+            the timeout will apply to each call individually.
+        :returns: A streaming object (StorageStreamDownloader)
+        :rtype: ~azure.storage.blob.StorageStreamDownloader
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/blob_samples_hello_world.py
+                :start-after: [START download_a_blob]
+                :end-before: [END download_a_blob]
+                :language: python
+                :dedent: 12
+                :caption: Download a blob.
+        """
+        if self.require_encryption and not (self.key_encryption_key or self.key_resolver_function):
+            raise ValueError("Encryption required but no key was provided.")
+        if length is not None and offset is None:
+            raise ValueError("Offset value must not be None if length is set.")
+        if kwargs.get('cpk') and self.scheme.lower() != 'https':
+            raise ValueError("Customer provided encryption key must be used over HTTPS.")
+        options = _download_blob_options(
+            blob_name=self.blob_name,
+            container_name=self.container_name,
+            version_id=get_version_id(self.version_id, kwargs),
+            offset=offset,
+            length=length,
+            encoding=encoding,
+            encryption_options={
+                'required': self.require_encryption,
+                'version': self.encryption_version,
+                'key': self.key_encryption_key,
+                'resolver': self.key_resolver_function
+            },
+            config=self._config,
+            sdk_moniker=self._sdk_moniker,
+            client=self._client,
+            **kwargs)
+        return StorageStreamDownloader(**options)
+
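+    # A minimal usage sketch (illustrative only) of the three read patterns named
+    # in the docstring; `blob` is an assumed, configured BlobClient.
+    #
+    #     data = blob.download_blob().readall()        # whole blob in memory
+    #     with open("out.bin", "wb") as handle:
+    #         blob.download_blob().readinto(handle)    # stream into a file
+    #     for chunk in blob.download_blob().chunks():  # iterate chunk by chunk
+    #         print(len(chunk))
+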
+    @distributed_trace
+    def query_blob(self, query_expression: str, **kwargs: Any) -> BlobQueryReader:
+        """Enables users to select/project on blob/or blob snapshot data by providing simple query expressions.
+        This operations returns a BlobQueryReader, users need to use readall() or readinto() to get query data.
+
+        :param str query_expression:
+            Required. A query statement. For more details see
+            https://learn.microsoft.com/azure/storage/blobs/query-acceleration-sql-reference.
+        :keyword Callable[~azure.storage.blob.BlobQueryError] on_error:
+            A function to be called on any processing errors returned by the service.
+        :keyword blob_format:
+            Optional. Defines the serialization of the data currently stored in the blob. The default is to
+            treat the blob data as CSV data formatted in the default dialect. This can be overridden with
+            a custom DelimitedTextDialect, DelimitedJsonDialect, or "ParquetDialect" (passed as a string or enum).
+            These dialects can be passed through their respective classes, the QuickQueryDialect enum, or as a string.
+
+            .. note::
+                "ParquetDialect" is in preview, so some features may not work as intended.
+
+        :paramtype blob_format: ~azure.storage.blob.DelimitedTextDialect or ~azure.storage.blob.DelimitedJsonDialect
+            or ~azure.storage.blob.QuickQueryDialect or str
+        :keyword output_format:
+            Optional. Defines the output serialization for the data stream. By default the data will be returned
+            as it is represented in the blob (Parquet formats default to DelimitedTextDialect).
+            By providing an output format, the blob data will be reformatted according to that profile.
+            This value can be a DelimitedTextDialect, DelimitedJsonDialect, or ArrowDialect.
+            These dialects can be passed through their respective classes, the QuickQueryDialect enum, or as a string.
+        :paramtype output_format: ~azure.storage.blob.DelimitedTextDialect or ~azure.storage.blob.DelimitedJsonDialect
+            or List[~azure.storage.blob.ArrowDialect] or ~azure.storage.blob.QuickQueryDialect or str
+        :keyword lease:
+            Required if the blob has an active lease. Value can be a BlobLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword str if_tags_match_condition:
+            Specify a SQL where clause on blob tags to operate only on blobs with a matching value.
+            eg. ``\"\\\"tagname\\\"='my tag'\"``
+
+            .. versionadded:: 12.4.0
+
+        :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk:
+            Encrypts the data on the service-side with the given key.
+            Use of customer-provided keys must be done over HTTPS.
+            As the encryption key itself is provided in the request,
+            a secure connection must be established to transfer the key.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :returns: A streaming object (BlobQueryReader)
+        :rtype: ~azure.storage.blob.BlobQueryReader
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/blob_samples_query.py
+                :start-after: [START query]
+                :end-before: [END query]
+                :language: python
+                :dedent: 4
+                :caption: Select/project on blob or blob snapshot data by providing simple query expressions.
+        """
+        errors = kwargs.pop("on_error", None)
+        error_cls = kwargs.pop("error_cls", BlobQueryError)
+        encoding = kwargs.pop("encoding", None)
+        if kwargs.get('cpk') and self.scheme.lower() != 'https':
+            raise ValueError("Customer provided encryption key must be used over HTTPS.")
+        options, delimiter = _quick_query_options(self.snapshot, query_expression, **kwargs)
+        try:
+            headers, raw_response_body = self._client.blob.query(**options)
+        except HttpResponseError as error:
+            process_storage_error(error)
+        return BlobQueryReader(
+            name=self.blob_name,
+            container=self.container_name,
+            errors=errors,
+            record_delimiter=delimiter,
+            encoding=encoding,
+            headers=headers,
+            response=raw_response_body,
+            error_cls=error_cls)
+
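+    # A minimal usage sketch (illustrative only): running a quick query against
+    # CSV-formatted blob content on an assumed, configured BlobClient.
+    #
+    #     reader = blob.query_blob("SELECT * from BlobStorage WHERE _2 > 100")
+    #     filtered_data = reader.readall()
+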
+    @distributed_trace
+    def delete_blob(self, delete_snapshots: Optional[str] = None, **kwargs: Any) -> None:
+        """Marks the specified blob for deletion.
+
+        The blob is later deleted during garbage collection.
+        Note that in order to delete a blob, you must delete all of its
+        snapshots. You can delete both at the same time with the delete_blob()
+        operation.
+
+        If a delete retention policy is enabled for the service, then this operation soft deletes the blob
+        and retains the blob for a specified number of days.
+        After the specified number of days, the blob's data is removed from the service during garbage collection.
+        A soft-deleted blob is accessible through :func:`~ContainerClient.list_blobs()` by specifying the
+        `include=['deleted']` option, and can be restored using the :func:`undelete` operation.
+
+        :param Optional[str] delete_snapshots:
+            Required if the blob has associated snapshots. Values include:
+             - "only": Deletes only the blobs snapshots.
+             - "include": Deletes the blob along with all snapshots.
+        :keyword Optional[str] version_id:
+            The version id parameter is an opaque DateTime
+            value that, when present, specifies the version of the blob to delete.
+
+            .. versionadded:: 12.4.0
+
+            This keyword argument was introduced in API version '2019-12-12'.
+
+        :keyword lease:
+            Required if the blob has an active lease. If specified, delete_blob only
+            succeeds if the blob's lease is active and matches this ID. Value can be a
+            BlobLeaseClient object or the lease ID as a string.
+        :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword str if_tags_match_condition:
+            Specify a SQL where clause on blob tags to operate only on blobs with a matching value.
+            eg. ``\"\\\"tagname\\\"='my tag'\"``
+
+            .. versionadded:: 12.4.0
+
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :rtype: None
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/blob_samples_hello_world.py
+                :start-after: [START delete_blob]
+                :end-before: [END delete_blob]
+                :language: python
+                :dedent: 12
+                :caption: Delete a blob.
+        """
+        options = _delete_blob_options(
+            snapshot=self.snapshot,
+            version_id=get_version_id(self.version_id, kwargs),
+            delete_snapshots=delete_snapshots,
+            **kwargs)
+        try:
+            self._client.blob.delete(**options)
+        except HttpResponseError as error:
+            process_storage_error(error)
+
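+    # A minimal usage sketch (illustrative only): deleting a blob together with
+    # all of its snapshots on an assumed, configured BlobClient.
+    #
+    #     blob.delete_blob(delete_snapshots="include")
+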
+    @distributed_trace
+    def undelete_blob(self, **kwargs: Any) -> None:
+        """Restores soft-deleted blobs or snapshots.
+
+        The operation will only be successful if used within the specified number of days
+        set in the delete retention policy.
+
+        If blob versioning is enabled, the base blob cannot be restored using this
+        method. Instead use :func:`start_copy_from_url` with the URL of the blob version
+        you wish to promote to the current version.
+
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :rtype: None
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/blob_samples_common.py
+                :start-after: [START undelete_blob]
+                :end-before: [END undelete_blob]
+                :language: python
+                :dedent: 8
+                :caption: Undeleting a blob.
+        """
+        try:
+            self._client.blob.undelete(timeout=kwargs.pop('timeout', None), **kwargs)
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    @distributed_trace
+    def exists(self, **kwargs: Any) -> bool:
+        """
+        Returns True if a blob exists with the defined parameters, and returns
+        False otherwise.
+
+        :keyword str version_id:
+            The version id parameter is an opaque DateTime
+            value that, when present, specifies the version of the blob to check for existence.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :returns: True if the blob exists, False otherwise.
+        :rtype: bool
+        """
+        version_id = get_version_id(self.version_id, kwargs)
+        try:
+            self._client.blob.get_properties(
+                snapshot=self.snapshot,
+                version_id=version_id,
+                **kwargs)
+            return True
+        # Encrypted with CPK
+        except ResourceExistsError:
+            return True
+        except HttpResponseError as error:
+            try:
+                process_storage_error(error)
+            except ResourceNotFoundError:
+                return False
+
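+    # A minimal usage sketch (illustrative only): guarding a follow-up call on an
+    # assumed, configured BlobClient.
+    #
+    #     if blob.exists():
+    #         properties = blob.get_blob_properties()
+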
+    @distributed_trace
+    def get_blob_properties(self, **kwargs: Any) -> BlobProperties:
+        """Returns all user-defined metadata, standard HTTP properties, and
+        system properties for the blob. It does not return the content of the blob.
+
+        :keyword lease:
+            Required if the blob has an active lease. Value can be a BlobLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str
+        :keyword str version_id:
+            The version id parameter is an opaque DateTime
+            value that, when present, specifies the version of the blob to get properties.
+
+            .. versionadded:: 12.4.0
+
+            This keyword argument was introduced in API version '2019-12-12'.
+
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword str if_tags_match_condition:
+            Specify a SQL where clause on blob tags to operate only on blobs with a matching value.
+            eg. ``\"\\\"tagname\\\"='my tag'\"``
+
+            .. versionadded:: 12.4.0
+
+        :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk:
+            Encrypts the data on the service-side with the given key.
+            Use of customer-provided keys must be done over HTTPS.
+            As the encryption key itself is provided in the request,
+            a secure connection must be established to transfer the key.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :returns: BlobProperties
+        :rtype: ~azure.storage.blob.BlobProperties
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/blob_samples_common.py
+                :start-after: [START get_blob_properties]
+                :end-before: [END get_blob_properties]
+                :language: python
+                :dedent: 8
+                :caption: Getting the properties for a blob.
+        """
+        # TODO: extract this out as _get_blob_properties_options
+        access_conditions = get_access_conditions(kwargs.pop('lease', None))
+        mod_conditions = get_modify_conditions(kwargs)
+        version_id = get_version_id(self.version_id, kwargs)
+        cpk = kwargs.pop('cpk', None)
+        cpk_info = None
+        if cpk:
+            if self.scheme.lower() != 'https':
+                raise ValueError("Customer provided encryption key must be used over HTTPS.")
+            cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash,
+                               encryption_algorithm=cpk.algorithm)
+        try:
+            cls_method = kwargs.pop('cls', None)
+            if cls_method:
+                kwargs['cls'] = partial(deserialize_pipeline_response_into_cls, cls_method)
+            blob_props = cast(BlobProperties, self._client.blob.get_properties(
+                timeout=kwargs.pop('timeout', None),
+                version_id=version_id,
+                snapshot=self.snapshot,
+                lease_access_conditions=access_conditions,
+                modified_access_conditions=mod_conditions,
+                cls=kwargs.pop('cls', None) or deserialize_blob_properties,
+                cpk_info=cpk_info,
+                **kwargs))
+        except HttpResponseError as error:
+            process_storage_error(error)
+        blob_props.name = self.blob_name
+        if isinstance(blob_props, BlobProperties):
+            blob_props.container = self.container_name
+            blob_props.snapshot = self.snapshot
+        return blob_props
+
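+    # A minimal usage sketch (illustrative only): reading a few common fields off
+    # the returned BlobProperties on an assumed, configured BlobClient.
+    #
+    #     props = blob.get_blob_properties()
+    #     print(props.name, props.size, props.last_modified)
+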
+    @distributed_trace
+    def set_http_headers(self, content_settings: Optional["ContentSettings"] = None, **kwargs: Any) -> Dict[str, Any]:
+        """Sets system properties on the blob.
+
+        If one property is set in content_settings, all properties will be overridden.
+
+        :param ~azure.storage.blob.ContentSettings content_settings:
+            ContentSettings object used to set blob properties. Used to set content type, encoding,
+            language, disposition, md5, and cache control.
+        :keyword lease:
+            Required if the blob has an active lease. Value can be a BlobLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword str if_tags_match_condition:
+            Specify a SQL where clause on blob tags to operate only on blobs with a matching value.
+            eg. ``\"\\\"tagname\\\"='my tag'\"``
+
+            .. versionadded:: 12.4.0
+
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :returns: Blob-updated property dict (Etag and last modified)
+        :rtype: Dict[str, Any]
+        """
+        options = _set_http_headers_options(content_settings=content_settings, **kwargs)
+        try:
+            return cast(Dict[str, Any], self._client.blob.set_http_headers(**options))
+        except HttpResponseError as error:
+            process_storage_error(error)
+
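+    # A minimal usage sketch (illustrative only): since setting one property
+    # overrides them all, pass every header you want to keep. `blob` is an
+    # assumed, configured BlobClient.
+    #
+    #     from azure.storage.blob import ContentSettings
+    #     blob.set_http_headers(ContentSettings(
+    #         content_type="application/json", cache_control="max-age=3600"))
+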
+    @distributed_trace
+    def set_blob_metadata(
+        self, metadata: Optional[Dict[str, str]] = None,
+        **kwargs: Any
+    ) -> Dict[str, Union[str, datetime]]:
+        """Sets user-defined metadata for the blob as one or more name-value pairs.
+
+        :param metadata:
+            Dict containing name and value pairs. Each call to this operation
+            replaces all existing metadata attached to the blob. To remove all
+            metadata from the blob, call this operation with no metadata headers.
+        :type metadata: dict(str, str)
+        :keyword lease:
+            Required if the blob has an active lease. Value can be a BlobLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword str if_tags_match_condition:
+            Specify a SQL where clause on blob tags to operate only on blobs with a matching value.
+            eg. ``\"\\\"tagname\\\"='my tag'\"``
+
+            .. versionadded:: 12.4.0
+
+        :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk:
+            Encrypts the data on the service-side with the given key.
+            Use of customer-provided keys must be done over HTTPS.
+            As the encryption key itself is provided in the request,
+            a secure connection must be established to transfer the key.
+        :keyword str encryption_scope:
+            A predefined encryption scope used to encrypt the data on the service. An encryption
+            scope can be created using the Management API and referenced here by name. If a default
+            encryption scope has been defined at the container, this value will override it if the
+            container-level scope is configured to allow overrides. Otherwise an error will be raised.
+
+            .. versionadded:: 12.2.0
+
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :returns: Blob-updated property dict (Etag and last modified)
+        :rtype: Dict[str, Union[str, datetime]]
+        """
+        if kwargs.get('cpk') and self.scheme.lower() != 'https':
+            raise ValueError("Customer provided encryption key must be used over HTTPS.")
+        options = _set_blob_metadata_options(metadata=metadata, **kwargs)
+        try:
+            return cast(Dict[str, Union[str, datetime]], self._client.blob.set_metadata(**options))
+        except HttpResponseError as error:
+            process_storage_error(error)
+
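+    # A minimal usage sketch (illustrative only): each call replaces any existing
+    # metadata on the blob; `blob` is an assumed, configured BlobClient.
+    #
+    #     blob.set_blob_metadata({"category": "reports", "owner": "team-a"})
+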
+    @distributed_trace
+    def set_immutability_policy(
+        self, immutability_policy: "ImmutabilityPolicy",
+        **kwargs: Any
+    ) -> Dict[str, str]:
+        """The Set Immutability Policy operation sets the immutability policy on the blob.
+
+        .. versionadded:: 12.10.0
+            This operation was introduced in API version '2020-10-02'.
+
+        :param ~azure.storage.blob.ImmutabilityPolicy immutability_policy:
+            Specifies the immutability policy of a blob, blob snapshot or blob version.
+
+            .. versionadded:: 12.10.0
+                This was introduced in API version '2020-10-02'.
+
+        :keyword str version_id:
+            The version id parameter is an opaque DateTime
+            value that, when present, specifies the version of the blob on which to set the immutability policy.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :returns: Key value pairs of the response headers for the operation.
+        :rtype: Dict[str, str]
+        """
+
+        version_id = get_version_id(self.version_id, kwargs)
+        kwargs['immutability_policy_expiry'] = immutability_policy.expiry_time
+        kwargs['immutability_policy_mode'] = immutability_policy.policy_mode
+        return cast(Dict[str, str], self._client.blob.set_immutability_policy(
+            cls=return_response_headers, version_id=version_id, **kwargs))
+
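+    # A minimal usage sketch (illustrative only): applying an unlocked policy
+    # that expires in one day, on an assumed, configured BlobClient.
+    #
+    #     from datetime import datetime, timedelta, timezone
+    #     from azure.storage.blob import BlobImmutabilityPolicyMode, ImmutabilityPolicy
+    #     policy = ImmutabilityPolicy(
+    #         expiry_time=datetime.now(timezone.utc) + timedelta(days=1),
+    #         policy_mode=BlobImmutabilityPolicyMode.UNLOCKED)
+    #     blob.set_immutability_policy(policy)
+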
+    @distributed_trace
+    def delete_immutability_policy(self, **kwargs: Any) -> None:
+        """The Delete Immutability Policy operation deletes the immutability policy on the blob.
+
+        .. versionadded:: 12.10.0
+            This operation was introduced in API version '2020-10-02'.
+
+        :keyword str version_id:
+            The version id parameter is an opaque DateTime
+            value that, when present, specifies the version of the blob on which to delete the immutability policy.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :rtype: None
+        """
+
+        version_id = get_version_id(self.version_id, kwargs)
+        self._client.blob.delete_immutability_policy(version_id=version_id, **kwargs)
+
+    @distributed_trace
+    def set_legal_hold(self, legal_hold: bool, **kwargs: Any) -> Dict[str, Union[str, datetime, bool]]:
+        """The Set Legal Hold operation sets a legal hold on the blob.
+
+        .. versionadded:: 12.10.0
+            This operation was introduced in API version '2020-10-02'.
+
+        :param bool legal_hold:
+            Specified if a legal hold should be set on the blob.
+        :keyword str version_id:
+            The version id parameter is an opaque DateTime
+            value that, when present, specifies the version of the blob on which to set the legal hold.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :returns: Key value pairs of the response headers for the operation.
+        :rtype: Dict[str, Union[str, datetime, bool]]
+        """
+
+        version_id = get_version_id(self.version_id, kwargs)
+        return cast(Dict[str, Union[str, datetime, bool]], self._client.blob.set_legal_hold(
+            legal_hold, version_id=version_id, cls=return_response_headers, **kwargs))
+
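+    # A minimal usage sketch (illustrative only): placing and clearing a legal
+    # hold on an assumed, configured BlobClient.
+    #
+    #     blob.set_legal_hold(True)   # place the hold
+    #     blob.set_legal_hold(False)  # clear it again
+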
+    @distributed_trace
+    def create_page_blob(
+        self, size: int,
+        content_settings: Optional["ContentSettings"] = None,
+        metadata: Optional[Dict[str, str]] = None,
+        premium_page_blob_tier: Optional[Union[str, "PremiumPageBlobTier"]] = None,
+        **kwargs: Any
+    ) -> Dict[str, Union[str, datetime]]:
+        """Creates a new Page Blob of the specified size.
+
+        :param int size:
+            This specifies the maximum size for the page blob, up to 1 TB.
+            The page blob size must be aligned to a 512-byte boundary.
+        :param ~azure.storage.blob.ContentSettings content_settings:
+            ContentSettings object used to set blob properties. Used to set content type, encoding,
+            language, disposition, md5, and cache control.
+        :param metadata:
+            Name-value pairs associated with the blob as metadata.
+        :type metadata: dict(str, str)
+        :param ~azure.storage.blob.PremiumPageBlobTier premium_page_blob_tier:
+            A page blob tier value to set the blob to. The tier correlates to the size of the
+            blob and number of allowed IOPS. This is only applicable to page blobs on
+            premium storage accounts.
+        :keyword tags:
+            Name-value pairs associated with the blob as tags. Tags are case-sensitive.
+            The tag set may contain at most 10 tags. Tag keys must be between 1 and 128 characters,
+            and tag values must be between 0 and 256 characters.
+            Valid tag key and value characters include: lowercase and uppercase letters, digits (0-9),
+            space (' '), plus (+), minus (-), period (.), solidus (/), colon (:), equals (=), underscore (_)
+
+            .. versionadded:: 12.4.0
+
+        :paramtype tags: dict(str, str)
+        :keyword int sequence_number:
+            Only for Page blobs. The sequence number is a user-controlled value that you can use to
+            track requests. The value of the sequence number must be between 0
+            and 2^63 - 1. The default value is 0.
+        :keyword lease:
+            Required if the blob has an active lease. Value can be a BlobLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str
+        :keyword ~azure.storage.blob.ImmutabilityPolicy immutability_policy:
+            Specifies the immutability policy of a blob, blob snapshot or blob version.
+
+            .. versionadded:: 12.10.0
+                This was introduced in API version '2020-10-02'.
+
+        :keyword bool legal_hold:
+            Specified if a legal hold should be set on the blob.
+
+            .. versionadded:: 12.10.0
+                This was introduced in API version '2020-10-02'.
+
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk:
+            Encrypts the data on the service-side with the given key.
+            Use of customer-provided keys must be done over HTTPS.
+            As the encryption key itself is provided in the request,
+            a secure connection must be established to transfer the key.
+        :keyword str encryption_scope:
+            A predefined encryption scope used to encrypt the data on the service. An encryption
+            scope can be created using the Management API and referenced here by name. If a default
+            encryption scope has been defined at the container, this value will override it if the
+            container-level scope is configured to allow overrides. Otherwise an error will be raised.
+
+            .. versionadded:: 12.2.0
+
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :returns: Blob-updated property dict (Etag and last modified).
+        :rtype: dict[str, Any]
+        """
+        if self.require_encryption or (self.key_encryption_key is not None):
+            raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION)
+        if kwargs.get('cpk') and self.scheme.lower() != 'https':
+            raise ValueError("Customer provided encryption key must be used over HTTPS.")
+        options = _create_page_blob_options(
+            size=size,
+            content_settings=content_settings,
+            metadata=metadata,
+            premium_page_blob_tier=premium_page_blob_tier,
+            **kwargs)
+        try:
+            return cast(Dict[str, Any], self._client.page_blob.create(**options))
+        except HttpResponseError as error:
+            process_storage_error(error)
+
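+    # A minimal usage sketch (illustrative only): the size must be aligned to a
+    # 512-byte boundary; `blob` is an assumed, configured BlobClient.
+    #
+    #     blob.create_page_blob(size=512 * 1024)  # 512 KiB page blob
+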
+    @distributed_trace
+    def create_append_blob(
+        self, content_settings: Optional["ContentSettings"] = None,
+        metadata: Optional[Dict[str, str]] = None,
+        **kwargs: Any
+    ) -> Dict[str, Union[str, datetime]]:
+        """Creates a new Append Blob. This operation creates a new 0-length append blob. The content
+        of any existing blob is overwritten with the newly initialized append blob. To add content to
+        the append blob, call the :func:`append_block` or :func:`append_block_from_url` method.
+
+        :param ~azure.storage.blob.ContentSettings content_settings:
+            ContentSettings object used to set blob properties. Used to set content type, encoding,
+            language, disposition, md5, and cache control.
+        :param metadata:
+            Name-value pairs associated with the blob as metadata.
+        :type metadata: dict(str, str)
+        :keyword tags:
+            Name-value pairs associated with the blob as tags. Tags are case-sensitive.
+            The tag set may contain at most 10 tags. Tag keys must be between 1 and 128 characters,
+            and tag values must be between 0 and 256 characters.
+            Valid tag key and value characters include: lowercase and uppercase letters, digits (0-9),
+            space (' '), plus (+), minus (-), period (.), solidus (/), colon (:), equals (=), underscore (_)
+
+            .. versionadded:: 12.4.0
+
+        :paramtype tags: dict(str, str)
+        :keyword lease:
+            Required if the blob has an active lease. Value can be a BlobLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str
+        :keyword ~azure.storage.blob.ImmutabilityPolicy immutability_policy:
+            Specifies the immutability policy of a blob, blob snapshot or blob version.
+
+            .. versionadded:: 12.10.0
+                This was introduced in API version '2020-10-02'.
+
+        :keyword bool legal_hold:
+            Specified if a legal hold should be set on the blob.
+
+            .. versionadded:: 12.10.0
+                This was introduced in API version '2020-10-02'.
+
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk:
+            Encrypts the data on the service-side with the given key.
+            Use of customer-provided keys must be done over HTTPS.
+            As the encryption key itself is provided in the request,
+            a secure connection must be established to transfer the key.
+        :keyword str encryption_scope:
+            A predefined encryption scope used to encrypt the data on the service. An encryption
+            scope can be created using the Management API and referenced here by name. If a default
+            encryption scope has been defined at the container, this value will override it if the
+            container-level scope is configured to allow overrides. Otherwise an error will be raised.
+
+            .. versionadded:: 12.2.0
+
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :returns: Blob-updated property dict (Etag and last modified).
+        :rtype: dict[str, Any]
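+
+        .. admonition:: Example:
+
+            A minimal sketch, assuming ``blob`` is an existing ``BlobClient``:
+
+            .. code-block:: python
+
+                # Initialize a 0-length append blob, then add content to it.
+                blob.create_append_blob()
+                blob.append_block(b"first chunk of data")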
+        """
+        if self.require_encryption or (self.key_encryption_key is not None):
+            raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION)
+        if kwargs.get('cpk') and self.scheme.lower() != 'https':
+            raise ValueError("Customer provided encryption key must be used over HTTPS.")
+        options = _create_append_blob_options(
+            content_settings=content_settings,
+            metadata=metadata,
+            **kwargs)
+        try:
+            return cast(Dict[str, Union[str, datetime]], self._client.append_blob.create(**options))
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    @distributed_trace
+    def create_snapshot(
+        self, metadata: Optional[Dict[str, str]] = None,
+        **kwargs: Any
+    ) -> Dict[str, Union[str, datetime]]:
+        """Creates a snapshot of the blob.
+
+        A snapshot is a read-only version of a blob that's taken at a point in time.
+        It can be read, copied, or deleted, but not modified. Snapshots provide a way
+        to back up a blob as it appears at a moment in time.
+
+        A snapshot of a blob has the same name as the base blob from which the snapshot
+        is taken, with a DateTime value appended to indicate the time at which the
+        snapshot was taken.
+
+        :param metadata:
+            Name-value pairs associated with the blob as metadata.
+        :type metadata: dict(str, str)
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword str if_tags_match_condition:
+            Specify a SQL where clause on blob tags to operate only on a destination blob with a matching value.
+
+            .. versionadded:: 12.4.0
+
+        :keyword lease:
+            Required if the blob has an active lease. Value can be a BlobLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str
+        :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk:
+            Encrypts the data on the service-side with the given key.
+            Use of customer-provided keys must be done over HTTPS.
+            As the encryption key itself is provided in the request,
+            a secure connection must be established to transfer the key.
+        :keyword str encryption_scope:
+            A predefined encryption scope used to encrypt the data on the service. An encryption
+            scope can be created using the Management API and referenced here by name. If a default
+            encryption scope has been defined at the container, this value will override it if the
+            container-level scope is configured to allow overrides. Otherwise an error will be raised.
+
+            .. versionadded:: 12.2.0
+
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :returns: Blob-updated property dict (Snapshot ID, Etag, and last modified).
+        :rtype: dict[str, Any]
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/blob_samples_common.py
+                :start-after: [START create_blob_snapshot]
+                :end-before: [END create_blob_snapshot]
+                :language: python
+                :dedent: 8
+                :caption: Create a snapshot of the blob.
+        """
+        if kwargs.get('cpk') and self.scheme.lower() != 'https':
+            raise ValueError("Customer provided encryption key must be used over HTTPS.")
+        options = _create_snapshot_options(metadata=metadata, **kwargs)
+        try:
+            return cast(Dict[str, Any], self._client.blob.create_snapshot(**options))
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    @distributed_trace
+    def start_copy_from_url(
+        self, source_url: str,
+        metadata: Optional[Dict[str, str]] = None,
+        incremental_copy: bool = False,
+        **kwargs: Any
+    ) -> Dict[str, Union[str, datetime]]:
+        """Copies a blob from the given URL.
+
+        This operation returns a dictionary containing `copy_status` and `copy_id`,
+        which can be used to check the status of or abort the copy operation.
+        `copy_status` will be 'success' if the copy completed synchronously or
+        'pending' if the copy has been started asynchronously. For asynchronous copies,
+        the status can be checked by polling the :func:`get_blob_properties` method and
+        checking the copy status. Set `requires_sync` to True to force the copy to be synchronous.
+        The Blob service copies blobs on a best-effort basis.
+
+        The source blob for a copy operation may be a block blob, an append blob,
+        or a page blob. If the destination blob already exists, it must be of the
+        same blob type as the source blob. Any existing destination blob will be
+        overwritten. The destination blob cannot be modified while a copy operation
+        is in progress.
+
+        When copying from a page blob, the Blob service creates a destination page
+        blob of the source blob's length, initially containing all zeroes. Then
+        the source page ranges are enumerated, and non-empty ranges are copied.
+
+        For a block blob or an append blob, the Blob service creates a committed
+        blob of zero length before returning from this operation. When copying
+        from a block blob, all committed blocks and their block IDs are copied.
+        Uncommitted blocks are not copied. At the end of the copy operation, the
+        destination blob will have the same committed block count as the source.
+
+        When copying from an append blob, all committed blocks are copied. At the
+        end of the copy operation, the destination blob will have the same committed
+        block count as the source.
+
+        :param str source_url:
+            A URL of up to 2 KB in length that specifies a file or blob.
+            The value should be URL-encoded as it would appear in a request URI.
+            If the source is in another account, the source must either be public
+            or must be authenticated via a shared access signature. If the source
+            is public, no authentication is required.
+            Examples:
+            https://myaccount.blob.core.windows.net/mycontainer/myblob
+
+            https://myaccount.blob.core.windows.net/mycontainer/myblob?snapshot=<DateTime>
+
+            https://otheraccount.blob.core.windows.net/mycontainer/myblob?sastoken
+        :param metadata:
+            Name-value pairs associated with the blob as metadata. If no name-value
+            pairs are specified, the operation will copy the metadata from the
+            source blob or file to the destination blob. If one or more name-value
+            pairs are specified, the destination blob is created with the specified
+            metadata, and metadata is not copied from the source blob or file.
+        :type metadata: dict(str, str)
+        :param bool incremental_copy:
+            Copies the snapshot of the source page blob to a destination page blob.
+            The snapshot is copied such that only the differential changes between
+            the previously copied snapshot and the source snapshot are transferred to the destination.
+            The copied snapshots are complete copies of the original snapshot and
+            can be read or copied from as usual. Defaults to False.
+        :keyword tags:
+            Name-value pairs associated with the blob as tags. Tags are case-sensitive.
+            The tag set may contain at most 10 tags. Tag keys must be between 1 and 128 characters,
+            and tag values must be between 0 and 256 characters.
+            Valid tag key and value characters include: lowercase and uppercase letters, digits (0-9),
+            space (' '), plus (+), minus (-), period (.), solidus (/), colon (:), equals (=), underscore (_).
+
+            The (case-sensitive) literal "COPY" can instead be passed to copy tags from the source blob.
+            This option is only available when `incremental_copy=False` and `requires_sync=True`.
+
+            .. versionadded:: 12.4.0
+
+        :paramtype tags: dict(str, str) or Literal["COPY"]
+        :keyword ~azure.storage.blob.ImmutabilityPolicy immutability_policy:
+            Specifies the immutability policy of a blob, blob snapshot or blob version.
+
+            .. versionadded:: 12.10.0
+                This was introduced in API version '2020-10-02'.
+
+        :keyword bool legal_hold:
+            Specified if a legal hold should be set on the blob.
+
+            .. versionadded:: 12.10.0
+                This was introduced in API version '2020-10-02'.
+
+        :keyword ~datetime.datetime source_if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this conditional header to copy the blob only if the source
+            blob has been modified since the specified date/time.
+        :keyword ~datetime.datetime source_if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this conditional header to copy the blob only if the source blob
+            has not been modified since the specified date/time.
+        :keyword str source_etag:
+            The source ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions source_match_condition:
+            The source match condition to use upon the etag.
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this conditional header to copy the blob only
+            if the destination blob has been modified since the specified date/time.
+            If the destination blob has not been modified, the Blob service returns
+            status code 412 (Precondition Failed).
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this conditional header to copy the blob only
+            if the destination blob has not been modified since the specified
+            date/time. If the destination blob has been modified, the Blob service
+            returns status code 412 (Precondition Failed).
+        :keyword str etag:
+            The destination ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The destination match condition to use upon the etag.
+        :keyword destination_lease:
+            The lease ID specified for this header must match the lease ID of the
+            destination blob. If the request does not include the lease ID or it is not
+            valid, the operation fails with status code 412 (Precondition Failed).
+        :paramtype destination_lease: ~azure.storage.blob.BlobLeaseClient or str
+        :keyword source_lease:
+            Specify this to perform the Copy Blob operation only if
+            the lease ID given matches the active lease ID of the source blob.
+        :paramtype source_lease: ~azure.storage.blob.BlobLeaseClient or str
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :keyword ~azure.storage.blob.PremiumPageBlobTier premium_page_blob_tier:
+            A page blob tier value to set the blob to. The tier correlates to the size of the
+            blob and number of allowed IOPS. This is only applicable to page blobs on
+            premium storage accounts.
+        :keyword ~azure.storage.blob.StandardBlobTier standard_blob_tier:
+            A standard blob tier value to set the blob to. For this version of the library,
+            this is only applicable to block blobs on standard storage accounts.
+        :keyword ~azure.storage.blob.RehydratePriority rehydrate_priority:
+            Indicates the priority with which to rehydrate an archived blob.
+        :keyword bool seal_destination_blob:
+            Seal the destination append blob. This operation applies only to append blobs.
+
+            .. versionadded:: 12.4.0
+
+        :keyword bool requires_sync:
+            Enforces that the service will not return a response until the copy is complete.
+        :keyword str source_authorization:
+            Authenticate as a service principal using a client secret to access a source blob. Ensure "bearer " is
+            the prefix of the source_authorization string. This option is only available when `incremental_copy` is
+            set to False and `requires_sync` is set to True.
+
+            .. versionadded:: 12.9.0
+
+        :keyword str encryption_scope:
+            A predefined encryption scope used to encrypt the data on the sync copied blob. An encryption
+            scope can be created using the Management API and referenced here by name. If a default
+            encryption scope has been defined at the container, this value will override it if the
+            container-level scope is configured to allow overrides. Otherwise an error will be raised.
+
+            .. versionadded:: 12.10.0
+
+        :returns: A dictionary of copy properties (etag, last_modified, copy_id, copy_status).
+        :rtype: dict[str, Union[str, ~datetime.datetime]]
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/blob_samples_common.py
+                :start-after: [START copy_blob_from_url]
+                :end-before: [END copy_blob_from_url]
+                :language: python
+                :dedent: 12
+                :caption: Copy a blob from a URL.
+        """
+        options = _start_copy_from_url_options(
+            source_url=source_url,
+            metadata=metadata,
+            incremental_copy=incremental_copy,
+            **kwargs)
+        try:
+            if incremental_copy:
+                return cast(Dict[str, Union[str, datetime]], self._client.page_blob.copy_incremental(**options))
+            return cast(Dict[str, Union[str, datetime]], self._client.blob.start_copy_from_url(**options))
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    @distributed_trace
+    def abort_copy(
+        self, copy_id: Union[str, Dict[str, Any], BlobProperties],
+        **kwargs: Any
+    ) -> None:
+        """Abort an ongoing copy operation.
+
+        This will leave a destination blob with zero length and full metadata.
+        This will raise an error if the copy operation has already ended.
+
+        :param copy_id:
+            The copy operation to abort. This can be either an ID string, a
+            dictionary of copy properties, or an instance of BlobProperties.
+        :type copy_id: str or dict[str, Any] or ~azure.storage.blob.BlobProperties
+        :rtype: None
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/blob_samples_common.py
+                :start-after: [START abort_copy_blob_from_url]
+                :end-before: [END abort_copy_blob_from_url]
+                :language: python
+                :dedent: 12
+                :caption: Abort copying a blob from URL.
+        """
+        options = _abort_copy_options(copy_id, **kwargs)
+        try:
+            self._client.blob.abort_copy_from_url(**options)
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    @distributed_trace
+    def acquire_lease(self, lease_duration: int = -1, lease_id: Optional[str] = None, **kwargs: Any) -> BlobLeaseClient:
+        """Requests a new lease.
+
+        If the blob does not have an active lease, the Blob
+        Service creates a lease on the blob and returns a new lease.
+
+        :param int lease_duration:
+            Specifies the duration of the lease, in seconds, or negative one
+            (-1) for a lease that never expires. A non-infinite lease can be
+            between 15 and 60 seconds. A lease duration cannot be changed
+            using renew or change. Default is -1 (infinite lease).
+        :param str lease_id:
+            Proposed lease ID, in a GUID string format. The Blob Service
+            returns 400 (Invalid request) if the proposed lease ID is not
+            in the correct format.
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword str if_tags_match_condition:
+            Specify a SQL where clause on blob tags to operate only on a blob with a matching value.
+            e.g. ``\"\\\"tagname\\\"='my tag'\"``
+
+            .. versionadded:: 12.4.0
+
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :returns: A BlobLeaseClient object.
+        :rtype: ~azure.storage.blob.BlobLeaseClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/blob_samples_common.py
+                :start-after: [START acquire_lease_on_blob]
+                :end-before: [END acquire_lease_on_blob]
+                :language: python
+                :dedent: 8
+                :caption: Acquiring a lease on a blob.
+        """
+        lease = BlobLeaseClient(self, lease_id=lease_id)
+        lease.acquire(lease_duration=lease_duration, **kwargs)
+        return lease
+
+    @distributed_trace
+    def set_standard_blob_tier(self, standard_blob_tier: Union[str, "StandardBlobTier"], **kwargs: Any) -> None:
+        """This operation sets the tier on a block blob.
+
+        A block blob's tier determines Hot/Cool/Archive storage type.
+        This operation does not update the blob's ETag.
+
+        :param standard_blob_tier:
+            Indicates the tier to be set on the blob. Options include 'Hot', 'Cool',
+            'Archive'. The hot tier is optimized for storing data that is accessed
+            frequently. The cool storage tier is optimized for storing data that
+            is infrequently accessed and stored for at least a month. The archive
+            tier is optimized for storing data that is rarely accessed and stored
+            for at least six months with flexible latency requirements.
+        :type standard_blob_tier: str or ~azure.storage.blob.StandardBlobTier
+        :keyword ~azure.storage.blob.RehydratePriority rehydrate_priority:
+            Indicates the priority with which to rehydrate an archived blob.
+        :keyword str version_id:
+            The version id parameter is an opaque DateTime
+            value that, when present, specifies the version of the blob to set the tier on.
+
+            .. versionadded:: 12.4.0
+                This keyword argument was introduced in API version '2019-12-12'.
+        :keyword str if_tags_match_condition:
+            Specify a SQL where clause on blob tags to operate only on a blob with a matching value.
+            e.g. ``\"\\\"tagname\\\"='my tag'\"``
+
+            .. versionadded:: 12.4.0
+
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :keyword lease:
+            Required if the blob has an active lease. Value can be a BlobLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str
+        :rtype: None
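+
+        .. admonition:: Example:
+
+            A minimal sketch, assuming ``blob`` is an existing ``BlobClient``
+            for a block blob on a standard storage account:
+
+            .. code-block:: python
+
+                from azure.storage.blob import StandardBlobTier
+
+                # Move the blob to the cool access tier.
+                blob.set_standard_blob_tier(StandardBlobTier.COOL)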
+        """
+        access_conditions = get_access_conditions(kwargs.pop('lease', None))
+        mod_conditions = get_modify_conditions(kwargs)
+        if standard_blob_tier is None:
+            raise ValueError("A StandardBlobTier must be specified")
+        if self.snapshot and kwargs.get('version_id'):
+            raise ValueError("Snapshot and version_id cannot be set at the same time")
+        # Check the version_id kwarg before get_version_id pops it from kwargs,
+        # otherwise the snapshot/version_id conflict check can never trigger.
+        version_id = get_version_id(self.version_id, kwargs)
+        try:
+            self._client.blob.set_tier(
+                tier=standard_blob_tier,
+                snapshot=self.snapshot,
+                timeout=kwargs.pop('timeout', None),
+                modified_access_conditions=mod_conditions,
+                lease_access_conditions=access_conditions,
+                version_id=version_id,
+                **kwargs)
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    @distributed_trace
+    def stage_block(
+        self, block_id: str,
+        data: Union[bytes, str, Iterable[AnyStr], IO[AnyStr]],
+        length: Optional[int] = None,
+        **kwargs: Any
+    ) -> Dict[str, Any]:
+        """Creates a new block to be committed as part of a blob.
+
+        :param str block_id: A string value that identifies the block.
+             The string should be less than or equal to 64 bytes in size.
+             For a given blob, the block_id must be the same size for each block.
+        :param data: The blob data.
+        :type data: Union[bytes, str, Iterable[AnyStr], IO[AnyStr]]
+        :param int length: Size of the block.
+        :keyword bool validate_content:
+            If true, calculates an MD5 hash for each chunk of the blob. The storage
+            service checks the hash of the content that has arrived with the hash
+            that was sent. This is primarily valuable for detecting bitflips on
+            the wire if using http instead of https, as https (the default), will
+            already validate. Note that this MD5 hash is not stored with the
+            blob. Also note that if enabled, the memory-efficient upload algorithm
+            will not be used because computing the MD5 hash requires buffering
+            entire blocks, and doing so defeats the purpose of the memory-efficient algorithm.
+        :keyword lease:
+            Required if the blob has an active lease. Value can be a BlobLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str
+        :keyword str encoding:
+            Defaults to UTF-8.
+        :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk:
+            Encrypts the data on the service-side with the given key.
+            Use of customer-provided keys must be done over HTTPS.
+            As the encryption key itself is provided in the request,
+            a secure connection must be established to transfer the key.
+        :keyword str encryption_scope:
+            A predefined encryption scope used to encrypt the data on the service. An encryption
+            scope can be created using the Management API and referenced here by name. If a default
+            encryption scope has been defined at the container, this value will override it if the
+            container-level scope is configured to allow overrides. Otherwise an error will be raised.
+
+            .. versionadded:: 12.2.0
+
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :returns: Blob property dict.
+        :rtype: dict[str, Any]
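+
+        .. admonition:: Example:
+
+            A minimal sketch, assuming ``blob`` is an existing ``BlobClient``.
+            The block IDs are placeholders; all IDs for a blob must be the same length:
+
+            .. code-block:: python
+
+                # Stage two blocks; they stay uncommitted until commit_block_list is called.
+                blob.stage_block(block_id="000001", data=b"first block")
+                blob.stage_block(block_id="000002", data=b"second block")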
+        """
+        if self.require_encryption or (self.key_encryption_key is not None):
+            raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION)
+        if kwargs.get('cpk') and self.scheme.lower() != 'https':
+            raise ValueError("Customer provided encryption key must be used over HTTPS.")
+        options = _stage_block_options(
+            block_id=block_id,
+            data=data,
+            length=length,
+            **kwargs)
+        try:
+            return cast(Dict[str, Any], self._client.block_blob.stage_block(**options))
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    @distributed_trace
+    def stage_block_from_url(
+        self, block_id: str,
+        source_url: str,
+        source_offset: Optional[int] = None,
+        source_length: Optional[int] = None,
+        source_content_md5: Optional[Union[bytes, bytearray]] = None,
+        **kwargs: Any
+    ) -> Dict[str, Any]:
+        """Creates a new block to be committed as part of a blob where
+        the contents are read from a URL.
+
+        :param str block_id: A string value that identifies the block.
+             The string should be less than or equal to 64 bytes in size.
+             For a given blob, the block_id must be the same size for each block.
+        :param str source_url: The URL.
+        :param int source_offset:
+            Start of byte range to use for the block.
+            Must be set if source length is provided.
+        :param int source_length: The size of the block in bytes.
+        :param bytearray source_content_md5:
+            Specify the md5 calculated for the range of
+            bytes that must be read from the copy source.
+        :keyword lease:
+            Required if the blob has an active lease. Value can be a BlobLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str
+        :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk:
+            Encrypts the data on the service-side with the given key.
+            Use of customer-provided keys must be done over HTTPS.
+            As the encryption key itself is provided in the request,
+            a secure connection must be established to transfer the key.
+        :keyword str encryption_scope:
+            A predefined encryption scope used to encrypt the data on the service. An encryption
+            scope can be created using the Management API and referenced here by name. If a default
+            encryption scope has been defined at the container, this value will override it if the
+            container-level scope is configured to allow overrides. Otherwise an error will be raised.
+
+            .. versionadded:: 12.2.0
+
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :keyword str source_authorization:
+            Authenticate as a service principal using a client secret to access a source blob. Ensure "bearer " is
+            the prefix of the source_authorization string.
+        :returns: Blob property dict.
+        :rtype: dict[str, Any]
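+
+        .. admonition:: Example:
+
+            A minimal sketch, assuming ``blob`` is an existing ``BlobClient`` and
+            ``source_url`` is a readable (for example, SAS-authenticated) blob URL:
+
+            .. code-block:: python
+
+                # Stage the first 1024 bytes of the source as an uncommitted block.
+                blob.stage_block_from_url(
+                    block_id="000001",
+                    source_url=source_url,
+                    source_offset=0,
+                    source_length=1024)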
+        """
+        if kwargs.get('cpk') and self.scheme.lower() != 'https':
+            raise ValueError("Customer provided encryption key must be used over HTTPS.")
+        options = _stage_block_from_url_options(
+            block_id=block_id,
+            source_url=source_url,
+            source_offset=source_offset,
+            source_length=source_length,
+            source_content_md5=source_content_md5,
+            **kwargs)
+        try:
+            return cast(Dict[str, Any], self._client.block_blob.stage_block_from_url(**options))
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    @distributed_trace
+    def get_block_list(
+        self, block_list_type: str = "committed",
+        **kwargs: Any
+    ) -> Tuple[List[BlobBlock], List[BlobBlock]]:
+        """The Get Block List operation retrieves the list of blocks that have
+        been uploaded as part of a block blob.
+
+        :param str block_list_type:
+            Specifies whether to return the list of committed
+            blocks, the list of uncommitted blocks, or both lists together.
+            Possible values include: 'committed', 'uncommitted', 'all'
+        :keyword lease:
+            Required if the blob has an active lease. Value can be a BlobLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str
+        :keyword str if_tags_match_condition:
+            Specify a SQL where clause on blob tags to operate only on a destination blob with a matching value.
+
+            .. versionadded:: 12.4.0
+
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :returns: A tuple of two lists - committed and uncommitted blocks.
+        :rtype: Tuple[List[BlobBlock], List[BlobBlock]]
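+
+        .. admonition:: Example:
+
+            A minimal sketch, assuming ``blob`` is an existing ``BlobClient``
+            for a block blob:
+
+            .. code-block:: python
+
+                committed, uncommitted = blob.get_block_list(block_list_type="all")
+                for block in committed:
+                    print(block.id, block.size)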
+        """
+        access_conditions = get_access_conditions(kwargs.pop('lease', None))
+        mod_conditions = get_modify_conditions(kwargs)
+        try:
+            blocks = self._client.block_blob.get_block_list(
+                list_type=block_list_type,
+                snapshot=self.snapshot,
+                timeout=kwargs.pop('timeout', None),
+                lease_access_conditions=access_conditions,
+                modified_access_conditions=mod_conditions,
+                **kwargs)
+        except HttpResponseError as error:
+            process_storage_error(error)
+        return _get_block_list_result(blocks)
+
+    @distributed_trace
+    def commit_block_list(
+        self, block_list: List[BlobBlock],
+        content_settings: Optional["ContentSettings"] = None,
+        metadata: Optional[Dict[str, str]] = None,
+        **kwargs: Any
+    ) -> Dict[str, Union[str, datetime]]:
+        """The Commit Block List operation writes a blob by specifying the list of
+        block IDs that make up the blob.
+
+        :param block_list:
+            List of BlobBlock objects identifying the blocks to commit.
+        :type block_list: list[~azure.storage.blob.BlobBlock]
+        :param ~azure.storage.blob.ContentSettings content_settings:
+            ContentSettings object used to set blob properties. Used to set content type, encoding,
+            language, disposition, md5, and cache control.
+        :param metadata:
+            Name-value pairs associated with the blob as metadata.
+        :type metadata: dict[str, str]
+        :keyword tags:
+            Name-value pairs associated with the blob as tags. Tags are case-sensitive.
+            The tag set may contain at most 10 tags. Tag keys must be between 1 and 128 characters,
+            and tag values must be between 0 and 256 characters.
+            Valid tag key and value characters include: lowercase and uppercase letters, digits (0-9),
+            space (' '), plus (+), minus (-), period (.), solidus (/), colon (:), equals (=), underscore (_)
+
+            .. versionadded:: 12.4.0
+
+        :paramtype tags: dict(str, str)
+        :keyword lease:
+            Required if the blob has an active lease. Value can be a BlobLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str
+        :keyword ~azure.storage.blob.ImmutabilityPolicy immutability_policy:
+            Specifies the immutability policy of a blob, blob snapshot or blob version.
+
+            .. versionadded:: 12.10.0
+                This was introduced in API version '2020-10-02'.
+
+        :keyword bool legal_hold:
+            Specified if a legal hold should be set on the blob.
+
+            .. versionadded:: 12.10.0
+                This was introduced in API version '2020-10-02'.
+
+        :keyword bool validate_content:
+            If true, calculates an MD5 hash of the page content. The storage
+            service checks the hash of the content that has arrived
+            with the hash that was sent. This is primarily valuable for detecting
+            bitflips on the wire if using http instead of https, as https (the default),
+            will already validate. Note that this MD5 hash is not stored with the
+            blob.
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword str if_tags_match_condition:
+            Specify a SQL where clause on blob tags to operate only on a destination blob with a matching value.
+
+            .. versionadded:: 12.4.0
+
+        :keyword ~azure.storage.blob.StandardBlobTier standard_blob_tier:
+            A standard blob tier value to set the blob to. For this version of the library,
+            this is only applicable to block blobs on standard storage accounts.
+        :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk:
+            Encrypts the data on the service-side with the given key.
+            Use of customer-provided keys must be done over HTTPS.
+            As the encryption key itself is provided in the request,
+            a secure connection must be established to transfer the key.
+        :keyword str encryption_scope:
+            A predefined encryption scope used to encrypt the data on the service. An encryption
+            scope can be created using the Management API and referenced here by name. If a default
+            encryption scope has been defined at the container, this value will override it if the
+            container-level scope is configured to allow overrides. Otherwise an error will be raised.
+
+            .. versionadded:: 12.2.0
+
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :returns: Blob-updated property dict (Etag and last modified).
+        :rtype: dict[str, Any]
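+
+        .. admonition:: Example:
+
+            A minimal sketch, assuming ``blob`` is an existing ``BlobClient`` and
+            that blocks "000001" and "000002" were previously staged with :func:`stage_block`:
+
+            .. code-block:: python
+
+                from azure.storage.blob import BlobBlock
+
+                # Commit the staged blocks, in order, as the blob's content.
+                blob.commit_block_list([
+                    BlobBlock(block_id="000001"),
+                    BlobBlock(block_id="000002"),
+                ])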
+        """
+        if self.require_encryption or (self.key_encryption_key is not None):
+            raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION)
+        if kwargs.get('cpk') and self.scheme.lower() != 'https':
+            raise ValueError("Customer provided encryption key must be used over HTTPS.")
+        options = _commit_block_list_options(
+            block_list=block_list,
+            content_settings=content_settings,
+            metadata=metadata,
+            **kwargs)
+        try:
+            return cast(Dict[str, Any], self._client.block_blob.commit_block_list(**options))
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    @distributed_trace
+    def set_premium_page_blob_tier(self, premium_page_blob_tier: "PremiumPageBlobTier", **kwargs: Any) -> None:
+        """Sets the page blob tiers on the blob. This API is only supported for page blobs on premium accounts.
+
+        :param premium_page_blob_tier:
+            A page blob tier value to set the blob to. The tier correlates to the size of the
+            blob and number of allowed IOPS. This is only applicable to page blobs on
+            premium storage accounts.
+        :type premium_page_blob_tier: ~azure.storage.blob.PremiumPageBlobTier
+        :keyword str if_tags_match_condition:
+            Specify a SQL where clause on blob tags to operate only on a blob with a matching value.
+            e.g. ``\"\\\"tagname\\\"='my tag'\"``
+
+            .. versionadded:: 12.4.0
+
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :keyword lease:
+            Required if the blob has an active lease. Value can be a BlobLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str
+        :rtype: None
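+
+        .. admonition:: Example:
+
+            A minimal sketch, assuming ``blob`` is an existing ``BlobClient``
+            for a page blob on a premium storage account:
+
+            .. code-block:: python
+
+                from azure.storage.blob import PremiumPageBlobTier
+
+                blob.set_premium_page_blob_tier(PremiumPageBlobTier.P10)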
+        """
+        access_conditions = get_access_conditions(kwargs.pop('lease', None))
+        mod_conditions = get_modify_conditions(kwargs)
+        if premium_page_blob_tier is None:
+            raise ValueError("A PremiumPageBlobTier must be specified")
+        try:
+            self._client.blob.set_tier(
+                tier=premium_page_blob_tier,
+                timeout=kwargs.pop('timeout', None),
+                lease_access_conditions=access_conditions,
+                modified_access_conditions=mod_conditions,
+                **kwargs)
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    @distributed_trace
+    def set_blob_tags(self, tags: Optional[Dict[str, str]] = None, **kwargs: Any) -> Dict[str, Any]:
+        """The Set Tags operation enables users to set tags on a blob or specific blob version, but not snapshot.
+            Each call to this operation replaces all existing tags attached to the blob. To remove all
+            tags from the blob, call this operation with no tags set.
+
+        .. versionadded:: 12.4.0
+            This operation was introduced in API version '2019-12-12'.
+
+        :param tags:
+            Name-value pairs associated with the blob as tags. Tags are case-sensitive.
+            The tag set may contain at most 10 tags. Tag keys must be between 1 and 128 characters,
+            and tag values must be between 0 and 256 characters.
+            Valid tag key and value characters include: lowercase and uppercase letters, digits (0-9),
+            space (' '), plus (+), minus (-), period (.), solidus (/), colon (:), equals (=), underscore (_)
+        :type tags: dict(str, str)
+        :keyword str version_id:
+            The version id parameter is an opaque DateTime
+            value that, when present, specifies the version of the blob to add tags to.
+        :keyword bool validate_content:
+            If true, calculates an MD5 hash of the tags content. The storage
+            service checks the hash of the content that has arrived
+            with the hash that was sent. This is primarily valuable for detecting
+            bitflips on the wire if using http instead of https, as https (the default),
+            will already validate. Note that this MD5 hash is not stored with the
+            blob.
+        :keyword str if_tags_match_condition:
+            Specify a SQL where clause on blob tags to operate only on a destination blob with a matching value.
+            e.g. ``\"\\\"tagname\\\"='my tag'\"``
+        :keyword lease:
+            Required if the blob has an active lease. Value can be a BlobLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :returns: Blob-updated property dict (Etag and last modified)
+        :rtype: Dict[str, Any]
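+
+        .. admonition:: Example:
+
+            A minimal sketch, assuming ``blob`` is an existing ``BlobClient``;
+            the tag names and values are placeholders:
+
+            .. code-block:: python
+
+                # Replaces any tags already on the blob.
+                blob.set_blob_tags({"project": "alpha", "status": "draft"})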
+        """
+        version_id = get_version_id(self.version_id, kwargs)
+        options = _set_blob_tags_options(version_id=version_id, tags=tags, **kwargs)
+        try:
+            return cast(Dict[str, Any], self._client.blob.set_tags(**options))
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    @distributed_trace
+    def get_blob_tags(self, **kwargs: Any) -> Dict[str, str]:
+        """The Get Tags operation enables users to get tags on a blob or specific blob version, or snapshot.
+
+        .. versionadded:: 12.4.0
+            This operation was introduced in API version '2019-12-12'.
+
+        :keyword str version_id:
+            The version id parameter is an opaque DateTime
+            value that, when present, specifies the version of the blob to retrieve tags from.
+        :keyword str if_tags_match_condition:
+            Specify a SQL where clause on blob tags to operate only on a destination blob with a matching value.
+            e.g. ``\"\\\"tagname\\\"='my tag'\"``
+        :keyword lease:
+            Required if the blob has an active lease. Value can be a BlobLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :returns: Key value pairs of blob tags.
+        :rtype: Dict[str, str]
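+
+        .. admonition:: Example:
+
+            A minimal sketch, assuming ``blob`` is an existing ``BlobClient``:
+
+            .. code-block:: python
+
+                tags = blob.get_blob_tags()
+                for name, value in tags.items():
+                    print(name, value)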
+        """
+        version_id = get_version_id(self.version_id, kwargs)
+        options = _get_blob_tags_options(version_id=version_id, snapshot=self.snapshot, **kwargs)
+        try:
+            _, tags = self._client.blob.get_tags(**options)
+            return cast(Dict[str, str], parse_tags(tags))
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    @distributed_trace
+    def get_page_ranges(
+        self, offset: Optional[int] = None,
+        length: Optional[int] = None,
+        previous_snapshot_diff: Optional[Union[str, Dict[str, Any]]] = None,
+        **kwargs: Any
+    ) -> Tuple[List[Dict[str, int]], List[Dict[str, int]]]:
+        """DEPRECATED: Returns the list of valid page ranges for a Page Blob or snapshot
+        of a page blob.
+
+        :param int offset:
+            Start of byte range to use for getting valid page ranges.
+            If no length is given, all bytes after the offset will be searched.
+            Pages must be aligned with 512-byte boundaries: the start offset
+            must be a multiple of 512 and the length must be a multiple of 512.
+        :param int length:
+            Number of bytes to use for getting valid page ranges.
+            If length is given, offset must be provided.
+            This range will return valid page ranges from the offset start up to
+            the specified length.
+            Pages must be aligned with 512-byte boundaries: the start offset
+            must be a multiple of 512 and the length must be a multiple of 512.
+        :param previous_snapshot_diff:
+            The snapshot diff parameter that contains an opaque DateTime value that
+            specifies a previous blob snapshot to be compared
+            against a more recent snapshot or the current blob.
+        :type previous_snapshot_diff: str or dict[str, Any]
+        :keyword lease:
+            Required if the blob has an active lease. Value can be a BlobLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword str if_tags_match_condition:
+            Specify a SQL where clause on blob tags to operate only on a blob with a matching value.
+            e.g. ``\"\\\"tagname\\\"='my tag'\"``
+
+            .. versionadded:: 12.4.0
+
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :returns:
+            A tuple of two lists of page ranges as dictionaries with 'start' and 'end' keys.
+            The first element is the filled page ranges, the second element is the cleared page ranges.
+        :rtype: tuple(list(dict(str, int)), list(dict(str, int)))
+        """
+        warnings.warn(
+            "get_page_ranges is deprecated, use list_page_ranges instead",
+            DeprecationWarning
+        )
+
+        options = _get_page_ranges_options(
+            snapshot=self.snapshot,
+            offset=offset,
+            length=length,
+            previous_snapshot_diff=previous_snapshot_diff,
+            **kwargs)
+        try:
+            if previous_snapshot_diff:
+                ranges = self._client.page_blob.get_page_ranges_diff(**options)
+            else:
+                ranges = self._client.page_blob.get_page_ranges(**options)
+        except HttpResponseError as error:
+            process_storage_error(error)
+        return get_page_ranges_result(ranges)
+
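+    # A minimal usage sketch (illustrative; assumes `blob_client` is a
+    # BlobClient for an existing page blob). Since get_page_ranges is
+    # deprecated, new code should prefer list_page_ranges:
+    #
+    #     filled, cleared = blob_client.get_page_ranges()   # deprecated
+    #     for r in blob_client.list_page_ranges():          # preferred
+    #         print(r.start, r.end)
+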
+    @distributed_trace
+    def list_page_ranges(
+        self,
+        *,
+        offset: Optional[int] = None,
+        length: Optional[int] = None,
+        previous_snapshot: Optional[Union[str, Dict[str, Any]]] = None,
+        **kwargs: Any
+    ) -> ItemPaged[PageRange]:
+        """Returns the list of valid page ranges for a Page Blob or snapshot
+        of a page blob. If `previous_snapshot` is specified, the result will be
+        a diff of changes between the target blob and the previous snapshot.
+
+        :keyword int offset:
+            Start of byte range to use for getting valid page ranges.
+            If no length is given, all bytes after the offset will be searched.
+            Pages must be aligned to 512-byte boundaries: the start offset
+            and the length must each be a multiple of 512.
+        :keyword int length:
+            Number of bytes to use for getting valid page ranges.
+            If length is given, offset must be provided.
+            This range will return valid page ranges from the offset start up to
+            the specified length.
+            Pages must be aligned to 512-byte boundaries: the start offset
+            and the length must each be a multiple of 512.
+        :keyword previous_snapshot:
+            A snapshot value that specifies that the response will contain only pages that were changed
+            between target blob and previous snapshot. Changed pages include both updated and cleared
+            pages. The target blob may be a snapshot, as long as the snapshot specified by `previous_snapshot`
+            is the older of the two.
+        :paramtype previous_snapshot: str or Dict[str, Any]
+        :keyword lease:
+            Required if the blob has an active lease. Value can be a BlobLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword str if_tags_match_condition:
+            Specify a SQL where clause on blob tags to operate only on blobs with a matching value.
+            eg. ``\"\\\"tagname\\\"='my tag'\"``
+
+            .. versionadded:: 12.4.0
+
+        :keyword int results_per_page:
+            The maximum number of page ranges to retrieve per API call.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :returns: An iterable (auto-paging) of PageRange.
+        :rtype: ~azure.core.paging.ItemPaged[~azure.storage.blob.PageRange]
+        """
+        results_per_page = kwargs.pop('results_per_page', None)
+        options = _get_page_ranges_options(
+            snapshot=self.snapshot,
+            offset=offset,
+            length=length,
+            previous_snapshot_diff=previous_snapshot,
+            **kwargs)
+
+        if previous_snapshot:
+            command = partial(
+                self._client.page_blob.get_page_ranges_diff,
+                **options)
+        else:
+            command = partial(
+                self._client.page_blob.get_page_ranges,
+                **options)
+        return ItemPaged(
+            command, results_per_page=results_per_page,
+            page_iterator_class=PageRangePaged)
+
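+    # A minimal diff sketch (illustrative): `previous_snapshot` may be the dict
+    # returned by create_snapshot or a snapshot string. Assumes `blob_client`
+    # points at an existing page blob:
+    #
+    #     snapshot = blob_client.create_snapshot()
+    #     blob_client.upload_page(b"\x00" * 512, offset=0, length=512)
+    #     for r in blob_client.list_page_ranges(previous_snapshot=snapshot):
+    #         print(r.start, r.end, "cleared" if r.cleared else "filled")
+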
+    @distributed_trace
+    def get_page_range_diff_for_managed_disk(
+        self, previous_snapshot_url: str,
+        offset: Optional[int] = None,
+        length: Optional[int] = None,
+        **kwargs: Any
+    ) -> Tuple[List[Dict[str, int]], List[Dict[str, int]]]:
+        """Returns the list of valid page ranges for a managed disk or snapshot.
+
+        .. note::
+            This operation is only available for managed disk accounts.
+
+        .. versionadded:: 12.2.0
+            This operation was introduced in API version '2019-07-07'.
+
+        :param str previous_snapshot_url:
+            Specifies the URL of a previous snapshot of the managed disk.
+            The response will only contain pages that were changed between the target blob and
+            its previous snapshot.
+        :param int offset:
+            Start of byte range to use for getting valid page ranges.
+            If no length is given, all bytes after the offset will be searched.
+            Pages must be aligned to 512-byte boundaries: the start offset
+            and the length must each be a multiple of 512.
+        :param int length:
+            Number of bytes to use for getting valid page ranges.
+            If length is given, offset must be provided.
+            This range will return valid page ranges from the offset start up to
+            the specified length.
+            Pages must be aligned to 512-byte boundaries: the start offset
+            and the length must each be a multiple of 512.
+        :keyword lease:
+            Required if the blob has an active lease. Value can be a BlobLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :returns:
+            A tuple of two lists of page ranges as dictionaries with 'start' and 'end' keys.
+            The first element is the list of filled page ranges; the second
+            element is the list of cleared page ranges.
+        :rtype: tuple(list(dict(str, int)), list(dict(str, int)))
+        """
+        options = _get_page_ranges_options(
+            snapshot=self.snapshot,
+            offset=offset,
+            length=length,
+            prev_snapshot_url=previous_snapshot_url,
+            **kwargs)
+        try:
+            ranges = self._client.page_blob.get_page_ranges_diff(**options)
+        except HttpResponseError as error:
+            process_storage_error(error)
+        return get_page_ranges_result(ranges)
+
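+    # Illustrative call (managed disk accounts only); the snapshot URL below is
+    # a placeholder for a real managed disk snapshot URL:
+    #
+    #     url = "https://<account>.blob.core.windows.net/<container>/<disk>?snapshot=<value>"
+    #     filled, cleared = blob_client.get_page_range_diff_for_managed_disk(url)
+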
+    @distributed_trace
+    def set_sequence_number(
+        self, sequence_number_action: Union[str, "SequenceNumberAction"],
+        sequence_number: Optional[str] = None,
+        **kwargs: Any
+    ) -> Dict[str, Union[str, datetime]]:
+        """Sets the blob sequence number.
+
+        :param str sequence_number_action:
+            This property indicates how the service should modify the blob's sequence
+            number. See :class:`~azure.storage.blob.SequenceNumberAction` for more information.
+        :param str sequence_number:
+            This property sets the blob's sequence number. The sequence number is a
+            user-controlled property that you can use to track requests and manage
+            concurrency issues.
+        :keyword lease:
+            Required if the blob has an active lease. Value can be a BlobLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword str if_tags_match_condition:
+            Specify a SQL where clause on blob tags to operate only on blobs with a matching value.
+            eg. ``\"\\\"tagname\\\"='my tag'\"``
+
+            .. versionadded:: 12.4.0
+
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :returns: Blob-updated property dict (Etag and last modified).
+        :rtype: dict(str, Any)
+        """
+        options = _set_sequence_number_options(sequence_number_action, sequence_number=sequence_number, **kwargs)
+        try:
+            return cast(Dict[str, Any], self._client.page_blob.update_sequence_number(**options))
+        except HttpResponseError as error:
+            process_storage_error(error)
+
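+    # Illustrative usage: SequenceNumberAction is exported from
+    # azure.storage.blob, and sequence_number is passed as a string.
+    #
+    #     from azure.storage.blob import SequenceNumberAction
+    #     blob_client.set_sequence_number(SequenceNumberAction.Update, sequence_number="7")
+    #     blob_client.set_sequence_number(SequenceNumberAction.Increment)
+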
+    @distributed_trace
+    def resize_blob(self, size: int, **kwargs: Any) -> Dict[str, Union[str, datetime]]:
+        """Resizes a page blob to the specified size.
+
+        If the specified value is less than the current size of the blob,
+        then all pages above the specified value are cleared.
+
+        :param int size:
+            Size used to resize the blob. The maximum size for a page blob is 1 TB.
+            The page blob size must be aligned to a 512-byte boundary.
+        :keyword lease:
+            Required if the blob has an active lease. Value can be a BlobLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword str if_tags_match_condition:
+            Specify a SQL where clause on blob tags to operate only on blobs with a matching value.
+            eg. ``\"\\\"tagname\\\"='my tag'\"``
+
+            .. versionadded:: 12.4.0
+
+        :keyword ~azure.storage.blob.PremiumPageBlobTier premium_page_blob_tier:
+            A page blob tier value to set the blob to. The tier correlates to the size of the
+            blob and number of allowed IOPS. This is only applicable to page blobs on
+            premium storage accounts.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :returns: Blob-updated property dict (Etag and last modified).
+        :rtype: dict(str, Any)
+        """
+        if kwargs.get('cpk') and self.scheme.lower() != 'https':
+            raise ValueError("Customer provided encryption key must be used over HTTPS.")
+        options = _resize_blob_options(size=size, **kwargs)
+        try:
+            return cast(Dict[str, Any], self._client.page_blob.resize(**options))
+        except HttpResponseError as error:
+            process_storage_error(error)
+
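+    # Illustrative usage: the new size must be 512-byte aligned; shrinking a
+    # blob clears any pages beyond the new size.
+    #
+    #     blob_client.resize_blob(4 * 1024 * 1024)   # resize to 4 MiB
+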
+    @distributed_trace
+    def upload_page(
+        self, page: bytes,
+        offset: int,
+        length: int,
+        **kwargs: Any
+    ) -> Dict[str, Union[str, datetime]]:
+        """The Upload Pages operation writes a range of pages to a page blob.
+
+        :param bytes page:
+            Content of the page.
+        :param int offset:
+            Start of byte range to use for writing to a section of the blob.
+            Pages must be aligned to 512-byte boundaries: the start offset
+            and the length must each be a multiple of 512.
+        :param int length:
+            Number of bytes to use for writing to a section of the blob.
+            Pages must be aligned to 512-byte boundaries: the start offset
+            and the length must each be a multiple of 512.
+        :keyword lease:
+            Required if the blob has an active lease. Value can be a BlobLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str
+        :keyword bool validate_content:
+            If true, calculates an MD5 hash of the page content. The storage
+            service checks the hash of the content that has arrived
+            with the hash that was sent. This is primarily valuable for detecting
+            bitflips on the wire if using http instead of https, as https (the default)
+            will already validate. Note that this MD5 hash is not stored with the
+            blob.
+        :keyword int if_sequence_number_lte:
+            If the blob's sequence number is less than or equal to
+            the specified value, the request proceeds; otherwise it fails.
+        :keyword int if_sequence_number_lt:
+            If the blob's sequence number is less than the specified
+            value, the request proceeds; otherwise it fails.
+        :keyword int if_sequence_number_eq:
+            If the blob's sequence number is equal to the specified
+            value, the request proceeds; otherwise it fails.
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword str if_tags_match_condition:
+            Specify a SQL where clause on blob tags to operate only on blobs with a matching value.
+            eg. ``\"\\\"tagname\\\"='my tag'\"``
+
+            .. versionadded:: 12.4.0
+
+        :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk:
+            Encrypts the data on the service-side with the given key.
+            Use of customer-provided keys must be done over HTTPS.
+            As the encryption key itself is provided in the request,
+            a secure connection must be established to transfer the key.
+        :keyword str encryption_scope:
+            A predefined encryption scope used to encrypt the data on the service. An encryption
+            scope can be created using the Management API and referenced here by name. If a default
+            encryption scope has been defined at the container, this value will override it if the
+            container-level scope is configured to allow overrides. Otherwise an error will be raised.
+
+            .. versionadded:: 12.2.0
+
+        :keyword str encoding:
+            Defaults to UTF-8.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :returns: Blob-updated property dict (Etag and last modified).
+        :rtype: dict(str, Any)
+        """
+        if self.require_encryption or (self.key_encryption_key is not None):
+            raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION)
+        if kwargs.get('cpk') and self.scheme.lower() != 'https':
+            raise ValueError("Customer provided encryption key must be used over HTTPS.")
+        options = _upload_page_options(
+            page=page,
+            offset=offset,
+            length=length,
+            **kwargs)
+        try:
+            return cast(Dict[str, Any], self._client.page_blob.upload_pages(**options))
+        except HttpResponseError as error:
+            process_storage_error(error)
+
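+    # Illustrative usage: both offset and length must be multiples of 512, and
+    # length must match the size of the supplied data.
+    #
+    #     page = b"\x00" * 512
+    #     blob_client.upload_page(page, offset=0, length=512)
+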
+    @distributed_trace
+    def upload_pages_from_url(
+        self, source_url: str,
+        offset: int,
+        length: int,
+        source_offset: int,
+        **kwargs: Any
+    ) -> Dict[str, Any]:
+        """
+        The Upload Pages operation writes a range of pages to a page blob where
+        the contents are read from a URL.
+
+        :param str source_url:
+            The URL of the source data. It can point to any Azure Blob or File that is either
+            public or has a shared access signature attached.
+        :param int offset:
+            Start of byte range to use for writing to a section of the blob.
+            Pages must be aligned to 512-byte boundaries: the start offset
+            and the length must each be a multiple of 512.
+        :param int length:
+            Number of bytes to use for writing to a section of the blob.
+            Pages must be aligned to 512-byte boundaries: the start offset
+            and the length must each be a multiple of 512.
+        :param int source_offset:
+            This indicates the start of the range of bytes (inclusive) that has to be taken from the copy source.
+            The service will read the same number of bytes as the destination range, i.e. length bytes.
+        :keyword bytes source_content_md5:
+            If given, the service will calculate the MD5 hash of the block content and compare against this value.
+        :keyword ~datetime.datetime source_if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the source resource has been modified since the specified time.
+        :keyword ~datetime.datetime source_if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the source resource has not been modified since the specified date/time.
+        :keyword str source_etag:
+            The source ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions source_match_condition:
+            The source match condition to use upon the etag.
+        :keyword lease:
+            Required if the blob has an active lease. Value can be a BlobLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str
+        :keyword int if_sequence_number_lte:
+            If the blob's sequence number is less than or equal to
+            the specified value, the request proceeds; otherwise it fails.
+        :keyword int if_sequence_number_lt:
+            If the blob's sequence number is less than the specified
+            value, the request proceeds; otherwise it fails.
+        :keyword int if_sequence_number_eq:
+            If the blob's sequence number is equal to the specified
+            value, the request proceeds; otherwise it fails.
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            The destination ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The destination match condition to use upon the etag.
+        :keyword str if_tags_match_condition:
+            Specify a SQL where clause on blob tags to operate only on blobs with a matching value.
+            eg. ``\"\\\"tagname\\\"='my tag'\"``
+
+            .. versionadded:: 12.4.0
+
+        :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk:
+            Encrypts the data on the service-side with the given key.
+            Use of customer-provided keys must be done over HTTPS.
+            As the encryption key itself is provided in the request,
+            a secure connection must be established to transfer the key.
+        :keyword str encryption_scope:
+            A predefined encryption scope used to encrypt the data on the service. An encryption
+            scope can be created using the Management API and referenced here by name. If a default
+            encryption scope has been defined at the container, this value will override it if the
+            container-level scope is configured to allow overrides. Otherwise an error will be raised.
+
+            .. versionadded:: 12.2.0
+
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :keyword str source_authorization:
+            Authenticate as a service principal using a client secret to access a source blob. Ensure "bearer " is
+            the prefix of the source_authorization string.
+        :returns: Response after uploading pages from specified URL.
+        :rtype: Dict[str, Any]
+        """
+        if self.require_encryption or (self.key_encryption_key is not None):
+            raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION)
+        if kwargs.get('cpk') and self.scheme.lower() != 'https':
+            raise ValueError("Customer provided encryption key must be used over HTTPS.")
+        options = _upload_pages_from_url_options(
+            source_url=source_url,
+            offset=offset,
+            length=length,
+            source_offset=source_offset,
+            **kwargs
+        )
+        try:
+            return cast(Dict[str, Any], self._client.page_blob.upload_pages_from_url(**options))
+        except HttpResponseError as error:
+            process_storage_error(error)
+
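+    # Illustrative usage: the source URL below is a placeholder and must be
+    # publicly readable or carry a SAS token.
+    #
+    #     src = "https://<account>.blob.core.windows.net/<container>/<blob>?<sas>"
+    #     blob_client.upload_pages_from_url(src, offset=0, length=512, source_offset=0)
+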
+    @distributed_trace
+    def clear_page(self, offset: int, length: int, **kwargs: Any) -> Dict[str, Union[str, datetime]]:
+        """Clears a range of pages.
+
+        :param int offset:
+            Start of byte range to use for writing to a section of the blob.
+            Pages must be aligned to 512-byte boundaries: the start offset
+            and the length must each be a multiple of 512.
+        :param int length:
+            Number of bytes to use for writing to a section of the blob.
+            Pages must be aligned to 512-byte boundaries: the start offset
+            and the length must each be a multiple of 512.
+        :keyword lease:
+            Required if the blob has an active lease. Value can be a BlobLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str
+        :keyword int if_sequence_number_lte:
+            If the blob's sequence number is less than or equal to
+            the specified value, the request proceeds; otherwise it fails.
+        :keyword int if_sequence_number_lt:
+            If the blob's sequence number is less than the specified
+            value, the request proceeds; otherwise it fails.
+        :keyword int if_sequence_number_eq:
+            If the blob's sequence number is equal to the specified
+            value, the request proceeds; otherwise it fails.
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword str if_tags_match_condition:
+            Specify a SQL where clause on blob tags to operate only on blobs with a matching value.
+            eg. ``\"\\\"tagname\\\"='my tag'\"``
+
+            .. versionadded:: 12.4.0
+
+        :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk:
+            Encrypts the data on the service-side with the given key.
+            Use of customer-provided keys must be done over HTTPS.
+            As the encryption key itself is provided in the request,
+            a secure connection must be established to transfer the key.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :returns: Blob-updated property dict (Etag and last modified).
+        :rtype: dict(str, Any)
+        """
+        if self.require_encryption or (self.key_encryption_key is not None):
+            raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION)
+        if kwargs.get('cpk') and self.scheme.lower() != 'https':
+            raise ValueError("Customer provided encryption key must be used over HTTPS.")
+        options = _clear_page_options(
+            offset=offset,
+            length=length,
+            **kwargs
+        )
+        try:
+            return cast(Dict[str, Any], self._client.page_blob.clear_pages(**options))
+        except HttpResponseError as error:
+            process_storage_error(error)
+
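+    # Illustrative usage: clearing the first 512-byte page of the blob.
+    #
+    #     blob_client.clear_page(offset=0, length=512)
+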
+    @distributed_trace
+    def append_block(
+        self, data: Union[bytes, str, Iterable[AnyStr], IO[AnyStr]],
+        length: Optional[int] = None,
+        **kwargs: Any
+    ) -> Dict[str, Union[str, datetime, int]]:
+        """Commits a new block of data to the end of the existing append blob.
+
+        :param data:
+            Content of the block. This can be bytes, text, an iterable or a file-like object.
+        :type data: bytes or str or Iterable or IO
+        :param int length:
+            Size of the block in bytes.
+        :keyword bool validate_content:
+            If true, calculates an MD5 hash of the block content. The storage
+            service checks the hash of the content that has arrived
+            with the hash that was sent. This is primarily valuable for detecting
+            bitflips on the wire if using http instead of https, as https (the default)
+            will already validate. Note that this MD5 hash is not stored with the
+            blob.
+        :keyword int maxsize_condition:
+            Optional conditional header. The max length in bytes permitted for
+            the append blob. If the Append Block operation would cause the blob
+            to exceed that limit or if the blob size is already greater than the
+            value specified in this header, the request will fail with
+            MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed).
+        :keyword int appendpos_condition:
+            Optional conditional header, used only for the Append Block operation.
+            A number indicating the byte offset to compare. Append Block will
+            succeed only if the append position is equal to this number. If it
+            is not, the request will fail with the AppendPositionConditionNotMet error
+            (HTTP status code 412 - Precondition Failed).
+        :keyword lease:
+            Required if the blob has an active lease. Value can be a BlobLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword str if_tags_match_condition:
+            Specify a SQL where clause on blob tags to operate only on blobs with a matching value.
+            eg. ``\"\\\"tagname\\\"='my tag'\"``
+
+            .. versionadded:: 12.4.0
+
+        :keyword str encoding:
+            Defaults to UTF-8.
+        :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk:
+            Encrypts the data on the service-side with the given key.
+            Use of customer-provided keys must be done over HTTPS.
+            As the encryption key itself is provided in the request,
+            a secure connection must be established to transfer the key.
+        :keyword str encryption_scope:
+            A predefined encryption scope used to encrypt the data on the service. An encryption
+            scope can be created using the Management API and referenced here by name. If a default
+            encryption scope has been defined at the container, this value will override it if the
+            container-level scope is configured to allow overrides. Otherwise an error will be raised.
+
+            .. versionadded:: 12.2.0
+
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :returns: Blob-updated property dict (Etag, last modified, append offset, committed block count).
+        :rtype: dict(str, Any)
+        """
+        if self.require_encryption or (self.key_encryption_key is not None):
+            raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION)
+        if kwargs.get('cpk') and self.scheme.lower() != 'https':
+            raise ValueError("Customer provided encryption key must be used over HTTPS.")
+        options = _append_block_options(
+            data=data,
+            length=length,
+            **kwargs
+        )
+        try:
+            return cast(Dict[str, Any], self._client.append_blob.append_block(**options))
+        except HttpResponseError as error:
+            process_storage_error(error)
+
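+    # Illustrative usage (assumes `blob_client` points at an existing append
+    # blob); the returned dict includes the new append offset:
+    #
+    #     result = blob_client.append_block(b"log line\n")
+    #     print(result.get('blob_append_offset'))
+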
+    @distributed_trace
+    def append_block_from_url(
+        self, copy_source_url: str,
+        source_offset: Optional[int] = None,
+        source_length: Optional[int] = None,
+        **kwargs: Any
+    ) -> Dict[str, Union[str, datetime, int]]:
+        """
+        Creates a new block to be committed as part of a blob, where the contents are read from a source url.
+
+        :param str copy_source_url:
+            The URL of the source data. It can point to any Azure Blob or File that is either
+            public or has a shared access signature attached.
+        :param int source_offset:
+            This indicates the start of the range of bytes (inclusive) that has to be taken from the copy source.
+        :param int source_length:
+            This indicates the number of bytes, starting at source_offset, to be taken from the copy source.
+        :keyword bytearray source_content_md5:
+            If given, the service will calculate the MD5 hash of the block content and compare against this value.
+        :keyword int maxsize_condition:
+            Optional conditional header. The max length in bytes permitted for
+            the append blob. If the Append Block operation would cause the blob
+            to exceed that limit or if the blob size is already greater than the
+            value specified in this header, the request will fail with
+            MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed).
+        :keyword int appendpos_condition:
+            Optional conditional header, used only for the Append Block operation.
+            A number indicating the byte offset to compare. Append Block will
+            succeed only if the append position is equal to this number. If it
+            is not, the request will fail with the
+            AppendPositionConditionNotMet error
+            (HTTP status code 412 - Precondition Failed).
+        :keyword lease:
+            Required if the blob has an active lease. Value can be a BlobLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            The destination ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The destination match condition to use upon the etag.
+        :keyword str if_tags_match_condition:
+            Specify a SQL where clause on blob tags to operate only on blobs with a matching value.
+            eg. ``\"\\\"tagname\\\"='my tag'\"``
+
+            .. versionadded:: 12.4.0
+
+        :keyword ~datetime.datetime source_if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the source resource has been modified since the specified time.
+        :keyword ~datetime.datetime source_if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the source resource has not been modified since the specified date/time.
+        :keyword str source_etag:
+            The source ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions source_match_condition:
+            The source match condition to use upon the etag.
+        :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk:
+            Encrypts the data on the service-side with the given key.
+            Use of customer-provided keys must be done over HTTPS.
+            As the encryption key itself is provided in the request,
+            a secure connection must be established to transfer the key.
+        :keyword str encryption_scope:
+            A predefined encryption scope used to encrypt the data on the service. An encryption
+            scope can be created using the Management API and referenced here by name. If a default
+            encryption scope has been defined at the container, this value will override it if the
+            container-level scope is configured to allow overrides. Otherwise an error will be raised.
+
+            .. versionadded:: 12.2.0
+
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :keyword str source_authorization:
+            Authenticate as a service principal using a client secret to access a source blob. Ensure "bearer " is
+            the prefix of the source_authorization string.
+        :returns: Result after appending a new block.
+        :rtype: Dict[str, Union[str, datetime, int]]
+        """
+        if self.require_encryption or (self.key_encryption_key is not None):
+            raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION)
+        if kwargs.get('cpk') and self.scheme.lower() != 'https':
+            raise ValueError("Customer provided encryption key must be used over HTTPS.")
+        options = _append_block_from_url_options(
+            copy_source_url=copy_source_url,
+            source_offset=source_offset,
+            source_length=source_length,
+            **kwargs
+        )
+        try:
+            return cast(Dict[str, Union[str, datetime, int]],
+                        self._client.append_blob.append_block_from_url(**options))
+        except HttpResponseError as error:
+            process_storage_error(error)
+
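+    # Illustrative usage: copies the first 512 bytes of a source blob
+    # (placeholder URL) onto the end of this append blob.
+    #
+    #     src = "https://<account>.blob.core.windows.net/<container>/<blob>?<sas>"
+    #     blob_client.append_block_from_url(src, source_offset=0, source_length=512)
+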
+    @distributed_trace
+    def seal_append_blob(self, **kwargs: Any) -> Dict[str, Union[str, datetime, int]]:
+        """The Seal operation seals the Append Blob to make it read-only.
+
+            .. versionadded:: 12.4.0
+
+        :keyword int appendpos_condition:
+            Optional conditional header, used only for the Append Block operation.
+            A number indicating the byte offset to compare. Append Block will
+            succeed only if the append position is equal to this number. If it
+            is not, the request will fail with the AppendPositionConditionNotMet error
+            (HTTP status code 412 - Precondition Failed).
+        :keyword lease:
+            Required if the blob has an active lease. Value can be a BlobLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :returns: Blob-updated property dict (Etag, last modified, append offset, committed block count).
+        :rtype: dict(str, Any)
+        """
+        if self.require_encryption or (self.key_encryption_key is not None):
+            raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION)
+        options = _seal_append_blob_options(**kwargs)
+        try:
+            return cast(Dict[str, Any], self._client.append_blob.seal(**options))
+        except HttpResponseError as error:
+            process_storage_error(error)
+
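+    # Illustrative usage: once sealed, further append_block calls will fail.
+    #
+    #     blob_client.seal_append_blob()
+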
+    @distributed_trace
+    def _get_container_client(self) -> "ContainerClient":
+        """Get a client to interact with the blob's parent container.
+
+        The container need not already exist. Defaults to current blob's credentials.
+
+        :returns: A ContainerClient.
+        :rtype: ~azure.storage.blob.ContainerClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/blob_samples_containers.py
+                :start-after: [START get_container_client_from_blob_client]
+                :end-before: [END get_container_client_from_blob_client]
+                :language: python
+                :dedent: 8
+                :caption: Get container client from blob object.
+        """
+        from ._container_client import ContainerClient
+        if not isinstance(self._pipeline._transport, TransportWrapper): # pylint: disable = protected-access
+            _pipeline = Pipeline(
+                transport=TransportWrapper(self._pipeline._transport), # pylint: disable = protected-access
+                policies=self._pipeline._impl_policies # pylint: disable = protected-access
+            )
+        else:
+            _pipeline = self._pipeline
+        return ContainerClient(
+            f"{self.scheme}://{self.primary_hostname}", container_name=self.container_name,
+            credential=self._raw_credential, api_version=self.api_version, _configuration=self._config,
+            _pipeline=_pipeline, _location_mode=self._location_mode, _hosts=self._hosts,
+            require_encryption=self.require_encryption, encryption_version=self.encryption_version,
+            key_encryption_key=self.key_encryption_key, key_resolver_function=self.key_resolver_function)
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/blob/_blob_client_helpers.py b/.venv/lib/python3.12/site-packages/azure/storage/blob/_blob_client_helpers.py
new file mode 100644
index 00000000..a04f0ea0
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/blob/_blob_client_helpers.py
@@ -0,0 +1,1246 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+# pylint: disable=too-many-lines
+
+from io import BytesIO
+from typing import (
+    Any, AnyStr, AsyncGenerator, AsyncIterable, cast,
+    Dict, IO, Iterable, List, Optional, Tuple, Union,
+    TYPE_CHECKING
+)
+from urllib.parse import quote, unquote, urlparse
+
+from ._deserialize import deserialize_blob_stream
+from ._encryption import modify_user_agent_for_encryption, _ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION
+from ._generated.models import (
+    AppendPositionAccessConditions,
+    BlobHTTPHeaders,
+    BlockList,
+    BlockLookupList,
+    CpkInfo,
+    DeleteSnapshotsOptionType,
+    QueryRequest,
+    SequenceNumberAccessConditions
+)
+from ._models import (
+    BlobBlock,
+    BlobProperties,
+    BlobType,
+    DelimitedJsonDialect,
+    DelimitedTextDialect,
+    PremiumPageBlobTier,
+    QuickQueryDialect
+)
+from ._serialize import (
+    get_access_conditions,
+    get_cpk_scope_info,
+    get_modify_conditions,
+    get_source_conditions,
+    serialize_blob_tags_header,
+    serialize_blob_tags,
+    serialize_query_format
+)
+from ._shared import encode_base64
+from ._shared.base_client import parse_query
+from ._shared.request_handlers import (
+    add_metadata_headers,
+    get_length,
+    read_length,
+    validate_and_format_range_headers
+)
+from ._shared.response_handlers import return_headers_and_deserialized, return_response_headers
+from ._shared.uploads import IterStreamer
+from ._shared.uploads_async import AsyncIterStreamer
+from ._upload_helpers import _any_conditions
+
+if TYPE_CHECKING:
+    from urllib.parse import ParseResult
+    from ._generated import AzureBlobStorage
+    from ._models import ContentSettings
+    from ._shared.models import StorageConfiguration
+
+
+def _parse_url(
+    account_url: str,
+    container_name: str,
+    blob_name: str
+) -> Tuple["ParseResult", Optional[str], Optional[str]]:
+    try:
+        if not account_url.lower().startswith('http'):
+            account_url = "https://" + account_url
+    except AttributeError as exc:
+        raise ValueError("Account URL must be a string.") from exc
+    parsed_url = urlparse(account_url.rstrip('/'))
+
+    if not (container_name and blob_name):
+        raise ValueError("Please specify a container name and blob name.")
+    if not parsed_url.netloc:
+        raise ValueError(f"Invalid URL: {account_url}")
+
+    path_snapshot, sas_token = parse_query(parsed_url.query)
+
+    return parsed_url, sas_token, path_snapshot
+
+def _format_url(container_name: Union[bytes, str], scheme: str, blob_name: str, query_str: str, hostname: str) -> str:
+    if isinstance(container_name, str):
+        container_name = container_name.encode('UTF-8')
+    return f"{scheme}://{hostname}/{quote(container_name)}/{quote(blob_name, safe='~/')}{query_str}"
+
+def _encode_source_url(source_url: str) -> str:
+    parsed_source_url = urlparse(source_url)
+    source_scheme = parsed_source_url.scheme
+    source_hostname = parsed_source_url.netloc.rstrip('/')
+    source_path = unquote(parsed_source_url.path)
+    source_query = parsed_source_url.query
+    result = [f"{source_scheme}://{source_hostname}{quote(source_path, safe='~/')}"]
+    if source_query:
+        result.append(source_query)
+    return '?'.join(result)
+
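+# Illustrative behavior of _encode_source_url (hypothetical values): the path
+# is unquoted then re-quoted, so raw and pre-encoded inputs serialize the same.
+#
+#     _encode_source_url("https://acct.blob.core.windows.net/c/my blob")
+#     # -> "https://acct.blob.core.windows.net/c/my%20blob"
+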
+def _upload_blob_options(  # pylint:disable=too-many-statements
+    data: Union[bytes, str, Iterable[AnyStr], AsyncIterable[AnyStr], IO[bytes]],
+    blob_type: Union[str, BlobType],
+    length: Optional[int],
+    metadata: Optional[Dict[str, str]],
+    encryption_options: Dict[str, Any],
+    config: "StorageConfiguration",
+    sdk_moniker: str,
+    client: "AzureBlobStorage",
+    **kwargs: Any
+) -> Dict[str, Any]:
+    encoding = kwargs.pop('encoding', 'UTF-8')
+    if isinstance(data, str):
+        data = data.encode(encoding)
+    if length is None:
+        length = get_length(data)
+    if isinstance(data, bytes):
+        data = data[:length]
+
+    stream: Optional[Any] = None
+    if isinstance(data, bytes):
+        stream = BytesIO(data)
+    elif hasattr(data, 'read'):
+        stream = data
+    elif hasattr(data, '__iter__') and not isinstance(data, (list, tuple, set, dict)):
+        stream = IterStreamer(data, encoding=encoding)
+    elif hasattr(data, '__aiter__'):
+        stream = AsyncIterStreamer(cast(AsyncGenerator, data), encoding=encoding)
+    else:
+        raise TypeError(f"Unsupported data type: {type(data)}")
+
+    validate_content = kwargs.pop('validate_content', False)
+    content_settings = kwargs.pop('content_settings', None)
+    overwrite = kwargs.pop('overwrite', False)
+    max_concurrency = kwargs.pop('max_concurrency', 1)
+    cpk = kwargs.pop('cpk', None)
+    cpk_info = None
+    if cpk:
+        cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash,
+                            encryption_algorithm=cpk.algorithm)
+    kwargs['cpk_info'] = cpk_info
+
+    headers = kwargs.pop('headers', {})
+    headers.update(add_metadata_headers(metadata))
+    kwargs['lease_access_conditions'] = get_access_conditions(kwargs.pop('lease', None))
+    kwargs['modified_access_conditions'] = get_modify_conditions(kwargs)
+    kwargs['cpk_scope_info'] = get_cpk_scope_info(kwargs)
+    if content_settings:
+        kwargs['blob_headers'] = BlobHTTPHeaders(
+            blob_cache_control=content_settings.cache_control,
+            blob_content_type=content_settings.content_type,
+            blob_content_md5=content_settings.content_md5,
+            blob_content_encoding=content_settings.content_encoding,
+            blob_content_language=content_settings.content_language,
+            blob_content_disposition=content_settings.content_disposition
+        )
+    kwargs['blob_tags_string'] = serialize_blob_tags_header(kwargs.pop('tags', None))
+    kwargs['stream'] = stream
+    kwargs['length'] = length
+    kwargs['overwrite'] = overwrite
+    kwargs['headers'] = headers
+    kwargs['validate_content'] = validate_content
+    kwargs['blob_settings'] = config
+    kwargs['max_concurrency'] = max_concurrency
+    kwargs['encryption_options'] = encryption_options
+    # Add feature flag to user agent for encryption
+    if encryption_options['key']:
+        modify_user_agent_for_encryption(
+            config.user_agent_policy.user_agent,
+            sdk_moniker,
+            encryption_options['version'],
+            kwargs)
+
+    if blob_type == BlobType.BlockBlob:
+        kwargs['client'] = client.block_blob
+    elif blob_type == BlobType.PageBlob:
+        if (encryption_options['version'] == '2.0' and
+            (encryption_options['required'] or encryption_options['key'] is not None)):
+            raise ValueError("Encryption version 2.0 does not currently support page blobs.")
+        kwargs['client'] = client.page_blob
+    elif blob_type == BlobType.AppendBlob:
+        if encryption_options['required'] or (encryption_options['key'] is not None):
+            raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION)
+        kwargs['client'] = client.append_blob
+    else:
+        raise ValueError(f"Unsupported BlobType: {blob_type}")
+    return kwargs
+
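+# Illustrative sketch (editor's note): _upload_blob_options first normalizes `data`
+# into a stream, then dispatches on blob_type. For a plain str payload with the
+# default 'UTF-8' encoding:
+#
+#     # "hello" -> b"hello" -> length 5 -> wrapped in BytesIO
+#     # blob_type == BlobType.BlockBlob -> kwargs['client'] = client.block_blob
+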
+def _upload_blob_from_url_options(source_url: str, **kwargs: Any) -> Dict[str, Any]:
+    metadata = kwargs.pop('metadata', None)
+    headers = kwargs.pop('headers', {})
+    headers.update(add_metadata_headers(metadata))
+    source_url = _encode_source_url(source_url=source_url)
+    tier = kwargs.pop('standard_blob_tier', None)
+    overwrite = kwargs.pop('overwrite', False)
+    content_settings = kwargs.pop('content_settings', None)
+    source_authorization = kwargs.pop('source_authorization', None)
+    if content_settings:
+        kwargs['blob_http_headers'] = BlobHTTPHeaders(
+            blob_cache_control=content_settings.cache_control,
+            blob_content_type=content_settings.content_type,
+            blob_content_md5=None,
+            blob_content_encoding=content_settings.content_encoding,
+            blob_content_language=content_settings.content_language,
+            blob_content_disposition=content_settings.content_disposition
+        )
+    cpk = kwargs.pop('cpk', None)
+    cpk_info = None
+    if cpk:
+        cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash,
+                            encryption_algorithm=cpk.algorithm)
+
+    options = {
+        'copy_source_authorization': source_authorization,
+        'content_length': 0,
+        'copy_source_blob_properties': kwargs.pop('include_source_blob_properties', True),
+        'source_content_md5': kwargs.pop('source_content_md5', None),
+        'copy_source': source_url,
+        'modified_access_conditions': get_modify_conditions(kwargs),
+        'blob_tags_string': serialize_blob_tags_header(kwargs.pop('tags', None)),
+        'cls': return_response_headers,
+        'lease_access_conditions': get_access_conditions(kwargs.pop('destination_lease', None)),
+        'tier': tier.value if tier else None,
+        'source_modified_access_conditions': get_source_conditions(kwargs),
+        'cpk_info': cpk_info,
+        'cpk_scope_info': get_cpk_scope_info(kwargs),
+        'headers': headers,
+    }
+    options.update(kwargs)
+    if not overwrite and not _any_conditions(**options):
+        options['modified_access_conditions'].if_none_match = '*'
+    return options
+
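+# Illustrative sketch (editor's note): with overwrite=False and no caller-supplied
+# access conditions, the helper injects If-None-Match: '*' so the service rejects
+# the upload if the destination blob already exists (hypothetical source URL):
+#
+#     options = _upload_blob_from_url_options("https://src.blob.core.windows.net/c/b")
+#     # options['modified_access_conditions'].if_none_match == '*'
+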
+def _download_blob_options(
+    blob_name: str,
+    container_name: str,
+    version_id: Optional[str],
+    offset: Optional[int],
+    length: Optional[int],
+    encoding: Optional[str],
+    encryption_options: Dict[str, Any],
+    config: "StorageConfiguration",
+    sdk_moniker: str,
+    client: "AzureBlobStorage",
+    **kwargs
+) -> Dict[str, Any]:
+    """Creates a dictionary containing the options for a download blob operation.
+
+    :param str blob_name:
+        The name of the blob.
+    :param str container_name:
+        The name of the container.
+    :param Optional[str] version_id:
+        The version id parameter is a value that, when present, specifies the version of the blob to download.
+    :param Optional[int] offset:
+        Start of byte range to use for downloading a section of the blob. Must be set if length is provided.
+    :param Optional[int] length:
+        Number of bytes to read from the stream. This is optional, but should be supplied for optimal performance.
+    :param Optional[str] encoding:
+        Encoding to decode the downloaded bytes. Default is None, i.e. no decoding.
+    :param Dict[str, Any] encryption_options:
+        The options for encryption, if enabled.
+    :param StorageConfiguration config:
+        The Storage configuration options.
+    :param str sdk_moniker:
+        The string representing the SDK package version.
+    :param AzureBlobStorage client:
+        The generated Blob Storage client.
+    :returns: A dictionary containing the download blob options.
+    :rtype: Dict[str, Any]
+    """
+    if length is not None:
+        if offset is None:
+            raise ValueError("Offset must be provided if length is provided.")
+        length = offset + length - 1  # Service actually uses an end-range inclusive index
+
+    validate_content = kwargs.pop('validate_content', False)
+    access_conditions = get_access_conditions(kwargs.pop('lease', None))
+    mod_conditions = get_modify_conditions(kwargs)
+
+    cpk = kwargs.pop('cpk', None)
+    cpk_info = None
+    if cpk:
+        cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash,
+                            encryption_algorithm=cpk.algorithm)
+
+    # Add feature flag to user agent for encryption
+    if encryption_options['key'] or encryption_options['resolver']:
+        modify_user_agent_for_encryption(
+            config.user_agent_policy.user_agent,
+            sdk_moniker,
+            encryption_options['version'],
+            kwargs)
+
+    options = {
+        'clients': client,
+        'config': config,
+        'start_range': offset,
+        'end_range': length,
+        'version_id': version_id,
+        'validate_content': validate_content,
+        'encryption_options': {
+            'required': encryption_options['required'],
+            'key': encryption_options['key'],
+            'resolver': encryption_options['resolver']},
+        'lease_access_conditions': access_conditions,
+        'modified_access_conditions': mod_conditions,
+        'cpk_info': cpk_info,
+        'download_cls': kwargs.pop('cls', None) or deserialize_blob_stream,
+        'max_concurrency': kwargs.pop('max_concurrency', 1),
+        'encoding': encoding,
+        'timeout': kwargs.pop('timeout', None),
+        'name': blob_name,
+        'container': container_name}
+    options.update(kwargs)
+    return options
+
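+# Illustrative sketch (editor's note): the service expects an end-inclusive byte
+# range, so a request for offset=0, length=512 becomes start 0 / end 511:
+#
+#     opts = _download_blob_options(..., offset=0, length=512, ...)  # other args elided
+#     # opts['start_range'] == 0, opts['end_range'] == 511
+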
+def _quick_query_options(snapshot: Optional[str], query_expression: str, **kwargs: Any) -> Tuple[Dict[str, Any], str]:
+    delimiter = '\n'
+    input_format = kwargs.pop('blob_format', None)
+    if input_format == QuickQueryDialect.DelimitedJson:
+        input_format = DelimitedJsonDialect()
+    if input_format == QuickQueryDialect.DelimitedText:
+        input_format = DelimitedTextDialect()
+    input_parquet_format = input_format == "ParquetDialect"
+    if input_format and not input_parquet_format:
+        try:
+            delimiter = input_format.lineterminator
+        except AttributeError:
+            try:
+                delimiter = input_format.delimiter
+            except AttributeError as exc:
+                raise ValueError("The Type of blob_format can only be DelimitedTextDialect or "
+                                    "DelimitedJsonDialect or ParquetDialect") from exc
+    output_format = kwargs.pop('output_format', None)
+    if output_format == QuickQueryDialect.DelimitedJson:
+        output_format = DelimitedJsonDialect()
+    if output_format == QuickQueryDialect.DelimitedText:
+        output_format = DelimitedTextDialect()
+    if output_format:
+        if output_format == "ParquetDialect":
+            raise ValueError("ParquetDialect is invalid as an output format.")
+        try:
+            delimiter = output_format.lineterminator
+        except AttributeError:
+            try:
+                delimiter = output_format.delimiter
+            except AttributeError:
+                pass
+    else:
+        output_format = input_format if not input_parquet_format else None
+    query_request = QueryRequest(
+        expression=query_expression,
+        input_serialization=serialize_query_format(input_format),
+        output_serialization=serialize_query_format(output_format)
+    )
+    access_conditions = get_access_conditions(kwargs.pop('lease', None))
+    mod_conditions = get_modify_conditions(kwargs)
+
+    cpk = kwargs.pop('cpk', None)
+    cpk_info = None
+    if cpk:
+        cpk_info = CpkInfo(
+            encryption_key=cpk.key_value,
+            encryption_key_sha256=cpk.key_hash,
+            encryption_algorithm=cpk.algorithm
+        )
+    options = {
+        'query_request': query_request,
+        'lease_access_conditions': access_conditions,
+        'modified_access_conditions': mod_conditions,
+        'cpk_info': cpk_info,
+        'snapshot': snapshot,
+        'timeout': kwargs.pop('timeout', None),
+        'cls': return_headers_and_deserialized,
+    }
+    options.update(kwargs)
+    return options, delimiter
+
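+# Illustrative sketch (editor's note): the second return value is the record
+# delimiter the caller uses to split the streamed query results. With a delimited
+# text input and no explicit output format, the input dialect's terminator wins:
+#
+#     options, delimiter = _quick_query_options(
+#         None, "SELECT * from BlobStorage", blob_format=DelimitedTextDialect())
+#     # delimiter == '\n' (the DelimitedTextDialect default lineterminator)
+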
+def _generic_delete_blob_options(delete_snapshots: Optional[str] = None, **kwargs: Any) -> Dict[str, Any]:
+    access_conditions = get_access_conditions(kwargs.pop('lease', None))
+    mod_conditions = get_modify_conditions(kwargs)
+    if delete_snapshots:
+        delete_snapshots = DeleteSnapshotsOptionType(delete_snapshots)
+    options = {
+        'timeout': kwargs.pop('timeout', None),
+        'snapshot': kwargs.pop('snapshot', None),  # this is added for delete_blobs
+        'delete_snapshots': delete_snapshots or None,
+        'lease_access_conditions': access_conditions,
+        'modified_access_conditions': mod_conditions}
+    options.update(kwargs)
+    return options
+
+def _delete_blob_options(
+    snapshot: Optional[str],
+    version_id: Optional[str],
+    delete_snapshots: Optional[str] = None,
+    **kwargs: Any
+) -> Dict[str, Any]:
+    if snapshot and delete_snapshots:
+        raise ValueError("The delete_snapshots option cannot be used with a specific snapshot.")
+    options = _generic_delete_blob_options(delete_snapshots, **kwargs)
+    options['snapshot'] = snapshot
+    options['version_id'] = version_id
+    options['blob_delete_type'] = kwargs.pop('blob_delete_type', None)
+    return options
+
+def _set_http_headers_options(content_settings: Optional["ContentSettings"] = None, **kwargs: Any) -> Dict[str, Any]:
+    access_conditions = get_access_conditions(kwargs.pop('lease', None))
+    mod_conditions = get_modify_conditions(kwargs)
+    blob_headers = None
+    if content_settings:
+        blob_headers = BlobHTTPHeaders(
+            blob_cache_control=content_settings.cache_control,
+            blob_content_type=content_settings.content_type,
+            blob_content_md5=content_settings.content_md5,
+            blob_content_encoding=content_settings.content_encoding,
+            blob_content_language=content_settings.content_language,
+            blob_content_disposition=content_settings.content_disposition
+        )
+    options = {
+        'timeout': kwargs.pop('timeout', None),
+        'blob_http_headers': blob_headers,
+        'lease_access_conditions': access_conditions,
+        'modified_access_conditions': mod_conditions,
+        'cls': return_response_headers}
+    options.update(kwargs)
+    return options
+
+def _set_blob_metadata_options(metadata: Optional[Dict[str, str]] = None, **kwargs: Any):
+    headers = kwargs.pop('headers', {})
+    headers.update(add_metadata_headers(metadata))
+    access_conditions = get_access_conditions(kwargs.pop('lease', None))
+    mod_conditions = get_modify_conditions(kwargs)
+    cpk_scope_info = get_cpk_scope_info(kwargs)
+
+    cpk = kwargs.pop('cpk', None)
+    cpk_info = None
+    if cpk:
+        cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash,
+                            encryption_algorithm=cpk.algorithm)
+    options = {
+        'timeout': kwargs.pop('timeout', None),
+        'lease_access_conditions': access_conditions,
+        'modified_access_conditions': mod_conditions,
+        'cpk_scope_info': cpk_scope_info,
+        'cpk_info': cpk_info,
+        'cls': return_response_headers,
+        'headers': headers}
+    options.update(kwargs)
+    return options
+
+def _create_page_blob_options(
+    size: int,
+    content_settings: Optional["ContentSettings"] = None,
+    metadata: Optional[Dict[str, str]] = None,
+    premium_page_blob_tier: Optional[Union[str, "PremiumPageBlobTier"]] = None,
+    **kwargs: Any
+) -> Dict[str, Any]:
+    headers = kwargs.pop('headers', {})
+    headers.update(add_metadata_headers(metadata))
+    access_conditions = get_access_conditions(kwargs.pop('lease', None))
+    mod_conditions = get_modify_conditions(kwargs)
+    cpk_scope_info = get_cpk_scope_info(kwargs)
+    blob_headers = None
+    if content_settings:
+        blob_headers = BlobHTTPHeaders(
+            blob_cache_control=content_settings.cache_control,
+            blob_content_type=content_settings.content_type,
+            blob_content_md5=content_settings.content_md5,
+            blob_content_encoding=content_settings.content_encoding,
+            blob_content_language=content_settings.content_language,
+            blob_content_disposition=content_settings.content_disposition
+        )
+
+    sequence_number = kwargs.pop('sequence_number', None)
+    cpk = kwargs.pop('cpk', None)
+    cpk_info = None
+    if cpk:
+        cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash,
+                            encryption_algorithm=cpk.algorithm)
+
+    immutability_policy = kwargs.pop('immutability_policy', None)
+    if immutability_policy:
+        kwargs['immutability_policy_expiry'] = immutability_policy.expiry_time
+        kwargs['immutability_policy_mode'] = immutability_policy.policy_mode
+
+    tier = None
+    if premium_page_blob_tier:
+        try:
+            tier = premium_page_blob_tier.value  # type: ignore
+        except AttributeError:
+            tier = premium_page_blob_tier  # type: ignore
+
+    blob_tags_string = serialize_blob_tags_header(kwargs.pop('tags', None))
+
+    options = {
+        'content_length': 0,
+        'blob_content_length': size,
+        'blob_sequence_number': sequence_number,
+        'blob_http_headers': blob_headers,
+        'timeout': kwargs.pop('timeout', None),
+        'lease_access_conditions': access_conditions,
+        'modified_access_conditions': mod_conditions,
+        'cpk_scope_info': cpk_scope_info,
+        'cpk_info': cpk_info,
+        'blob_tags_string': blob_tags_string,
+        'cls': return_response_headers,
+        "tier": tier,
+        'headers': headers}
+    options.update(kwargs)
+    return options
+
+def _create_append_blob_options(
+    content_settings: Optional["ContentSettings"] = None,
+    metadata: Optional[Dict[str, str]] = None,
+    **kwargs: Any
+) -> Dict[str, Any]:
+    headers = kwargs.pop('headers', {})
+    headers.update(add_metadata_headers(metadata))
+    access_conditions = get_access_conditions(kwargs.pop('lease', None))
+    mod_conditions = get_modify_conditions(kwargs)
+    cpk_scope_info = get_cpk_scope_info(kwargs)
+    blob_headers = None
+    if content_settings:
+        blob_headers = BlobHTTPHeaders(
+            blob_cache_control=content_settings.cache_control,
+            blob_content_type=content_settings.content_type,
+            blob_content_md5=content_settings.content_md5,
+            blob_content_encoding=content_settings.content_encoding,
+            blob_content_language=content_settings.content_language,
+            blob_content_disposition=content_settings.content_disposition
+        )
+
+    cpk = kwargs.pop('cpk', None)
+    cpk_info = None
+    if cpk:
+        cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash,
+                            encryption_algorithm=cpk.algorithm)
+
+    immutability_policy = kwargs.pop('immutability_policy', None)
+    if immutability_policy:
+        kwargs['immutability_policy_expiry'] = immutability_policy.expiry_time
+        kwargs['immutability_policy_mode'] = immutability_policy.policy_mode
+
+    blob_tags_string = serialize_blob_tags_header(kwargs.pop('tags', None))
+
+    options = {
+        'content_length': 0,
+        'blob_http_headers': blob_headers,
+        'timeout': kwargs.pop('timeout', None),
+        'lease_access_conditions': access_conditions,
+        'modified_access_conditions': mod_conditions,
+        'cpk_scope_info': cpk_scope_info,
+        'cpk_info': cpk_info,
+        'blob_tags_string': blob_tags_string,
+        'cls': return_response_headers,
+        'headers': headers}
+    options.update(kwargs)
+    return options
+
+def _create_snapshot_options(metadata: Optional[Dict[str, str]] = None, **kwargs: Any) -> Dict[str, Any]:
+    headers = kwargs.pop('headers', {})
+    headers.update(add_metadata_headers(metadata))
+    access_conditions = get_access_conditions(kwargs.pop('lease', None))
+    mod_conditions = get_modify_conditions(kwargs)
+    cpk_scope_info = get_cpk_scope_info(kwargs)
+    cpk = kwargs.pop('cpk', None)
+    cpk_info = None
+    if cpk:
+        cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash,
+                            encryption_algorithm=cpk.algorithm)
+
+    options = {
+        'timeout': kwargs.pop('timeout', None),
+        'lease_access_conditions': access_conditions,
+        'modified_access_conditions': mod_conditions,
+        'cpk_scope_info': cpk_scope_info,
+        'cpk_info': cpk_info,
+        'cls': return_response_headers,
+        'headers': headers}
+    options.update(kwargs)
+    return options
+
+def _start_copy_from_url_options(  # pylint:disable=too-many-statements
+    source_url: str,
+    metadata: Optional[Dict[str, str]] = None,
+    incremental_copy: bool = False,
+    **kwargs: Any
+) -> Dict[str, Any]:
+    source_url = _encode_source_url(source_url=source_url)
+    headers = kwargs.pop('headers', {})
+    headers.update(add_metadata_headers(metadata))
+    if 'source_lease' in kwargs:
+        source_lease = kwargs.pop('source_lease')
+        try:
+            headers['x-ms-source-lease-id'] = source_lease.id
+        except AttributeError:
+            headers['x-ms-source-lease-id'] = source_lease
+
+    tier = kwargs.pop('premium_page_blob_tier', None) or kwargs.pop('standard_blob_tier', None)
+    tags = kwargs.pop('tags', None)
+
+    # Options only available for sync copy
+    requires_sync = kwargs.pop('requires_sync', None)
+    encryption_scope_str = kwargs.pop('encryption_scope', None)
+    source_authorization = kwargs.pop('source_authorization', None)
+    # If tags is a str, interpret that as copy_source_tags
+    copy_source_tags = isinstance(tags, str)
+
+    if incremental_copy:
+        if source_authorization:
+            raise ValueError("Source authorization tokens are not applicable for incremental copying.")
+        if copy_source_tags:
+            raise ValueError("Copying source tags is not applicable for incremental copying.")
+
+    # TODO: refactor start_copy_from_url api in _blob_client.py. Call _generated/_blob_operations.py copy_from_url
+    #  when requires_sync=True is set.
+    #  Currently both sync copy and async copy are calling _generated/_blob_operations.py start_copy_from_url.
+    #  As sync copy diverges more from async copy, more problems will surface.
+    if requires_sync is True:
+        headers['x-ms-requires-sync'] = str(requires_sync)
+        if encryption_scope_str:
+            headers['x-ms-encryption-scope'] = encryption_scope_str
+        if source_authorization:
+            headers['x-ms-copy-source-authorization'] = source_authorization
+        if copy_source_tags:
+            headers['x-ms-copy-source-tag-option'] = tags
+    else:
+        if encryption_scope_str:
+            raise ValueError(
+                "encryption_scope is only supported for sync copy; please specify requires_sync=True")
+        if source_authorization:
+            raise ValueError(
+                "Source authorization tokens are only supported for sync copy; please specify requires_sync=True")
+        if copy_source_tags:
+            raise ValueError(
+                "Copying source tags is only supported for sync copy; please specify requires_sync=True")
+
+    timeout = kwargs.pop('timeout', None)
+    dest_mod_conditions = get_modify_conditions(kwargs)
+    blob_tags_string = serialize_blob_tags_header(tags) if not copy_source_tags else None
+
+    immutability_policy = kwargs.pop('immutability_policy', None)
+    if immutability_policy:
+        kwargs['immutability_policy_expiry'] = immutability_policy.expiry_time
+        kwargs['immutability_policy_mode'] = immutability_policy.policy_mode
+
+    options = {
+        'copy_source': source_url,
+        'seal_blob': kwargs.pop('seal_destination_blob', None),
+        'timeout': timeout,
+        'modified_access_conditions': dest_mod_conditions,
+        'blob_tags_string': blob_tags_string,
+        'headers': headers,
+        'cls': return_response_headers,
+    }
+    if not incremental_copy:
+        source_mod_conditions = get_source_conditions(kwargs)
+        dest_access_conditions = get_access_conditions(kwargs.pop('destination_lease', None))
+        options['source_modified_access_conditions'] = source_mod_conditions
+        options['lease_access_conditions'] = dest_access_conditions
+        options['tier'] = tier.value if tier else None
+    options.update(kwargs)
+    return options
+
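+# Illustrative sketch (editor's note): the sync-copy-only options travel as raw
+# request headers rather than generated-model parameters (hypothetical source URL):
+#
+#     options = _start_copy_from_url_options(
+#         "https://src.blob.core.windows.net/c/b", requires_sync=True)
+#     # options['headers']['x-ms-requires-sync'] == 'True'
+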
+def _abort_copy_options(copy_id: Union[str, Dict[str, Any], BlobProperties], **kwargs: Any) -> Dict[str, Any]:
+    access_conditions = get_access_conditions(kwargs.pop('lease', None))
+    if isinstance(copy_id, BlobProperties):
+        copy_id = copy_id.copy.id  # type: ignore [assignment]
+    elif isinstance(copy_id, dict):
+        copy_id = copy_id['copy_id']
+    options = {
+        'copy_id': copy_id,
+        'lease_access_conditions': access_conditions,
+        'timeout': kwargs.pop('timeout', None)}
+    options.update(kwargs)
+    return options
+
+def _stage_block_options(
+    block_id: str,
+    data: Union[bytes, str, Iterable[AnyStr], IO[AnyStr]],
+    length: Optional[int] = None,
+    **kwargs: Any
+) -> Dict[str, Any]:
+    block_id = encode_base64(str(block_id))
+    if isinstance(data, str):
+        data = data.encode(kwargs.pop('encoding', 'UTF-8'))  # type: ignore
+    access_conditions = get_access_conditions(kwargs.pop('lease', None))
+    if length is None:
+        length = get_length(data)
+        if length is None:
+            length, data = read_length(data)
+    if isinstance(data, bytes):
+        data = data[:length]
+
+    validate_content = kwargs.pop('validate_content', False)
+    cpk_scope_info = get_cpk_scope_info(kwargs)
+    cpk = kwargs.pop('cpk', None)
+    cpk_info = None
+    if cpk:
+        cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash,
+                            encryption_algorithm=cpk.algorithm)
+
+    options = {
+        'block_id': block_id,
+        'content_length': length,
+        'body': data,
+        'transactional_content_md5': None,
+        'timeout': kwargs.pop('timeout', None),
+        'lease_access_conditions': access_conditions,
+        'validate_content': validate_content,
+        'cpk_scope_info': cpk_scope_info,
+        'cpk_info': cpk_info,
+        'cls': return_response_headers,
+    }
+    options.update(kwargs)
+    return options
+
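+# Illustrative sketch (editor's note): block ids are sent base64-encoded, so the
+# caller-supplied raw id never appears on the wire as-is:
+#
+#     _stage_block_options('block-001', b'payload')['block_id']
+#     # == encode_base64('block-001') == 'YmxvY2stMDAx'
+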
+def _stage_block_from_url_options(
+    block_id: str,
+    source_url: str,
+    source_offset: Optional[int] = None,
+    source_length: Optional[int] = None,
+    source_content_md5: Optional[Union[bytes, bytearray]] = None,
+    **kwargs: Any
+) -> Dict[str, Any]:
+    source_url = _encode_source_url(source_url=source_url)
+    source_authorization = kwargs.pop('source_authorization', None)
+    if source_length is not None and source_offset is None:
+        raise ValueError("Source offset value must not be None if length is set.")
+    if source_length is not None and source_offset is not None:
+        source_length = source_offset + source_length - 1
+    block_id = encode_base64(str(block_id))
+    access_conditions = get_access_conditions(kwargs.pop('lease', None))
+    range_header = None
+    if source_offset is not None:
+        range_header, _ = validate_and_format_range_headers(source_offset, source_length)
+
+    cpk_scope_info = get_cpk_scope_info(kwargs)
+    cpk = kwargs.pop('cpk', None)
+    cpk_info = None
+    if cpk:
+        cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash,
+                            encryption_algorithm=cpk.algorithm)
+    options = {
+        'copy_source_authorization': source_authorization,
+        'block_id': block_id,
+        'content_length': 0,
+        'source_url': source_url,
+        'source_range': range_header,
+        'source_content_md5': bytearray(source_content_md5) if source_content_md5 else None,
+        'timeout': kwargs.pop('timeout', None),
+        'lease_access_conditions': access_conditions,
+        'cpk_scope_info': cpk_scope_info,
+        'cpk_info': cpk_info,
+        'cls': return_response_headers,
+    }
+    options.update(kwargs)
+    return options
+
+def _get_block_list_result(blocks: BlockList) -> Tuple[List[BlobBlock], List[BlobBlock]]:
+    committed = []
+    uncommitted = []
+    if blocks.committed_blocks:
+        committed = [BlobBlock._from_generated(b) for b in blocks.committed_blocks]  # pylint: disable=protected-access
+    if blocks.uncommitted_blocks:
+        uncommitted = [BlobBlock._from_generated(b) for b in blocks.uncommitted_blocks]  # pylint: disable=protected-access
+    return committed, uncommitted
+
+def _commit_block_list_options(
+    block_list: List[BlobBlock],
+    content_settings: Optional["ContentSettings"] = None,
+    metadata: Optional[Dict[str, str]] = None,
+    **kwargs: Any
+) -> Dict[str, Any]:
+    block_lookup = BlockLookupList(committed=[], uncommitted=[], latest=[])
+    for block in block_list:
+        if isinstance(block, BlobBlock):
+            if block.state.value == 'committed':
+                cast(List[str], block_lookup.committed).append(encode_base64(str(block.id)))
+            elif block.state.value == 'uncommitted':
+                cast(List[str], block_lookup.uncommitted).append(encode_base64(str(block.id)))
+            elif block_lookup.latest is not None:
+                block_lookup.latest.append(encode_base64(str(block.id)))
+        else:
+            block_lookup.latest.append(encode_base64(str(block)))
+    headers = kwargs.pop('headers', {})
+    headers.update(add_metadata_headers(metadata))
+    blob_headers = None
+    access_conditions = get_access_conditions(kwargs.pop('lease', None))
+    mod_conditions = get_modify_conditions(kwargs)
+    if content_settings:
+        blob_headers = BlobHTTPHeaders(
+            blob_cache_control=content_settings.cache_control,
+            blob_content_type=content_settings.content_type,
+            blob_content_md5=content_settings.content_md5,
+            blob_content_encoding=content_settings.content_encoding,
+            blob_content_language=content_settings.content_language,
+            blob_content_disposition=content_settings.content_disposition
+        )
+
+    validate_content = kwargs.pop('validate_content', False)
+    cpk_scope_info = get_cpk_scope_info(kwargs)
+    cpk = kwargs.pop('cpk', None)
+    cpk_info = None
+    if cpk:
+        cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash,
+                            encryption_algorithm=cpk.algorithm)
+
+    immutability_policy = kwargs.pop('immutability_policy', None)
+    if immutability_policy:
+        kwargs['immutability_policy_expiry'] = immutability_policy.expiry_time
+        kwargs['immutability_policy_mode'] = immutability_policy.policy_mode
+
+    tier = kwargs.pop('standard_blob_tier', None)
+    blob_tags_string = serialize_blob_tags_header(kwargs.pop('tags', None))
+
+    options = {
+        'blocks': block_lookup,
+        'blob_http_headers': blob_headers,
+        'lease_access_conditions': access_conditions,
+        'timeout': kwargs.pop('timeout', None),
+        'modified_access_conditions': mod_conditions,
+        'cls': return_response_headers,
+        'validate_content': validate_content,
+        'cpk_scope_info': cpk_scope_info,
+        'cpk_info': cpk_info,
+        'tier': tier.value if tier else None,
+        'blob_tags_string': blob_tags_string,
+        'headers': headers
+    }
+    options.update(kwargs)
+    return options
+
+def _set_blob_tags_options(
+    version_id: Optional[str],
+    tags: Optional[Dict[str, str]] = None,
+    **kwargs: Any
+) -> Dict[str, Any]:
+    serialized_tags = serialize_blob_tags(tags)
+    access_conditions = get_access_conditions(kwargs.pop('lease', None))
+    mod_conditions = get_modify_conditions(kwargs)
+
+    options = {
+        'tags': serialized_tags,
+        'lease_access_conditions': access_conditions,
+        'modified_access_conditions': mod_conditions,
+        'version_id': version_id,
+        'cls': return_response_headers}
+    options.update(kwargs)
+    return options
+
+def _get_blob_tags_options(version_id: Optional[str], snapshot: Optional[str], **kwargs: Any) -> Dict[str, Any]:
+    access_conditions = get_access_conditions(kwargs.pop('lease', None))
+    mod_conditions = get_modify_conditions(kwargs)
+
+    options = {
+        'version_id': version_id,
+        'snapshot': snapshot,
+        'lease_access_conditions': access_conditions,
+        'modified_access_conditions': mod_conditions,
+        'timeout': kwargs.pop('timeout', None),
+        'cls': return_headers_and_deserialized}
+    return options
+
+def _get_page_ranges_options(
+    snapshot: Optional[str],
+    offset: Optional[int] = None,
+    length: Optional[int] = None,
+    previous_snapshot_diff: Optional[Union[str, Dict[str, Any]]] = None,
+    **kwargs: Any
+) -> Dict[str, Any]:
+    access_conditions = get_access_conditions(kwargs.pop('lease', None))
+    mod_conditions = get_modify_conditions(kwargs)
+    if length is not None and offset is None:
+        raise ValueError("Offset value must not be None if length is set.")
+    if length is not None and offset is not None:
+        length = offset + length - 1  # Reformat to an inclusive range index
+    page_range, _ = validate_and_format_range_headers(
+        offset, length, start_range_required=False, end_range_required=False, align_to_page=True
+    )
+    options = {
+        'snapshot': snapshot,
+        'lease_access_conditions': access_conditions,
+        'modified_access_conditions': mod_conditions,
+        'timeout': kwargs.pop('timeout', None),
+        'range': page_range}
+    if previous_snapshot_diff:
+        try:
+            options['prevsnapshot'] = previous_snapshot_diff.snapshot # type: ignore
+        except AttributeError:
+            try:
+                options['prevsnapshot'] = previous_snapshot_diff['snapshot'] # type: ignore
+            except TypeError:
+                options['prevsnapshot'] = previous_snapshot_diff
+    options.update(kwargs)
+    return options
+
+def _set_sequence_number_options(
+    sequence_number_action: str,
+    sequence_number: Optional[str] = None,
+    **kwargs: Any
+) -> Dict[str, Any]:
+    access_conditions = get_access_conditions(kwargs.pop('lease', None))
+    mod_conditions = get_modify_conditions(kwargs)
+    if sequence_number_action is None:
+        raise ValueError("A sequence number action must be specified")
+    options = {
+        'sequence_number_action': sequence_number_action,
+        'timeout': kwargs.pop('timeout', None),
+        'blob_sequence_number': sequence_number,
+        'lease_access_conditions': access_conditions,
+        'modified_access_conditions': mod_conditions,
+        'cls': return_response_headers}
+    options.update(kwargs)
+    return options
+
+def _resize_blob_options(size: int, **kwargs: Any) -> Dict[str, Any]:
+    access_conditions = get_access_conditions(kwargs.pop('lease', None))
+    mod_conditions = get_modify_conditions(kwargs)
+    if size is None:
+        raise ValueError("A content length must be specified for a Page Blob.")
+
+    cpk = kwargs.pop('cpk', None)
+    cpk_info = None
+    if cpk:
+        cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash,
+                            encryption_algorithm=cpk.algorithm)
+    options = {
+        'blob_content_length': size,
+        'timeout': kwargs.pop('timeout', None),
+        'lease_access_conditions': access_conditions,
+        'modified_access_conditions': mod_conditions,
+        'cpk_info': cpk_info,
+        'cls': return_response_headers}
+    options.update(kwargs)
+    return options
+
+def _upload_page_options(
+    page: bytes,
+    offset: int,
+    length: int,
+    **kwargs: Any
+) -> Dict[str, Any]:
+    if isinstance(page, str):
+        page = page.encode(kwargs.pop('encoding', 'UTF-8'))
+    if offset is None or offset % 512 != 0:
+        raise ValueError("offset must be an integer multiple of 512 (the page size)")
+    if length is None or length % 512 != 0:
+        raise ValueError("length must be an integer multiple of 512 (the page size)")
+    end_range = offset + length - 1  # Reformat to an inclusive range index
+    content_range = f'bytes={offset}-{end_range}' # type: ignore
+    access_conditions = get_access_conditions(kwargs.pop('lease', None))
+    seq_conditions = SequenceNumberAccessConditions(
+        if_sequence_number_less_than_or_equal_to=kwargs.pop('if_sequence_number_lte', None),
+        if_sequence_number_less_than=kwargs.pop('if_sequence_number_lt', None),
+        if_sequence_number_equal_to=kwargs.pop('if_sequence_number_eq', None)
+    )
+    mod_conditions = get_modify_conditions(kwargs)
+    cpk_scope_info = get_cpk_scope_info(kwargs)
+    validate_content = kwargs.pop('validate_content', False)
+    cpk = kwargs.pop('cpk', None)
+    cpk_info = None
+    if cpk:
+        cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash,
+                            encryption_algorithm=cpk.algorithm)
+    options = {
+        'body': page[:length],
+        'content_length': length,
+        'transactional_content_md5': None,
+        'timeout': kwargs.pop('timeout', None),
+        'range': content_range,
+        'lease_access_conditions': access_conditions,
+        'sequence_number_access_conditions': seq_conditions,
+        'modified_access_conditions': mod_conditions,
+        'validate_content': validate_content,
+        'cpk_scope_info': cpk_scope_info,
+        'cpk_info': cpk_info,
+        'cls': return_response_headers}
+    options.update(kwargs)
+    return options
+
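+# Illustrative sketch (editor's note): both offset and length must be 512-aligned,
+# and the wire format is an inclusive byte range:
+#
+#     opts = _upload_page_options(b'\x00' * 1024, offset=0, length=1024)
+#     # opts['range'] == 'bytes=0-1023'
+#     _upload_page_options(b'\x00' * 10, offset=0, length=10)  # raises ValueError
+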
+def _upload_pages_from_url_options(
+    source_url: str,
+    offset: int,
+    length: int,
+    source_offset: int,
+    **kwargs: Any
+) -> Dict[str, Any]:
+    source_url = _encode_source_url(source_url=source_url)
+    # TODO: extract the code to a method format_range
+    if offset is None or offset % 512 != 0:
+        raise ValueError("offset must be an integer multiple of 512 (the page size)")
+    if length is None or length % 512 != 0:
+        raise ValueError("length must be an integer multiple of 512 (the page size)")
+    if source_offset is None or source_offset % 512 != 0:
+        raise ValueError("source_offset must be an integer multiple of 512 (the page size)")
+
+    # Format ranges as inclusive byte indices
+    end_range = offset + length - 1
+    destination_range = f'bytes={offset}-{end_range}'
+    source_range = f'bytes={source_offset}-{source_offset + length - 1}'
+
+    seq_conditions = SequenceNumberAccessConditions(
+        if_sequence_number_less_than_or_equal_to=kwargs.pop('if_sequence_number_lte', None),
+        if_sequence_number_less_than=kwargs.pop('if_sequence_number_lt', None),
+        if_sequence_number_equal_to=kwargs.pop('if_sequence_number_eq', None)
+    )
+    source_authorization = kwargs.pop('source_authorization', None)
+    access_conditions = get_access_conditions(kwargs.pop('lease', None))
+    mod_conditions = get_modify_conditions(kwargs)
+    source_mod_conditions = get_source_conditions(kwargs)
+    cpk_scope_info = get_cpk_scope_info(kwargs)
+    source_content_md5 = kwargs.pop('source_content_md5', None)
+    cpk = kwargs.pop('cpk', None)
+    cpk_info = None
+    if cpk:
+        cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash,
+                            encryption_algorithm=cpk.algorithm)
+
+    options = {
+        'copy_source_authorization': source_authorization,
+        'source_url': source_url,
+        'content_length': 0,
+        'source_range': source_range,
+        'range': destination_range,
+        'source_content_md5': bytearray(source_content_md5) if source_content_md5 else None,
+        'timeout': kwargs.pop('timeout', None),
+        'lease_access_conditions': access_conditions,
+        'sequence_number_access_conditions': seq_conditions,
+        'modified_access_conditions': mod_conditions,
+        'source_modified_access_conditions': source_mod_conditions,
+        'cpk_scope_info': cpk_scope_info,
+        'cpk_info': cpk_info,
+        'cls': return_response_headers}
+    options.update(kwargs)
+    return options
+
+def _clear_page_options(
+    offset: int,
+    length: int,
+    **kwargs: Any
+) -> Dict[str, Any]:
+    access_conditions = get_access_conditions(kwargs.pop('lease', None))
+    seq_conditions = SequenceNumberAccessConditions(
+        if_sequence_number_less_than_or_equal_to=kwargs.pop('if_sequence_number_lte', None),
+        if_sequence_number_less_than=kwargs.pop('if_sequence_number_lt', None),
+        if_sequence_number_equal_to=kwargs.pop('if_sequence_number_eq', None)
+    )
+    mod_conditions = get_modify_conditions(kwargs)
+    if offset is None or offset % 512 != 0:
+        raise ValueError("offset must be an integer multiple of 512 (the page size)")
+    if length is None or length % 512 != 0:
+        raise ValueError("length must be an integer multiple of 512 (the page size)")
+    end_range = length + offset - 1  # Reformat to an inclusive range index
+    content_range = f'bytes={offset}-{end_range}'
+
+    cpk = kwargs.pop('cpk', None)
+    cpk_info = None
+    if cpk:
+        cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash,
+                            encryption_algorithm=cpk.algorithm)
+
+    options = {
+        'content_length': 0,
+        'timeout': kwargs.pop('timeout', None),
+        'range': content_range,
+        'lease_access_conditions': access_conditions,
+        'sequence_number_access_conditions': seq_conditions,
+        'modified_access_conditions': mod_conditions,
+        'cpk_info': cpk_info,
+        'cls': return_response_headers}
+    options.update(kwargs)
+    return options
+
+def _append_block_options(
+    data: Union[bytes, str, Iterable[AnyStr], IO[AnyStr]],
+    length: Optional[int] = None,
+    **kwargs: Any
+) -> Dict[str, Any]:
+    if isinstance(data, str):
+        data = data.encode(kwargs.pop('encoding', 'UTF-8'))
+    if length is None:
+        length = get_length(data)
+        if length is None:
+            length, data = read_length(data)
+    if length == 0:
+        return {}
+    if isinstance(data, bytes):
+        data = data[:length]
+
+    appendpos_condition = kwargs.pop('appendpos_condition', None)
+    maxsize_condition = kwargs.pop('maxsize_condition', None)
+    validate_content = kwargs.pop('validate_content', False)
+    append_conditions = None
+    if maxsize_condition is not None or appendpos_condition is not None:
+        append_conditions = AppendPositionAccessConditions(
+            max_size=maxsize_condition,
+            append_position=appendpos_condition
+        )
+    access_conditions = get_access_conditions(kwargs.pop('lease', None))
+    mod_conditions = get_modify_conditions(kwargs)
+    cpk_scope_info = get_cpk_scope_info(kwargs)
+    cpk = kwargs.pop('cpk', None)
+    cpk_info = None
+    if cpk:
+        cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash,
+                            encryption_algorithm=cpk.algorithm)
+    options = {
+        'body': data,
+        'content_length': length,
+        'timeout': kwargs.pop('timeout', None),
+        'transactional_content_md5': None,
+        'lease_access_conditions': access_conditions,
+        'append_position_access_conditions': append_conditions,
+        'modified_access_conditions': mod_conditions,
+        'validate_content': validate_content,
+        'cpk_scope_info': cpk_scope_info,
+        'cpk_info': cpk_info,
+        'cls': return_response_headers}
+    options.update(kwargs)
+    return options
+
+def _append_block_from_url_options(
+    copy_source_url: str,
+    source_offset: Optional[int] = None,
+    source_length: Optional[int] = None,
+    **kwargs: Any
+) -> Dict[str, Any]:
+    copy_source_url = _encode_source_url(source_url=copy_source_url)
+    # If end range is provided, start range must be provided
+    if source_length is not None and source_offset is None:
+        raise ValueError("source_offset should also be specified if source_length is specified")
+    # Format based on whether length is present
+    source_range = None
+    if source_length is not None and source_offset is not None:
+        end_range = source_offset + source_length - 1
+        source_range = f'bytes={source_offset}-{end_range}'
+    elif source_offset is not None:
+        source_range = f"bytes={source_offset}-"
+
+    appendpos_condition = kwargs.pop('appendpos_condition', None)
+    maxsize_condition = kwargs.pop('maxsize_condition', None)
+    source_content_md5 = kwargs.pop('source_content_md5', None)
+    append_conditions = None
+    if maxsize_condition is not None or appendpos_condition is not None:
+        append_conditions = AppendPositionAccessConditions(
+            max_size=maxsize_condition,
+            append_position=appendpos_condition
+        )
+    source_authorization = kwargs.pop('source_authorization', None)
+    access_conditions = get_access_conditions(kwargs.pop('lease', None))
+    mod_conditions = get_modify_conditions(kwargs)
+    source_mod_conditions = get_source_conditions(kwargs)
+    cpk_scope_info = get_cpk_scope_info(kwargs)
+    cpk = kwargs.pop('cpk', None)
+    cpk_info = None
+    if cpk:
+        cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash,
+                            encryption_algorithm=cpk.algorithm)
+
+    options = {
+        'copy_source_authorization': source_authorization,
+        'source_url': copy_source_url,
+        'content_length': 0,
+        'source_range': source_range,
+        'source_content_md5': source_content_md5,
+        'transactional_content_md5': None,
+        'lease_access_conditions': access_conditions,
+        'append_position_access_conditions': append_conditions,
+        'modified_access_conditions': mod_conditions,
+        'source_modified_access_conditions': source_mod_conditions,
+        'cpk_scope_info': cpk_scope_info,
+        'cpk_info': cpk_info,
+        'cls': return_response_headers,
+        'timeout': kwargs.pop('timeout', None)}
+    options.update(kwargs)
+    return options
+
+def _seal_append_blob_options(**kwargs: Any) -> Dict[str, Any]:
+    appendpos_condition = kwargs.pop('appendpos_condition', None)
+    append_conditions = None
+    if appendpos_condition is not None:
+        append_conditions = AppendPositionAccessConditions(
+            append_position=appendpos_condition
+        )
+    access_conditions = get_access_conditions(kwargs.pop('lease', None))
+    mod_conditions = get_modify_conditions(kwargs)
+
+    options = {
+        'timeout': kwargs.pop('timeout', None),
+        'lease_access_conditions': access_conditions,
+        'append_position_access_conditions': append_conditions,
+        'modified_access_conditions': mod_conditions,
+        'cls': return_response_headers}
+    options.update(kwargs)
+    return options
+
+def _from_blob_url(
+    blob_url: str,
+    snapshot: Optional[Union[BlobProperties, str, Dict[str, Any]]]
+) -> Tuple[str, str, str, Optional[str]]:
+    try:
+        if not blob_url.lower().startswith('http'):
+            blob_url = "https://" + blob_url
+    except AttributeError as exc:
+        raise ValueError("Blob URL must be a string.") from exc
+    parsed_url = urlparse(blob_url.rstrip('/'))
+
+    if not parsed_url.netloc:
+        raise ValueError(f"Invalid URL: {blob_url}")
+
+    account_path = ""
+    if ".core." in parsed_url.netloc:
+        # ".core." indicates a non-customized URL, so a blob name containing directory info can also be parsed.
+        path_blob = parsed_url.path.lstrip('/').split('/', maxsplit=1)
+    elif "localhost" in parsed_url.netloc or "127.0.0.1" in parsed_url.netloc:
+        path_blob = parsed_url.path.lstrip('/').split('/', maxsplit=2)
+        account_path += '/' + path_blob[0]
+    else:
+        # For a customized URL, a blob name that contains directory info cannot be parsed.
+        path_blob = parsed_url.path.lstrip('/').split('/')
+        if len(path_blob) > 2:
+            account_path = "/" + "/".join(path_blob[:-2])
+
+    account_url = f"{parsed_url.scheme}://{parsed_url.netloc.rstrip('/')}{account_path}?{parsed_url.query}"
+
+    msg_invalid_url = "Invalid URL. Provide a blob_url with a valid blob and container name."
+    if len(path_blob) <= 1:
+        raise ValueError(msg_invalid_url)
+    container_name, blob_name = unquote(path_blob[-2]), unquote(path_blob[-1])
+    if not container_name or not blob_name:
+        raise ValueError(msg_invalid_url)
+
+    path_snapshot, _ = parse_query(parsed_url.query)
+    if snapshot:
+        if isinstance(snapshot, BlobProperties):
+            path_snapshot = snapshot.snapshot
+        elif isinstance(snapshot, dict):
+            path_snapshot = snapshot['snapshot']
+        else:
+            path_snapshot = snapshot
+    return (account_url, container_name, blob_name, path_snapshot)
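+
+# Illustrative sketch (editor's note): for a standard *.core.* endpoint the path is
+# split only once, so blob names containing '/' survive intact (hypothetical account;
+# note the bare trailing '?' when the URL carries no SAS/query):
+#
+#     _from_blob_url("https://acct.blob.core.windows.net/cont/dir/blob.txt", None)
+#     # -> ("https://acct.blob.core.windows.net?", "cont", "dir/blob.txt", None)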
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/blob/_blob_service_client.py b/.venv/lib/python3.12/site-packages/azure/storage/blob/_blob_service_client.py
new file mode 100644
index 00000000..f6e17cb7
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/blob/_blob_service_client.py
@@ -0,0 +1,788 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+# pylint: disable=docstring-keyword-should-match-keyword-only
+
+import functools
+import warnings
+from typing import (
+    Any, Dict, List, Optional, Union,
+    TYPE_CHECKING
+)
+from typing_extensions import Self
+
+from azure.core.exceptions import HttpResponseError
+from azure.core.paging import ItemPaged
+from azure.core.pipeline import Pipeline
+from azure.core.tracing.decorator import distributed_trace
+from ._blob_client import BlobClient
+from ._blob_service_client_helpers import _parse_url
+from ._container_client import ContainerClient
+from ._deserialize import service_properties_deserialize, service_stats_deserialize
+from ._encryption import StorageEncryptionMixin
+from ._generated import AzureBlobStorage
+from ._generated.models import KeyInfo, StorageServiceProperties
+from ._list_blobs_helper import FilteredBlobPaged
+from ._models import BlobProperties, ContainerProperties, ContainerPropertiesPaged, CorsRule
+from ._serialize import get_api_version
+from ._shared.base_client import parse_connection_str, parse_query, StorageAccountHostsMixin, TransportWrapper
+from ._shared.models import LocationMode
+from ._shared.parser import _to_utc_datetime
+from ._shared.response_handlers import (
+    parse_to_internal_user_delegation_key,
+    process_storage_error,
+    return_response_headers
+)
+
+if TYPE_CHECKING:
+    from azure.core.credentials import AzureNamedKeyCredential, AzureSasCredential, TokenCredential
+    from datetime import datetime
+    from ._lease import BlobLeaseClient
+    from ._models import (
+        BlobAnalyticsLogging,
+        FilteredBlob,
+        Metrics,
+        PublicAccess,
+        RetentionPolicy,
+        StaticWebsite
+    )
+    from ._shared.models import UserDelegationKey
+
+
+class BlobServiceClient(StorageAccountHostsMixin, StorageEncryptionMixin):
+    """A client to interact with the Blob Service at the account level.
+
+    This client provides operations to retrieve and configure the account properties
+    as well as list, create and delete containers within the account.
+    For operations relating to a specific container or blob, clients for those entities
+    can also be retrieved using the `get_client` functions.
+
+    For more optional configuration, please click
+    `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+    #optional-configuration>`__.
+
+    :param str account_url:
+        The URL to the blob storage account. Any other entities included
+        in the URL path (e.g. container or blob) will be discarded. This URL can be optionally
+        authenticated with a SAS token.
+    :param credential:
+        The credentials with which to authenticate. This is optional if the
+        account URL already has a SAS token. The value can be a SAS token string,
+        an instance of an AzureSasCredential or AzureNamedKeyCredential from azure.core.credentials,
+        an account shared access key, or an instance of a TokenCredential class from azure.identity.
+        If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential
+        - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError.
+        If using an instance of AzureNamedKeyCredential, "name" should be the storage account name, and "key"
+        should be the storage account key.
+    :keyword str api_version:
+        The Storage API version to use for requests. Default value is the most recent service version that is
+        compatible with the current SDK. Setting to an older version may result in reduced feature compatibility.
+
+        .. versionadded:: 12.2.0
+
+    :keyword str secondary_hostname:
+        The hostname of the secondary endpoint.
+    :keyword int max_block_size: The maximum chunk size for uploading a block blob in chunks.
+        Defaults to 4*1024*1024, or 4MB.
+    :keyword int max_single_put_size: If the blob size is less than or equal to max_single_put_size, the blob will be
+        uploaded with only one http PUT request. If the blob size is larger than max_single_put_size,
+        the blob will be uploaded in chunks. Defaults to 64*1024*1024, or 64MB.
+    :keyword int min_large_block_upload_threshold: The minimum chunk size required to use the memory efficient
+        algorithm when uploading a block blob. Defaults to 4*1024*1024+1.
+    :keyword bool use_byte_buffer: Use a byte buffer for block blob uploads. Defaults to False.
+    :keyword int max_page_size: The maximum chunk size for uploading a page blob. Defaults to 4*1024*1024, or 4MB.
+    :keyword int max_single_get_size: The maximum size for a blob to be downloaded in a single call;
+        the part exceeding it will be downloaded in chunks (potentially in parallel). Defaults to 32*1024*1024, or 32MB.
+    :keyword int max_chunk_get_size: The maximum chunk size used for downloading a blob. Defaults to 4*1024*1024,
+        or 4MB.
+    :keyword str audience: The audience to use when requesting tokens for Azure Active Directory
+        authentication. Only has an effect when credential is of type TokenCredential. The value could be
+        https://storage.azure.com/ (default) or https://<account>.blob.core.windows.net.
+
+    .. admonition:: Example:
+
+        .. literalinclude:: ../samples/blob_samples_authentication.py
+            :start-after: [START create_blob_service_client]
+            :end-before: [END create_blob_service_client]
+            :language: python
+            :dedent: 8
+            :caption: Creating the BlobServiceClient with account url and credential.
+
+        .. literalinclude:: ../samples/blob_samples_authentication.py
+            :start-after: [START create_blob_service_client_oauth]
+            :end-before: [END create_blob_service_client_oauth]
+            :language: python
+            :dedent: 8
+            :caption: Creating the BlobServiceClient with Default Azure Identity credentials.
+    """
+
+    def __init__(
+        self, account_url: str,
+        credential: Optional[Union[str, Dict[str, str], "AzureNamedKeyCredential", "AzureSasCredential", "TokenCredential"]] = None,  # pylint: disable=line-too-long
+        **kwargs: Any
+    ) -> None:
+        parsed_url, sas_token = _parse_url(account_url=account_url)
+        self._query_str, credential = self._format_query_string(sas_token, credential)
+        super(BlobServiceClient, self).__init__(parsed_url, service='blob', credential=credential, **kwargs)
+        self._client = AzureBlobStorage(self.url, base_url=self.url, pipeline=self._pipeline)
+        self._client._config.version = get_api_version(kwargs)  # type: ignore [assignment]
+        self._configure_encryption(kwargs)
+
+    def _format_url(self, hostname):
+        """Format the endpoint URL according to the current location
+        mode hostname.
+
+        :param str hostname:
+            The hostname of the current location mode.
+        :returns: A formatted endpoint URL including current location mode hostname.
+        :rtype: str
+        """
+        return f"{self.scheme}://{hostname}/{self._query_str}"
+
+    @classmethod
+    def from_connection_string(
+        cls, conn_str: str,
+        credential: Optional[Union[str, Dict[str, str], "AzureNamedKeyCredential", "AzureSasCredential", "TokenCredential"]] = None,  # pylint: disable=line-too-long
+        **kwargs: Any
+    ) -> Self:
+        """Create BlobServiceClient from a Connection String.
+
+        :param str conn_str:
+            A connection string to an Azure Storage account.
+        :param credential:
+            The credentials with which to authenticate. This is optional if the
+            account URL already has a SAS token, or the connection string already has shared
+            access key values. The value can be a SAS token string,
+            an instance of a AzureSasCredential or AzureNamedKeyCredential from azure.core.credentials,
+            an account shared access key, or an instance of a TokenCredentials class from azure.identity.
+            Credentials provided here will take precedence over those in the connection string.
+            If using an instance of AzureNamedKeyCredential, "name" should be the storage account name, and "key"
+            should be the storage account key.
+        :type credential:
+            ~azure.core.credentials.AzureNamedKeyCredential or
+            ~azure.core.credentials.AzureSasCredential or
+            ~azure.core.credentials.TokenCredential or
+            str or dict[str, str] or None
+        :keyword str audience: The audience to use when requesting tokens for Azure Active Directory
+            authentication. Only has an effect when credential is of type TokenCredential. The value could be
+            https://storage.azure.com/ (default) or https://<account>.blob.core.windows.net.
+        :returns: A Blob service client.
+        :rtype: ~azure.storage.blob.BlobServiceClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/blob_samples_authentication.py
+                :start-after: [START auth_from_connection_string]
+                :end-before: [END auth_from_connection_string]
+                :language: python
+                :dedent: 8
+                :caption: Creating the BlobServiceClient from a connection string.
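+
+            A minimal sketch, assuming a valid connection string in ``conn_str``:
+
+            .. code-block:: python
+
+                from azure.storage.blob import BlobServiceClient
+
+                service_client = BlobServiceClient.from_connection_string(conn_str)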
+        """
+        account_url, secondary, credential = parse_connection_str(conn_str, credential, 'blob')
+        if 'secondary_hostname' not in kwargs:
+            kwargs['secondary_hostname'] = secondary
+        return cls(account_url, credential=credential, **kwargs)
+
+    @distributed_trace
+    def get_user_delegation_key(
+        self, key_start_time: "datetime",
+        key_expiry_time: "datetime",
+        **kwargs: Any
+    ) -> "UserDelegationKey":
+        """
+        Obtain a user delegation key for the purpose of signing SAS tokens.
+        A token credential must be present on the service object for this request to succeed.
+
+        :param ~datetime.datetime key_start_time:
+            A DateTime value. Indicates when the key becomes valid.
+        :param ~datetime.datetime key_expiry_time:
+            A DateTime value. Indicates when the key stops being valid.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :returns: The user delegation key.
+        :rtype: ~azure.storage.blob.UserDelegationKey
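+
+        .. admonition:: Example:
+
+            A minimal sketch of signing a container-level SAS with the key; the client
+            must be authenticated with a token credential, and ``service_client`` and
+            the container name are illustrative:
+
+            .. code-block:: python
+
+                from datetime import datetime, timedelta
+                from azure.storage.blob import generate_container_sas
+
+                start = datetime.utcnow()
+                expiry = start + timedelta(hours=1)
+                key = service_client.get_user_delegation_key(start, expiry)
+                sas_token = generate_container_sas(
+                    account_name=service_client.account_name,
+                    container_name="mycontainer",
+                    user_delegation_key=key,
+                    permission="r",
+                    expiry=expiry
+                )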
+        """
+        key_info = KeyInfo(start=_to_utc_datetime(key_start_time), expiry=_to_utc_datetime(key_expiry_time))
+        timeout = kwargs.pop('timeout', None)
+        try:
+            user_delegation_key = self._client.service.get_user_delegation_key(key_info=key_info,
+                                                                               timeout=timeout,
+                                                                               **kwargs)  # type: ignore
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+        return parse_to_internal_user_delegation_key(user_delegation_key)  # type: ignore
+
+    @distributed_trace
+    def get_account_information(self, **kwargs: Any) -> Dict[str, str]:
+        """Gets information related to the storage account.
+
+        The information can also be retrieved if the user has a SAS to a container or blob.
+        The keys in the returned dictionary include 'sku_name' and 'account_kind'.
+
+        :returns: A dict of account information (SKU and account type).
+        :rtype: dict(str, str)
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/blob_samples_service.py
+                :start-after: [START get_blob_service_account_info]
+                :end-before: [END get_blob_service_account_info]
+                :language: python
+                :dedent: 8
+                :caption: Getting account information for the blob service.
+        """
+        try:
+            return self._client.service.get_account_info(cls=return_response_headers, **kwargs) # type: ignore
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    @distributed_trace
+    def get_service_stats(self, **kwargs: Any) -> Dict[str, Any]:
+        """Retrieves statistics related to replication for the Blob service.
+
+        It is only available when read-access geo-redundant replication is enabled for
+        the storage account.
+
+        With geo-redundant replication, Azure Storage keeps your data durable
+        in two locations. In both locations, Azure Storage constantly maintains
+        multiple healthy replicas of your data. The location where you read,
+        create, update, or delete data is the primary storage account location.
+        The primary location exists in the region you choose at the time you
+        create an account via the Azure portal, for
+        example, North Central US. The location to which your data is replicated
+        is the secondary location. The secondary location is automatically
+        determined based on the location of the primary; it is in a second data
+        center that resides in the same region as the primary location. Read-only
+        access is available from the secondary location, if read-access geo-redundant
+        replication is enabled for your storage account.
+
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :returns: The blob service stats.
+        :rtype: Dict[str, Any]
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/blob_samples_service.py
+                :start-after: [START get_blob_service_stats]
+                :end-before: [END get_blob_service_stats]
+                :language: python
+                :dedent: 8
+                :caption: Getting service stats for the blob service.
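+
+            A minimal sketch reading the replication status from the returned
+            dictionary (``service_client`` is illustrative; assumes RA-GRS is enabled):
+
+            .. code-block:: python
+
+                stats = service_client.get_service_stats()
+                geo = stats['geo_replication']
+                print(geo['status'], geo['last_sync_time'])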
+        """
+        timeout = kwargs.pop('timeout', None)
+        try:
+            stats = self._client.service.get_statistics( # type: ignore
+                timeout=timeout, use_location=LocationMode.SECONDARY, **kwargs)
+            return service_stats_deserialize(stats)
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    @distributed_trace
+    def get_service_properties(self, **kwargs: Any) -> Dict[str, Any]:
+        """Gets the properties of a storage account's Blob service, including
+        Azure Storage Analytics.
+
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :returns: An object containing blob service properties such as
+            analytics logging, hour/minute metrics, cors rules, etc.
+        :rtype: Dict[str, Any]
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/blob_samples_service.py
+                :start-after: [START get_blob_service_properties]
+                :end-before: [END get_blob_service_properties]
+                :language: python
+                :dedent: 8
+                :caption: Getting service properties for the blob service.
+        """
+        timeout = kwargs.pop('timeout', None)
+        try:
+            service_props = self._client.service.get_properties(timeout=timeout, **kwargs)
+            return service_properties_deserialize(service_props)
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    @distributed_trace
+    def set_service_properties(
+        self, analytics_logging: Optional["BlobAnalyticsLogging"] = None,
+        hour_metrics: Optional["Metrics"] = None,
+        minute_metrics: Optional["Metrics"] = None,
+        cors: Optional[List[CorsRule]] = None,
+        target_version: Optional[str] = None,
+        delete_retention_policy: Optional["RetentionPolicy"] = None,
+        static_website: Optional["StaticWebsite"] = None,
+        **kwargs: Any
+    ) -> None:
+        """Sets the properties of a storage account's Blob service, including
+        Azure Storage Analytics.
+
+        If an element (e.g. analytics_logging) is left as None, the
+        existing settings on the service for that functionality are preserved.
+
+        :param analytics_logging:
+            Groups the Azure Analytics Logging settings.
+        :type analytics_logging: ~azure.storage.blob.BlobAnalyticsLogging
+        :param hour_metrics:
+            The hour metrics settings provide a summary of request
+            statistics grouped by API in hourly aggregates for blobs.
+        :type hour_metrics: ~azure.storage.blob.Metrics
+        :param minute_metrics:
+            The minute metrics settings provide request statistics
+            for each minute for blobs.
+        :type minute_metrics: ~azure.storage.blob.Metrics
+        :param cors:
+            You can include up to five CorsRule elements in the
+            list. If an empty list is specified, all CORS rules will be deleted,
+            and CORS will be disabled for the service.
+        :type cors: list[~azure.storage.blob.CorsRule]
+        :param str target_version:
+            Indicates the default version to use for requests if an incoming
+            request's version is not specified.
+        :param delete_retention_policy:
+            The delete retention policy specifies whether to retain deleted blobs.
+            It also specifies the number of days and versions of blob to keep.
+        :type delete_retention_policy: ~azure.storage.blob.RetentionPolicy
+        :param static_website:
+            Specifies whether the static website feature is enabled,
+            and if yes, indicates the index document and 404 error document to use.
+        :type static_website: ~azure.storage.blob.StaticWebsite
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :rtype: None
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/blob_samples_service.py
+                :start-after: [START set_blob_service_properties]
+                :end-before: [END set_blob_service_properties]
+                :language: python
+                :dedent: 8
+                :caption: Setting service properties for the blob service.
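+
+            A minimal sketch enabling blob soft delete with a 7-day retention window,
+            leaving all other settings untouched (``service_client`` is illustrative):
+
+            .. code-block:: python
+
+                from azure.storage.blob import RetentionPolicy
+
+                service_client.set_service_properties(
+                    delete_retention_policy=RetentionPolicy(enabled=True, days=7))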
+        """
+        if all(parameter is None for parameter in [
+                    analytics_logging, hour_metrics, minute_metrics, cors,
+                    target_version, delete_retention_policy, static_website]):
+            raise ValueError("set_service_properties should be called with at least one parameter")
+
+        props = StorageServiceProperties(
+            logging=analytics_logging,
+            hour_metrics=hour_metrics,
+            minute_metrics=minute_metrics,
+            cors=CorsRule._to_generated(cors), # pylint: disable=protected-access
+            default_service_version=target_version,
+            delete_retention_policy=delete_retention_policy,
+            static_website=static_website
+        )
+        timeout = kwargs.pop('timeout', None)
+        try:
+            self._client.service.set_properties(props, timeout=timeout, **kwargs)
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    @distributed_trace
+    def list_containers(
+        self, name_starts_with: Optional[str] = None,
+        include_metadata: bool = False,
+        **kwargs: Any
+    ) -> ItemPaged[ContainerProperties]:
+        """Returns a generator to list the containers under the specified account.
+
+        The generator will lazily follow the continuation tokens returned by
+        the service and stop when all containers have been returned.
+
+        :param str name_starts_with:
+            Filters the results to return only containers whose names
+            begin with the specified prefix.
+        :param bool include_metadata:
+            Specifies that container metadata be returned in the response.
+            The default value is `False`.
+        :keyword bool include_deleted:
+            Specifies that deleted containers be returned in the response. This is only for accounts
+            with container restore enabled. The default value is `False`.
+
+            .. versionadded:: 12.4.0
+
+        :keyword bool include_system:
+            Flag specifying that system containers should be included.
+
+            .. versionadded:: 12.10.0
+
+        :keyword int results_per_page:
+            The maximum number of container names to retrieve per API
+            call. If the request does not specify, the server will return up to 5,000 items.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :returns: An iterable (auto-paging) of ContainerProperties.
+        :rtype: ~azure.core.paging.ItemPaged[~azure.storage.blob.ContainerProperties]
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/blob_samples_service.py
+                :start-after: [START bsc_list_containers]
+                :end-before: [END bsc_list_containers]
+                :language: python
+                :dedent: 12
+                :caption: Listing the containers in the blob service.
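+
+            A minimal sketch filtering by prefix and reading metadata
+            (``service_client`` is illustrative):
+
+            .. code-block:: python
+
+                containers = service_client.list_containers(
+                    name_starts_with="app-", include_metadata=True)
+                for container in containers:
+                    print(container.name, container.metadata)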
+        """
+        include = ['metadata'] if include_metadata else []
+        include_deleted = kwargs.pop('include_deleted', None)
+        if include_deleted:
+            include.append("deleted")
+        include_system = kwargs.pop('include_system', None)
+        if include_system:
+            include.append("system")
+
+        timeout = kwargs.pop('timeout', None)
+        results_per_page = kwargs.pop('results_per_page', None)
+        command = functools.partial(
+            self._client.service.list_containers_segment,
+            prefix=name_starts_with,
+            include=include,
+            timeout=timeout,
+            **kwargs)
+        return ItemPaged(
+                command,
+                prefix=name_starts_with,
+                results_per_page=results_per_page,
+                page_iterator_class=ContainerPropertiesPaged
+            )
+
+    @distributed_trace
+    def find_blobs_by_tags(self, filter_expression: str, **kwargs: Any) -> ItemPaged["FilteredBlob"]:
+        """The Filter Blobs operation enables callers to list blobs across all
+        containers whose tags match a given search expression.  Filter blobs
+        searches across all containers within a storage account but can be
+        scoped within the expression to a single container.
+
+        :param str filter_expression:
+            The expression to find blobs whose tags match the specified condition.
+            e.g. "\"yourtagname\"='firsttag' and \"yourtagname2\"='secondtag'"
+            To scope the search to a single container, e.g. "@container='containerName' and \"Name\"='C'"
+        :keyword int results_per_page:
+            The maximum number of results per page when paginating.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :returns: An iterable (auto-paging) response of FilteredBlob.
+        :rtype: ~azure.core.paging.ItemPaged[~azure.storage.blob.FilteredBlob]
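+
+        .. admonition:: Example:
+
+            A minimal sketch, assuming blobs were uploaded with matching tags
+            (``service_client`` and the tag names are illustrative):
+
+            .. code-block:: python
+
+                expr = "\"project\"='alpha' and \"stage\"='published'"
+                for blob in service_client.find_blobs_by_tags(expr, results_per_page=100):
+                    print(blob.container_name, blob.name)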
+        """
+
+        results_per_page = kwargs.pop('results_per_page', None)
+        timeout = kwargs.pop('timeout', None)
+        command = functools.partial(
+            self._client.service.filter_blobs,
+            where=filter_expression,
+            timeout=timeout,
+            **kwargs)
+        return ItemPaged(
+            command, results_per_page=results_per_page,
+            page_iterator_class=FilteredBlobPaged)
+
+    @distributed_trace
+    def create_container(
+        self, name: str,
+        metadata: Optional[Dict[str, str]] = None,
+        public_access: Optional[Union["PublicAccess", str]] = None,
+        **kwargs: Any
+    ) -> ContainerClient:
+        """Creates a new container under the specified account.
+
+        If the container with the same name already exists, a ResourceExistsError will
+        be raised. This method returns a client with which to interact with the newly
+        created container.
+
+        :param str name: The name of the container to create.
+        :param metadata:
+            A dict with name-value pairs to associate with the
+            container as metadata. Example: `{'Category':'test'}`
+        :type metadata: dict(str, str)
+        :param public_access:
+            Possible values include: 'container', 'blob'.
+        :type public_access: str or ~azure.storage.blob.PublicAccess
+        :keyword container_encryption_scope:
+            Specifies the default encryption scope to set on the container and use for
+            all future writes.
+
+            .. versionadded:: 12.2.0
+
+        :paramtype container_encryption_scope: dict or ~azure.storage.blob.ContainerEncryptionScope
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :returns: A container client to interact with the newly created container.
+        :rtype: ~azure.storage.blob.ContainerClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/blob_samples_service.py
+                :start-after: [START bsc_create_container]
+                :end-before: [END bsc_create_container]
+                :language: python
+                :dedent: 12
+                :caption: Creating a container in the blob service.
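+
+            A minimal sketch that tolerates a pre-existing container
+            (``service_client`` is illustrative):
+
+            .. code-block:: python
+
+                from azure.core.exceptions import ResourceExistsError
+
+                try:
+                    container_client = service_client.create_container("mycontainer")
+                except ResourceExistsError:
+                    container_client = service_client.get_container_client("mycontainer")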
+        """
+        container = self.get_container_client(name)
+        kwargs.setdefault('merge_span', True)
+        timeout = kwargs.pop('timeout', None)
+        container.create_container(
+            metadata=metadata, public_access=public_access, timeout=timeout, **kwargs)
+        return container
+
+    @distributed_trace
+    def delete_container(
+        self, container: Union[ContainerProperties, str],
+        lease: Optional[Union["BlobLeaseClient", str]] = None,
+        **kwargs: Any
+    ) -> None:
+        """Marks the specified container for deletion.
+
+        The container and any blobs contained within it are later deleted during garbage collection.
+        If the container is not found, a ResourceNotFoundError will be raised.
+
+        :param container:
+            The container to delete. This can either be the name of the container,
+            or an instance of ContainerProperties.
+        :type container: str or ~azure.storage.blob.ContainerProperties
+        :param lease:
+            If specified, delete_container only succeeds if the
+            container's lease is active and matches this ID.
+            Required if the container has an active lease.
+        :type lease: ~azure.storage.blob.BlobLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/blob_samples_service.py
+                :start-after: [START bsc_delete_container]
+                :end-before: [END bsc_delete_container]
+                :language: python
+                :dedent: 12
+                :caption: Deleting a container in the blob service.
+        """
+        container_client = self.get_container_client(container)
+        kwargs.setdefault('merge_span', True)
+        timeout = kwargs.pop('timeout', None)
+        container_client.delete_container(
+            lease=lease,
+            timeout=timeout,
+            **kwargs)
+
+    @distributed_trace
+    def _rename_container(self, name: str, new_name: str, **kwargs: Any) -> ContainerClient:
+        """Renames a container.
+
+        Operation is successful only if the source container exists.
+
+        :param str name:
+            The name of the container to rename.
+        :param str new_name:
+            The new container name the user wants to rename to.
+        :keyword lease:
+            Specify this to perform only if the lease ID given
+            matches the active lease ID of the source container.
+        :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :returns: A container client for the renamed container.
+        :rtype: ~azure.storage.blob.ContainerClient
+        """
+        renamed_container = self.get_container_client(new_name)
+        lease = kwargs.pop('lease', None)
+        try:
+            kwargs['source_lease_id'] = lease.id
+        except AttributeError:
+            kwargs['source_lease_id'] = lease
+        try:
+            renamed_container._client.container.rename(name, **kwargs)  # pylint: disable = protected-access
+            return renamed_container
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    @distributed_trace
+    def undelete_container(
+        self, deleted_container_name: str,
+        deleted_container_version: str,
+        **kwargs: Any
+    ) -> ContainerClient:
+        """Restores soft-deleted container.
+
+        The operation will only be successful if used within the specified number of days
+        set in the delete retention policy.
+
+        .. versionadded:: 12.4.0
+            This operation was introduced in API version '2019-12-12'.
+
+        :param str deleted_container_name:
+            Specifies the name of the deleted container to restore.
+        :param str deleted_container_version:
+            Specifies the version of the deleted container to restore.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :returns: The undeleted ContainerClient.
+        :rtype: ~azure.storage.blob.ContainerClient
+        """
+        new_name = kwargs.pop('new_name', None)
+        if new_name:
+            warnings.warn("`new_name` is no longer supported.", DeprecationWarning)
+        container = self.get_container_client(new_name or deleted_container_name)
+        try:
+            container._client.container.restore(deleted_container_name=deleted_container_name,  # pylint: disable = protected-access
+                                                deleted_container_version=deleted_container_version,
+                                                timeout=kwargs.pop('timeout', None), **kwargs)
+            return container
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    def get_container_client(self, container: Union[ContainerProperties, str]) -> ContainerClient:
+        """Get a client to interact with the specified container.
+
+        The container need not already exist.
+
+        :param container:
+            The container. This can either be the name of the container,
+            or an instance of ContainerProperties.
+        :type container: str or ~azure.storage.blob.ContainerProperties
+        :returns: A ContainerClient.
+        :rtype: ~azure.storage.blob.ContainerClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/blob_samples_service.py
+                :start-after: [START bsc_get_container_client]
+                :end-before: [END bsc_get_container_client]
+                :language: python
+                :dedent: 8
+                :caption: Getting the container client to interact with a specific container.
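+
+            A minimal sketch (``service_client`` is illustrative; the container
+            does not need to exist yet):
+
+            .. code-block:: python
+
+                container_client = service_client.get_container_client("mycontainer")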
+        """
+        if isinstance(container, ContainerProperties):
+            container_name = container.name
+        else:
+            container_name = container
+        _pipeline = Pipeline(
+            transport=TransportWrapper(self._pipeline._transport), # pylint: disable = protected-access
+            policies=self._pipeline._impl_policies # pylint: disable = protected-access
+        )
+        return ContainerClient(
+            self.url, container_name=container_name,
+            credential=self.credential, api_version=self.api_version, _configuration=self._config,
+            _pipeline=_pipeline, _location_mode=self._location_mode, _hosts=self._hosts,
+            require_encryption=self.require_encryption, encryption_version=self.encryption_version,
+            key_encryption_key=self.key_encryption_key, key_resolver_function=self.key_resolver_function)
+
+    def get_blob_client(
+        self, container: Union[ContainerProperties, str],
+        blob: str,
+        snapshot: Optional[Union[Dict[str, Any], str]] = None,
+        *,
+        version_id: Optional[str] = None
+    ) -> BlobClient:
+        """Get a client to interact with the specified blob.
+
+        The blob need not already exist.
+
+        :param container:
+            The container that the blob is in. This can either be the name of the container,
+            or an instance of ContainerProperties.
+        :type container: str or ~azure.storage.blob.ContainerProperties
+        :param str blob: The name of the blob with which to interact.
+        :param snapshot:
+            The optional blob snapshot on which to operate. This can either be the ID of the snapshot,
+            or a dictionary output returned by :func:`~azure.storage.blob.BlobClient.create_snapshot()`.
+        :type snapshot: str or dict(str, Any)
+        :keyword str version_id: The version id parameter is an opaque DateTime value that, when present,
+            specifies the version of the blob to operate on.
+        :returns: A BlobClient.
+        :rtype: ~azure.storage.blob.BlobClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/blob_samples_service.py
+                :start-after: [START bsc_get_blob_client]
+                :end-before: [END bsc_get_blob_client]
+                :language: python
+                :dedent: 12
+                :caption: Getting the blob client to interact with a specific blob.
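+
+            A minimal sketch of uploading through the returned client
+            (``service_client`` and the names are illustrative):
+
+            .. code-block:: python
+
+                blob_client = service_client.get_blob_client("mycontainer", "folder/data.txt")
+                blob_client.upload_blob(b"hello world", overwrite=True)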
+        """
+        if isinstance(blob, BlobProperties):
+            warnings.warn(
+                "The use of a 'BlobProperties' instance for param blob is deprecated. " +
+                "Please use 'BlobProperties.name' or any other str input type instead.",
+                DeprecationWarning
+            )
+            blob_name = blob.name
+        else:
+            blob_name = blob
+        if isinstance(container, ContainerProperties):
+            container_name = container.name
+        else:
+            container_name = container
+        _pipeline = Pipeline(
+            transport=TransportWrapper(self._pipeline._transport), # pylint: disable = protected-access
+            policies=self._pipeline._impl_policies # pylint: disable = protected-access
+        )
+        return BlobClient(
+            self.url, container_name=container_name, blob_name=blob_name, snapshot=snapshot,
+            credential=self.credential, api_version=self.api_version, _configuration=self._config,
+            _pipeline=_pipeline, _location_mode=self._location_mode, _hosts=self._hosts,
+            require_encryption=self.require_encryption, encryption_version=self.encryption_version,
+            key_encryption_key=self.key_encryption_key, key_resolver_function=self.key_resolver_function,
+            version_id=version_id)
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/blob/_blob_service_client_helpers.py b/.venv/lib/python3.12/site-packages/azure/storage/blob/_blob_service_client_helpers.py
new file mode 100644
index 00000000..d2de950b
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/blob/_blob_service_client_helpers.py
@@ -0,0 +1,27 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+
+from typing import Any, Tuple, TYPE_CHECKING
+from urllib.parse import urlparse
+from ._shared.base_client import parse_query
+
+if TYPE_CHECKING:
+    from urllib.parse import ParseResult
+
+
+def _parse_url(account_url: str) -> Tuple["ParseResult", Any]:
+    try:
+        if not account_url.lower().startswith('http'):
+            account_url = "https://" + account_url
+    except AttributeError as exc:
+        raise ValueError("Account URL must be a string.") from exc
+    parsed_url = urlparse(account_url.rstrip('/'))
+    if not parsed_url.netloc:
+        raise ValueError(f"Invalid URL: {account_url}")
+
+    _, sas_token = parse_query(parsed_url.query)
+
+    return parsed_url, sas_token
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/blob/_container_client.py b/.venv/lib/python3.12/site-packages/azure/storage/blob/_container_client.py
new file mode 100644
index 00000000..783df6bc
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/blob/_container_client.py
@@ -0,0 +1,1620 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+# pylint: disable=too-many-lines, docstring-keyword-should-match-keyword-only
+
+import functools
+import warnings
+from datetime import datetime
+from typing import (
+    Any, AnyStr, cast, Dict, List, IO, Iterable, Iterator, Optional, overload, Union,
+    TYPE_CHECKING
+)
+from urllib.parse import unquote, urlparse
+from typing_extensions import Self
+
+from azure.core.exceptions import HttpResponseError, ResourceNotFoundError
+from azure.core.paging import ItemPaged
+from azure.core.pipeline import Pipeline
+from azure.core.tracing.decorator import distributed_trace
+from ._blob_client import BlobClient
+from ._container_client_helpers import (
+    _format_url,
+    _generate_delete_blobs_options,
+    _generate_set_tiers_options,
+    _parse_url
+)
+from ._deserialize import deserialize_container_properties
+from ._download import StorageStreamDownloader
+from ._encryption import StorageEncryptionMixin
+from ._generated import AzureBlobStorage
+from ._generated.models import SignedIdentifier
+from ._lease import BlobLeaseClient
+from ._list_blobs_helper import (
+    BlobNamesPaged,
+    BlobPrefix,
+    BlobPropertiesPaged,
+    FilteredBlobPaged,
+    IgnoreListBlobsDeserializer
+)
+from ._models import (
+    BlobProperties,
+    BlobType,
+    ContainerProperties,
+    FilteredBlob
+)
+from ._serialize import get_access_conditions, get_api_version, get_container_cpk_scope_info, get_modify_conditions
+from ._shared.base_client import parse_connection_str, StorageAccountHostsMixin, TransportWrapper
+from ._shared.request_handlers import add_metadata_headers, serialize_iso
+from ._shared.response_handlers import (
+    process_storage_error,
+    return_headers_and_deserialized,
+    return_response_headers
+)
+
+if TYPE_CHECKING:
+    from azure.core.credentials import AzureNamedKeyCredential, AzureSasCredential, TokenCredential
+    from azure.core.pipeline.transport import HttpResponse  # pylint: disable=C4756
+    from azure.storage.blob import BlobServiceClient
+    from ._models import (
+        AccessPolicy,
+        PremiumPageBlobTier,
+        PublicAccess,
+        StandardBlobTier
+    )
+
+
+class ContainerClient(StorageAccountHostsMixin, StorageEncryptionMixin):    # pylint: disable=too-many-public-methods
+    """A client to interact with a specific container, although that container
+    may not yet exist.
+
+    For operations relating to a specific blob within this container, a blob client can be
+    retrieved using the :func:`~get_blob_client` function.
+
+    For more optional configuration, please click
+    `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+    #optional-configuration>`__.
+
+    :param str account_url:
+        The URI to the storage account. In order to create a client given the full URI to the container,
+        use the :func:`from_container_url` classmethod.
+    :param container_name:
+        The name of the container for the blob.
+    :type container_name: str
+    :param credential:
+        The credentials with which to authenticate. This is optional if the
+        account URL already has a SAS token. The value can be a SAS token string,
+        an instance of a AzureSasCredential or AzureNamedKeyCredential from azure.core.credentials,
+        an account shared access key, or an instance of a TokenCredentials class from azure.identity.
+        If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential
+        - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError.
+        If using an instance of AzureNamedKeyCredential, "name" should be the storage account name, and "key"
+        should be the storage account key.
+    :keyword str api_version:
+        The Storage API version to use for requests. Default value is the most recent service version that is
+        compatible with the current SDK. Setting to an older version may result in reduced feature compatibility.
+
+        .. versionadded:: 12.2.0
+
+    :keyword str secondary_hostname:
+        The hostname of the secondary endpoint.
+    :keyword int max_block_size: The maximum chunk size for uploading a block blob in chunks.
+        Defaults to 4*1024*1024, or 4MB.
+    :keyword int max_single_put_size: If the blob size is less than or equal to max_single_put_size, then the blob
+        will be uploaded with only one HTTP PUT request. If the blob size is larger than max_single_put_size,
+        the blob will be uploaded in chunks. Defaults to 64*1024*1024, or 64MB.
+    :keyword int min_large_block_upload_threshold: The minimum chunk size required to use the memory efficient
+        algorithm when uploading a block blob. Defaults to 4*1024*1024+1.
+    :keyword bool use_byte_buffer: Use a byte buffer for block blob uploads. Defaults to False.
+    :keyword int max_page_size: The maximum chunk size for uploading a page blob. Defaults to 4*1024*1024, or 4MB.
+    :keyword int max_single_get_size: The maximum size for a blob to be downloaded in a single call;
+        anything larger will be downloaded in chunks (potentially in parallel). Defaults to 32*1024*1024, or 32MB.
+    :keyword int max_chunk_get_size: The maximum chunk size used for downloading a blob. Defaults to 4*1024*1024,
+        or 4MB.
+    :keyword str audience: The audience to use when requesting tokens for Azure Active Directory
+        authentication. Only has an effect when credential is of type TokenCredential. The value could be
+        https://storage.azure.com/ (default) or https://<account>.blob.core.windows.net.
+
+    .. admonition:: Example:
+
+        .. literalinclude:: ../samples/blob_samples_containers.py
+            :start-after: [START create_container_client_from_service]
+            :end-before: [END create_container_client_from_service]
+            :language: python
+            :dedent: 8
+            :caption: Get a ContainerClient from an existing BlobServiceClient.
+
+        .. literalinclude:: ../samples/blob_samples_containers.py
+            :start-after: [START create_container_client_sasurl]
+            :end-before: [END create_container_client_sasurl]
+            :language: python
+            :dedent: 8
+            :caption: Creating the container client directly.
+    """
+    def __init__(
+        self, account_url: str,
+        container_name: str,
+        credential: Optional[Union[str, Dict[str, str], "AzureNamedKeyCredential", "AzureSasCredential", "TokenCredential"]] = None,  # pylint: disable=line-too-long
+        **kwargs: Any
+    ) -> None:
+        parsed_url, sas_token = _parse_url(account_url=account_url, container_name=container_name)
+
+        self.container_name = container_name
+        # This parameter is used for the hierarchy traversal. Give precedence to credential.
+        self._raw_credential = credential if credential else sas_token
+        self._query_str, credential = self._format_query_string(sas_token, credential)
+        super(ContainerClient, self).__init__(parsed_url, service='blob', credential=credential, **kwargs)
+        self._api_version = get_api_version(kwargs)
+        self._client = self._build_generated_client()
+        self._configure_encryption(kwargs)
+
+    def _build_generated_client(self) -> AzureBlobStorage:
+        client = AzureBlobStorage(self.url, base_url=self.url, pipeline=self._pipeline)
+        client._config.version = self._api_version  # type: ignore [assignment] # pylint: disable=protected-access
+        return client
+
+    def _format_url(self, hostname):
+        return _format_url(
+            container_name=self.container_name,
+            hostname=hostname,
+            scheme=self.scheme,
+            query_str=self._query_str
+        )
+
+    @classmethod
+    def from_container_url(
+        cls, container_url: str,
+        credential: Optional[Union[str, Dict[str, str], "AzureNamedKeyCredential", "AzureSasCredential", "TokenCredential"]] = None,  # pylint: disable=line-too-long
+        **kwargs: Any
+    ) -> Self:
+        """Create ContainerClient from a container url.
+
+        :param str container_url:
+            The full endpoint URL to the Container, including SAS token if used. This could be
+            either the primary endpoint, or the secondary endpoint depending on the current `location_mode`.
+        :type container_url: str
+        :param credential:
+            The credentials with which to authenticate. This is optional if the
+            account URL already has a SAS token, or the connection string already has shared
+            access key values. The value can be a SAS token string,
+            an instance of a AzureSasCredential or AzureNamedKeyCredential from azure.core.credentials,
+            an account shared access key, or an instance of a TokenCredentials class from azure.identity.
+            If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential
+            - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError.
+            If using an instance of AzureNamedKeyCredential, "name" should be the storage account name, and "key"
+            should be the storage account key.
+        :type credential:
+            ~azure.core.credentials.AzureNamedKeyCredential or
+            ~azure.core.credentials.AzureSasCredential or
+            ~azure.core.credentials.TokenCredential or
+            str or dict[str, str] or None
+        :keyword str audience: The audience to use when requesting tokens for Azure Active Directory
+            authentication. Only has an effect when credential is of type TokenCredential. The value could be
+            https://storage.azure.com/ (default) or https://<account>.blob.core.windows.net.
+        :returns: A container client.
+        :rtype: ~azure.storage.blob.ContainerClient
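+
+        .. admonition:: Example:
+
+            A minimal sketch, assuming the container URL already carries a SAS token
+            (the URL shown is illustrative):
+
+            .. code-block:: python
+
+                from azure.storage.blob import ContainerClient
+
+                sas_url = "https://account.blob.core.windows.net/mycontainer?<sas-token>"
+                container_client = ContainerClient.from_container_url(sas_url)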
+        """
+        try:
+            if not container_url.lower().startswith('http'):
+                container_url = "https://" + container_url
+        except AttributeError as exc:
+            raise ValueError("Container URL must be a string.") from exc
+        parsed_url = urlparse(container_url)
+        if not parsed_url.netloc:
+            raise ValueError(f"Invalid URL: {container_url}")
+
+        container_path = parsed_url.path.strip('/').split('/')
+        account_path = ""
+        if len(container_path) > 1:
+            account_path = "/" + "/".join(container_path[:-1])
+        account_url = f"{parsed_url.scheme}://{parsed_url.netloc.rstrip('/')}{account_path}?{parsed_url.query}"
+        container_name = unquote(container_path[-1])
+        if not container_name:
+            raise ValueError("Invalid URL. Please provide a URL with a valid container name")
+        return cls(account_url, container_name=container_name, credential=credential, **kwargs)
+
+    @classmethod
+    def from_connection_string(
+        cls, conn_str: str,
+        container_name: str,
+        credential: Optional[Union[str, Dict[str, str], "AzureNamedKeyCredential", "AzureSasCredential", "TokenCredential"]] = None,  # pylint: disable=line-too-long
+        **kwargs: Any
+    ) -> Self:
+        """Create ContainerClient from a Connection String.
+
+        :param str conn_str:
+            A connection string to an Azure Storage account.
+        :param container_name:
+            The container name for the blob.
+        :type container_name: str
+        :param credential:
+            The credentials with which to authenticate. This is optional if the
+            account URL already has a SAS token, or the connection string already has shared
+            access key values. The value can be a SAS token string,
+            an instance of a AzureSasCredential or AzureNamedKeyCredential from azure.core.credentials,
+            an account shared access key, or an instance of a TokenCredentials class from azure.identity.
+            Credentials provided here will take precedence over those in the connection string.
+            If using an instance of AzureNamedKeyCredential, "name" should be the storage account name, and "key"
+            should be the storage account key.
+        :type credential:
+            ~azure.core.credentials.AzureNamedKeyCredential or
+            ~azure.core.credentials.AzureSasCredential or
+            ~azure.core.credentials.TokenCredential or
+            str or dict[str, str] or None
+        :keyword str audience: The audience to use when requesting tokens for Azure Active Directory
+            authentication. Only has an effect when credential is of type TokenCredential. The value could be
+            https://storage.azure.com/ (default) or https://<account>.blob.core.windows.net.
+        :returns: A container client.
+        :rtype: ~azure.storage.blob.ContainerClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/blob_samples_authentication.py
+                :start-after: [START auth_from_connection_string_container]
+                :end-before: [END auth_from_connection_string_container]
+                :language: python
+                :dedent: 8
+                :caption: Creating the ContainerClient from a connection string.
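+
+            A minimal sketch, assuming a valid connection string in ``conn_str``:
+
+            .. code-block:: python
+
+                from azure.storage.blob import ContainerClient
+
+                container_client = ContainerClient.from_connection_string(
+                    conn_str, container_name="mycontainer")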
+        """
+        account_url, secondary, credential = parse_connection_str(conn_str, credential, 'blob')
+        if 'secondary_hostname' not in kwargs:
+            kwargs['secondary_hostname'] = secondary
+        return cls(
+            account_url, container_name=container_name, credential=credential, **kwargs)
+
+    @distributed_trace
+    def create_container(
+        self, metadata: Optional[Dict[str, str]] = None,
+        public_access: Optional[Union["PublicAccess", str]] = None,
+        **kwargs: Any
+    ) -> Dict[str, Union[str, "datetime"]]:
+        """
+        Creates a new container under the specified account. If the container
+        with the same name already exists, the operation fails.
+
+        :param metadata:
+            A dict with name-value pairs to associate with the
+            container as metadata. Example: `{'Category':'test'}`
+        :type metadata: dict[str, str]
+        :param ~azure.storage.blob.PublicAccess public_access:
+            Possible values include: 'container', 'blob'.
+        :keyword container_encryption_scope:
+            Specifies the default encryption scope to set on the container and use for
+            all future writes.
+
+            .. versionadded:: 12.2.0
+
+        :paramtype container_encryption_scope: dict or ~azure.storage.blob.ContainerEncryptionScope
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :returns: A dictionary of response headers.
+        :rtype: Dict[str, Union[str, datetime]]
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/blob_samples_containers.py
+                :start-after: [START create_container]
+                :end-before: [END create_container]
+                :language: python
+                :dedent: 12
+                :caption: Creating a container to store blobs.
+        """
+        headers = kwargs.pop('headers', {})
+        timeout = kwargs.pop('timeout', None)
+        headers.update(add_metadata_headers(metadata)) # type: ignore
+        container_cpk_scope_info = get_container_cpk_scope_info(kwargs)
+        try:
+            return self._client.container.create( # type: ignore
+                timeout=timeout,
+                access=public_access,
+                container_cpk_scope_info=container_cpk_scope_info,
+                cls=return_response_headers,
+                headers=headers,
+                **kwargs)
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    @distributed_trace
+    def _rename_container(self, new_name: str, **kwargs: Any) -> "ContainerClient":
+        """Renames a container.
+
+        Operation is successful only if the source container exists.
+
+        :param str new_name:
+            The new container name the user wants to rename to.
+        :keyword lease:
+            Specify this to perform only if the lease ID given
+            matches the active lease ID of the source container.
+        :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :returns: The renamed container client.
+        :rtype: ~azure.storage.blob.ContainerClient
+        """
+        lease = kwargs.pop('lease', None)
+        try:
+            kwargs['source_lease_id'] = lease.id
+        except AttributeError:
+            kwargs['source_lease_id'] = lease
+        try:
+            renamed_container = ContainerClient(
+                f"{self.scheme}://{self.primary_hostname}", container_name=new_name,
+                credential=self.credential, api_version=self.api_version, _configuration=self._config,
+                _pipeline=self._pipeline, _location_mode=self._location_mode, _hosts=self._hosts,
+                require_encryption=self.require_encryption, encryption_version=self.encryption_version,
+                key_encryption_key=self.key_encryption_key, key_resolver_function=self.key_resolver_function)
+            renamed_container._client.container.rename(self.container_name, **kwargs)   # pylint: disable = protected-access
+            return renamed_container
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    @distributed_trace
+    def delete_container(self, **kwargs: Any) -> None:
+        """
+        Marks the specified container for deletion. The container and any blobs
+        contained within it are later deleted during garbage collection.
+
+        :keyword lease:
+            If specified, delete_container only succeeds if the
+            container's lease is active and matches this ID.
+            Required if the container has an active lease.
+        :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :rtype: None
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/blob_samples_containers.py
+                :start-after: [START delete_container]
+                :end-before: [END delete_container]
+                :language: python
+                :dedent: 12
+                :caption: Delete a container.
+        """
+        lease = kwargs.pop('lease', None)
+        access_conditions = get_access_conditions(lease)
+        mod_conditions = get_modify_conditions(kwargs)
+        timeout = kwargs.pop('timeout', None)
+        try:
+            self._client.container.delete(
+                timeout=timeout,
+                lease_access_conditions=access_conditions,
+                modified_access_conditions=mod_conditions,
+                **kwargs)
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    @distributed_trace
+    def acquire_lease(
+        self, lease_duration: int = -1,
+        lease_id: Optional[str] = None,
+        **kwargs: Any
+    ) -> BlobLeaseClient:
+        """
+        Requests a new lease. If the container does not have an active lease,
+        the Blob service creates a lease on the container and returns a new
+        lease ID.
+
+        :param int lease_duration:
+            Specifies the duration of the lease, in seconds, or negative one
+            (-1) for a lease that never expires. A non-infinite lease can be
+            between 15 and 60 seconds. A lease duration cannot be changed
+            using renew or change. Default is -1 (infinite lease).
+        :param str lease_id:
+            Proposed lease ID, in a GUID string format. The Blob service returns
+            400 (Invalid request) if the proposed lease ID is not in the correct format.
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :returns: A BlobLeaseClient object, that can be run in a context manager.
+        :rtype: ~azure.storage.blob.BlobLeaseClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/blob_samples_containers.py
+                :start-after: [START acquire_lease_on_container]
+                :end-before: [END acquire_lease_on_container]
+                :language: python
+                :dedent: 8
+                :caption: Acquiring a lease on the container.
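+
+        A minimal usage sketch (assumes ``container`` is an existing
+        ``ContainerClient``):
+
+        .. code-block:: python
+
+            # Hold a 15-second lease while doing work, then release it.
+            lease = container.acquire_lease(lease_duration=15)
+            try:
+                ...  # operations guarded by the lease
+            finally:
+                lease.release()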
+        """
+        lease = BlobLeaseClient(self, lease_id=lease_id) # type: ignore
+        kwargs.setdefault('merge_span', True)
+        timeout = kwargs.pop('timeout', None)
+        lease.acquire(lease_duration=lease_duration, timeout=timeout, **kwargs)
+        return lease
+
+    @distributed_trace
+    def get_account_information(self, **kwargs: Any) -> Dict[str, str]:
+        """Gets information related to the storage account.
+
+        The information can also be retrieved if the user has a SAS to a container or blob.
+        The keys in the returned dictionary include 'sku_name' and 'account_kind'.
+
+        :returns: A dict of account information (SKU and account type).
+        :rtype: dict(str, str)
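+
+        A minimal usage sketch (assumes ``container`` is an existing
+        ``ContainerClient``):
+
+        .. code-block:: python
+
+            info = container.get_account_information()
+            print(info["sku_name"], info["account_kind"])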
+        """
+        try:
+            return self._client.container.get_account_info(cls=return_response_headers, **kwargs) # type: ignore
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    @distributed_trace
+    def get_container_properties(self, **kwargs: Any) -> ContainerProperties:
+        """Returns all user-defined metadata and system properties for the specified
+        container. The data returned does not include the container's list of blobs.
+
+        :keyword lease:
+            If specified, get_container_properties only succeeds if the
+            container's lease is active and matches this ID.
+        :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :return: Properties for the specified container within a container object.
+        :rtype: ~azure.storage.blob.ContainerProperties
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/blob_samples_containers.py
+                :start-after: [START get_container_properties]
+                :end-before: [END get_container_properties]
+                :language: python
+                :dedent: 12
+                :caption: Getting properties on the container.
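+
+        A minimal usage sketch (assumes ``container`` is an existing
+        ``ContainerClient``):
+
+        .. code-block:: python
+
+            props = container.get_container_properties()
+            print(props.name, props.last_modified, props.metadata)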
+        """
+        lease = kwargs.pop('lease', None)
+        access_conditions = get_access_conditions(lease)
+        timeout = kwargs.pop('timeout', None)
+        try:
+            response = self._client.container.get_properties(
+                timeout=timeout,
+                lease_access_conditions=access_conditions,
+                cls=deserialize_container_properties,
+                **kwargs)
+        except HttpResponseError as error:
+            process_storage_error(error)
+        response.name = self.container_name
+        return response # type: ignore
+
+    @distributed_trace
+    def exists(self, **kwargs: Any) -> bool:
+        """
+        Returns True if the container exists, and False otherwise.
+
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :returns: True if the container exists, False otherwise.
+        :rtype: bool
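+
+        A minimal usage sketch (assumes ``container`` is an existing
+        ``ContainerClient``):
+
+        .. code-block:: python
+
+            if not container.exists():
+                container.create_container()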
+        """
+        try:
+            self._client.container.get_properties(**kwargs)
+            return True
+        except HttpResponseError as error:
+            try:
+                process_storage_error(error)
+            except ResourceNotFoundError:
+                return False
+
+    @distributed_trace
+    def set_container_metadata(
+        self, metadata: Optional[Dict[str, str]] = None,
+        **kwargs: Any
+    ) -> Dict[str, Union[str, "datetime"]]:
+        """Sets one or more user-defined name-value pairs for the specified
+        container. Each call to this operation replaces all existing metadata
+        attached to the container. To remove all metadata from the container,
+        call this operation with no metadata dict.
+
+        :param metadata:
+            A dict containing name-value pairs to associate with the container as
+            metadata. Example: {'category':'test'}
+        :type metadata: dict[str, str]
+        :keyword lease:
+            If specified, set_container_metadata only succeeds if the
+            container's lease is active and matches this ID.
+        :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :returns: Container-updated property dict (Etag and last modified).
+        :rtype: dict[str, str or datetime]
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/blob_samples_containers.py
+                :start-after: [START set_container_metadata]
+                :end-before: [END set_container_metadata]
+                :language: python
+                :dedent: 12
+                :caption: Setting metadata on the container.
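+
+        A minimal usage sketch (metadata values are placeholders; assumes
+        ``container`` is an existing ``ContainerClient``):
+
+        .. code-block:: python
+
+            # Replaces any existing metadata on the container.
+            container.set_container_metadata(metadata={"category": "test"})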
+        """
+        headers = kwargs.pop('headers', {})
+        headers.update(add_metadata_headers(metadata))
+        lease = kwargs.pop('lease', None)
+        access_conditions = get_access_conditions(lease)
+        mod_conditions = get_modify_conditions(kwargs)
+        timeout = kwargs.pop('timeout', None)
+        try:
+            return self._client.container.set_metadata( # type: ignore
+                timeout=timeout,
+                lease_access_conditions=access_conditions,
+                modified_access_conditions=mod_conditions,
+                cls=return_response_headers,
+                headers=headers,
+                **kwargs)
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    @distributed_trace
+    def _get_blob_service_client(self) -> "BlobServiceClient":
+        """Get a client to interact with the container's parent service account.
+
+        Defaults to current container's credentials.
+
+        :returns: A BlobServiceClient.
+        :rtype: ~azure.storage.blob.BlobServiceClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/blob_samples_service.py
+                :start-after: [START get_blob_service_client_from_container_client]
+                :end-before: [END get_blob_service_client_from_container_client]
+                :language: python
+                :dedent: 8
+                :caption: Get blob service client from container object.
+        """
+        from ._blob_service_client import BlobServiceClient
+        if not isinstance(self._pipeline._transport, TransportWrapper): # pylint: disable = protected-access
+            _pipeline = Pipeline(
+                transport=TransportWrapper(self._pipeline._transport), # pylint: disable = protected-access
+                policies=self._pipeline._impl_policies # pylint: disable = protected-access
+            )
+        else:
+            _pipeline = self._pipeline
+        return BlobServiceClient(
+            f"{self.scheme}://{self.primary_hostname}",
+            credential=self._raw_credential, api_version=self.api_version, _configuration=self._config,
+            _location_mode=self._location_mode, _hosts=self._hosts, require_encryption=self.require_encryption,
+            encryption_version=self.encryption_version, key_encryption_key=self.key_encryption_key,
+            key_resolver_function=self.key_resolver_function, _pipeline=_pipeline)
+
+    @distributed_trace
+    def get_container_access_policy(self, **kwargs: Any) -> Dict[str, Any]:
+        """Gets the permissions for the specified container.
+        The permissions indicate whether container data may be accessed publicly.
+
+        :keyword lease:
+            If specified, get_container_access_policy only succeeds if the
+            container's lease is active and matches this ID.
+        :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :returns: Access policy information in a dict.
+        :rtype: dict[str, Any]
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/blob_samples_containers.py
+                :start-after: [START get_container_access_policy]
+                :end-before: [END get_container_access_policy]
+                :language: python
+                :dedent: 12
+                :caption: Getting the access policy on the container.
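+
+        A minimal usage sketch (assumes ``container`` is an existing
+        ``ContainerClient``):
+
+        .. code-block:: python
+
+            policy = container.get_container_access_policy()
+            print(policy["public_access"])
+            for identifier in policy["signed_identifiers"]:
+                print(identifier.id)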
+        """
+        lease = kwargs.pop('lease', None)
+        access_conditions = get_access_conditions(lease)
+        timeout = kwargs.pop('timeout', None)
+        try:
+            response, identifiers = self._client.container.get_access_policy(
+                timeout=timeout,
+                lease_access_conditions=access_conditions,
+                cls=return_headers_and_deserialized,
+                **kwargs)
+        except HttpResponseError as error:
+            process_storage_error(error)
+        return {
+            'public_access': response.get('blob_public_access'),
+            'signed_identifiers': identifiers or []
+        }
+
+    @distributed_trace
+    def set_container_access_policy(
+        self, signed_identifiers: Dict[str, "AccessPolicy"],
+        public_access: Optional[Union[str, "PublicAccess"]] = None,
+        **kwargs: Any
+    ) -> Dict[str, Union[str, datetime]]:
+        """Sets the permissions for the specified container or stored access
+        policies that may be used with Shared Access Signatures. The permissions
+        indicate whether blobs in a container may be accessed publicly.
+
+        :param signed_identifiers:
+            A dictionary of access policies to associate with the container. The
+            dictionary may contain up to 5 elements. An empty dictionary
+            will clear the access policies set on the service.
+        :type signed_identifiers: dict[str, ~azure.storage.blob.AccessPolicy]
+        :param ~azure.storage.blob.PublicAccess public_access:
+            Possible values include: 'container', 'blob'.
+        :keyword lease:
+            Required if the container has an active lease. Value can be a BlobLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A datetime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified date/time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A datetime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :returns: Container-updated property dict (Etag and last modified).
+        :rtype: dict[str, str or ~datetime.datetime]
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/blob_samples_containers.py
+                :start-after: [START set_container_access_policy]
+                :end-before: [END set_container_access_policy]
+                :language: python
+                :dedent: 12
+                :caption: Setting access policy on the container.
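+
+        A minimal usage sketch (the policy name is a placeholder; assumes
+        ``container`` is an existing ``ContainerClient``):
+
+        .. code-block:: python
+
+            from datetime import datetime, timedelta, timezone
+            from azure.storage.blob import AccessPolicy, ContainerSasPermissions
+
+            read_policy = AccessPolicy(
+                permission=ContainerSasPermissions(read=True),
+                expiry=datetime.now(timezone.utc) + timedelta(hours=1))
+            container.set_container_access_policy(
+                signed_identifiers={"read-only": read_policy})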
+        """
+        if len(signed_identifiers) > 5:
+            raise ValueError(
+                'Too many access policies provided. The server does not support setting '
+                'more than 5 access policies on a single resource.')
+        identifiers = []
+        for key, value in signed_identifiers.items():
+            if value:
+                value.start = serialize_iso(value.start)
+                value.expiry = serialize_iso(value.expiry)
+            identifiers.append(SignedIdentifier(id=key, access_policy=value)) # type: ignore
+        signed_identifiers = identifiers # type: ignore
+        lease = kwargs.pop('lease', None)
+        mod_conditions = get_modify_conditions(kwargs)
+        access_conditions = get_access_conditions(lease)
+        timeout = kwargs.pop('timeout', None)
+        try:
+            return cast(Dict[str, Union[str, datetime]], self._client.container.set_access_policy(
+                container_acl=signed_identifiers or None,
+                timeout=timeout,
+                access=public_access,
+                lease_access_conditions=access_conditions,
+                modified_access_conditions=mod_conditions,
+                cls=return_response_headers,
+                **kwargs))
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    @distributed_trace
+    def list_blobs(
+        self, name_starts_with: Optional[str] = None,
+        include: Optional[Union[str, List[str]]] = None,
+        **kwargs: Any
+    ) -> ItemPaged[BlobProperties]:
+        """Returns a generator to list the blobs under the specified container.
+        The generator will lazily follow the continuation tokens returned by
+        the service.
+
+        :param str name_starts_with:
+            Filters the results to return only blobs whose names
+            begin with the specified prefix.
+        :param include:
+            Specifies one or more additional datasets to include in the response.
+            Options include: 'snapshots', 'metadata', 'uncommittedblobs', 'copy', 'deleted', 'deletedwithversions',
+            'tags', 'versions', 'immutabilitypolicy', 'legalhold'.
+        :type include: list[str] or str
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :returns: An iterable (auto-paging) response of BlobProperties.
+        :rtype: ~azure.core.paging.ItemPaged[~azure.storage.blob.BlobProperties]
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/blob_samples_containers.py
+                :start-after: [START list_blobs_in_container]
+                :end-before: [END list_blobs_in_container]
+                :language: python
+                :dedent: 8
+                :caption: List the blobs in the container.
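+
+        A minimal usage sketch (the prefix is a placeholder; assumes
+        ``container`` is an existing ``ContainerClient``):
+
+        .. code-block:: python
+
+            for blob in container.list_blobs(name_starts_with="logs/", include=["metadata"]):
+                print(blob.name, blob.size)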
+        """
+        if kwargs.pop('prefix', None):
+            raise ValueError("Passing 'prefix' has no effect on filtering, " +
+                             "please use the 'name_starts_with' parameter instead.")
+
+        if include and not isinstance(include, list):
+            include = [include]
+
+        results_per_page = kwargs.pop('results_per_page', None)
+        timeout = kwargs.pop('timeout', None)
+        command = functools.partial(
+            self._client.container.list_blob_flat_segment,
+            include=include,
+            timeout=timeout,
+            **kwargs)
+        return ItemPaged(
+            command, prefix=name_starts_with, results_per_page=results_per_page, container=self.container_name,
+            page_iterator_class=BlobPropertiesPaged)
+
+    @distributed_trace
+    def list_blob_names(self, **kwargs: Any) -> ItemPaged[str]:
+        """Returns a generator to list the names of blobs under the specified container.
+        The generator will lazily follow the continuation tokens returned by
+        the service.
+
+        Note that no additional properties or metadata will be returned when using this API.
+        Additionally, this API does not have an option to include additional blobs such as snapshots,
+        versions, soft-deleted blobs, etc. To get any of this data, use :func:`list_blobs()`.
+
+        :keyword str name_starts_with:
+            Filters the results to return only blobs whose names
+            begin with the specified prefix.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :returns: An iterable (auto-paging) response of blob names as strings.
+        :rtype: ~azure.core.paging.ItemPaged[str]
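+
+        A minimal usage sketch (the prefix is a placeholder; assumes
+        ``container`` is an existing ``ContainerClient``):
+
+        .. code-block:: python
+
+            for name in container.list_blob_names(name_starts_with="logs/"):
+                print(name)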
+        """
+        if kwargs.pop('prefix', None):
+            raise ValueError("Passing 'prefix' has no effect on filtering, " +
+                             "please use the 'name_starts_with' parameter instead.")
+
+        name_starts_with = kwargs.pop('name_starts_with', None)
+        results_per_page = kwargs.pop('results_per_page', None)
+        timeout = kwargs.pop('timeout', None)
+
+        # For listing only names we need to create a one-off generated client and
+        # override its deserializer to prevent deserialization of the full response.
+        client = self._build_generated_client()
+        client.container._deserialize = IgnoreListBlobsDeserializer()  # pylint: disable=protected-access
+
+        command = functools.partial(
+            client.container.list_blob_flat_segment,
+            timeout=timeout,
+            **kwargs)
+        return ItemPaged(
+            command,
+            prefix=name_starts_with,
+            results_per_page=results_per_page,
+            container=self.container_name,
+            page_iterator_class=BlobNamesPaged)
+
+    @distributed_trace
+    def walk_blobs(
+        self, name_starts_with: Optional[str] = None,
+        include: Optional[Union[List[str], str]] = None,
+        delimiter: str = "/",
+        **kwargs: Any
+    ) -> ItemPaged[BlobProperties]:
+        """Returns a generator to list the blobs under the specified container.
+        The generator will lazily follow the continuation tokens returned by
+        the service. This operation will list blobs in accordance with a hierarchy,
+        as delimited by the specified delimiter character.
+
+        :param str name_starts_with:
+            Filters the results to return only blobs whose names
+            begin with the specified prefix.
+        :param include:
+            Specifies one or more additional datasets to include in the response.
+            Options include: 'snapshots', 'metadata', 'uncommittedblobs', 'copy', 'deleted', 'deletedwithversions',
+            'tags', 'versions', 'immutabilitypolicy', 'legalhold'.
+        :type include: list[str] or str
+        :param str delimiter:
+            When the request includes this parameter, the operation returns a BlobPrefix
+            element in the response body that acts as a placeholder for all blobs whose
+            names begin with the same substring up to the appearance of the delimiter
+            character. The delimiter may be a single character or a string.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :returns: An iterable (auto-paging) response of BlobProperties.
+        :rtype: ~azure.core.paging.ItemPaged[~azure.storage.blob.BlobProperties]
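+
+        A minimal usage sketch (assumes ``container`` is an existing
+        ``ContainerClient``):
+
+        .. code-block:: python
+
+            # Top-level listing: yields a BlobPrefix item for each virtual
+            # "directory" and BlobProperties for blobs at the root.
+            for item in container.walk_blobs(delimiter="/"):
+                print(item.name)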
+        """
+        if kwargs.pop('prefix', None):
+            raise ValueError("Passing 'prefix' has no effect on filtering, " +
+                             "please use the 'name_starts_with' parameter instead.")
+
+        if include and not isinstance(include, list):
+            include = [include]
+
+        results_per_page = kwargs.pop('results_per_page', None)
+        timeout = kwargs.pop('timeout', None)
+        command = functools.partial(
+            self._client.container.list_blob_hierarchy_segment,
+            delimiter=delimiter,
+            include=include,
+            timeout=timeout,
+            **kwargs)
+        return BlobPrefix(
+            command,
+            prefix=name_starts_with,
+            results_per_page=results_per_page,
+            container=self.container_name,
+            delimiter=delimiter)
+
+    @distributed_trace
+    def find_blobs_by_tags(
+        self, filter_expression: str,
+        **kwargs: Any
+    ) -> ItemPaged[FilteredBlob]:
+        """Returns a generator to list the blobs under the specified container whose tags
+        match the given search expression.
+        The generator will lazily follow the continuation tokens returned by
+        the service.
+
+        :param str filter_expression:
+            The expression to find blobs whose tags match the specified condition,
+            e.g. "\"yourtagname\"='firsttag' and \"yourtagname2\"='secondtag'"
+        :keyword int results_per_page:
+            The max result per page when paginating.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :returns: An iterable (auto-paging) response of FilteredBlob.
+        :rtype: ~azure.core.paging.ItemPaged[~azure.storage.blob.FilteredBlob]
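+
+        A minimal usage sketch (the tag name and value are placeholders;
+        assumes ``container`` is an existing ``ContainerClient``):
+
+        .. code-block:: python
+
+            for blob in container.find_blobs_by_tags("\"project\"='alpha'"):
+                print(blob.name)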
+        """
+        results_per_page = kwargs.pop('results_per_page', None)
+        timeout = kwargs.pop('timeout', None)
+        command = functools.partial(
+            self._client.container.filter_blobs,
+            timeout=timeout,
+            where=filter_expression,
+            **kwargs)
+        return ItemPaged(
+            command, results_per_page=results_per_page, container=self.container_name,
+            page_iterator_class=FilteredBlobPaged)
+
+    @distributed_trace
+    def upload_blob(
+        self, name: str,
+        data: Union[bytes, str, Iterable[AnyStr], IO[AnyStr]],
+        blob_type: Union[str, BlobType] = BlobType.BLOCKBLOB,
+        length: Optional[int] = None,
+        metadata: Optional[Dict[str, str]] = None,
+        **kwargs
+    ) -> BlobClient:
+        """Creates a new blob from a data source with automatic chunking.
+
+        :param str name: The blob with which to interact.
+        :param data: The blob data to upload.
+        :type data: Union[bytes, str, Iterable[AnyStr], IO[AnyStr]]
+        :param ~azure.storage.blob.BlobType blob_type: The type of the blob. This can be
+            either BlockBlob, PageBlob or AppendBlob. The default value is BlockBlob.
+        :param int length:
+            Number of bytes to read from the stream. This is optional, but
+            should be supplied for optimal performance.
+        :param metadata:
+            Name-value pairs associated with the blob as metadata.
+        :type metadata: dict(str, str)
+        :keyword bool overwrite: Whether the blob to be uploaded should overwrite the current data.
+            If True, upload_blob will overwrite the existing data. If set to False, the
+            operation will fail with ResourceExistsError. The exception to the above is with Append
+            blob types: if set to False and the data already exists, an error will not be raised
+            and the data will be appended to the existing blob. If overwrite is set to True, the existing
+            append blob will be deleted and a new one created. Defaults to False.
+        :keyword ~azure.storage.blob.ContentSettings content_settings:
+            ContentSettings object used to set blob properties. Used to set content type, encoding,
+            language, disposition, md5, and cache control.
+        :keyword bool validate_content:
+            If true, calculates an MD5 hash for each chunk of the blob. The storage
+            service checks the hash of the content that has arrived with the hash
+            that was sent. This is primarily valuable for detecting bitflips on
+            the wire if using http instead of https, as https (the default) will
+            already validate. Note that this MD5 hash is not stored with the
+            blob. Also note that if enabled, the memory-efficient upload algorithm
+            will not be used, because computing the MD5 hash requires buffering
+            entire blocks, and doing so defeats the purpose of the memory-efficient algorithm.
+        :keyword lease:
+            Required if the container has an active lease. Value can be a BlobLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword str if_tags_match_condition:
+            Specify a SQL where clause on blob tags to operate only on blobs with a matching value,
+            e.g. ``\"\\\"tagname\\\"='my tag'\"``
+
+            .. versionadded:: 12.4.0
+
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__. This method may make multiple calls to the service and
+            the timeout will apply to each call individually.
+        :keyword ~azure.storage.blob.PremiumPageBlobTier premium_page_blob_tier:
+            A page blob tier value to set the blob to. The tier correlates to the size of the
+            blob and number of allowed IOPS. This is only applicable to page blobs on
+            premium storage accounts.
+        :keyword ~azure.storage.blob.StandardBlobTier standard_blob_tier:
+            A standard blob tier value to set the blob to. For this version of the library,
+            this is only applicable to block blobs on standard storage accounts.
+        :keyword int maxsize_condition:
+            Optional conditional header. The max length in bytes permitted for
+            the append blob. If the Append Block operation would cause the blob
+            to exceed that limit or if the blob size is already greater than the
+            value specified in this header, the request will fail with
+            MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed).
+        :keyword int max_concurrency:
+            Maximum number of parallel connections to use when the blob size exceeds
+            64MB.
+        :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk:
+            Encrypts the data on the service-side with the given key.
+            Use of customer-provided keys must be done over HTTPS.
+            As the encryption key itself is provided in the request,
+            a secure connection must be established to transfer the key.
+        :keyword str encryption_scope:
+            A predefined encryption scope used to encrypt the data on the service. An encryption
+            scope can be created using the Management API and referenced here by name. If a default
+            encryption scope has been defined at the container, this value will override it if the
+            container-level scope is configured to allow overrides. Otherwise an error will be raised.
+
+            .. versionadded:: 12.2.0
+
+        :keyword str encoding:
+            Defaults to UTF-8.
+        :keyword progress_hook:
+            A callback to track the progress of a long running upload. The signature is
+            function(current: int, total: Optional[int]) where current is the number of bytes transferred
+            so far, and total is the size of the blob or None if the size is unknown.
+        :paramtype progress_hook: Callable[[int, Optional[int]], None]
+        :returns: A BlobClient to interact with the newly uploaded blob.
+        :rtype: ~azure.storage.blob.BlobClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/blob_samples_containers.py
+                :start-after: [START upload_blob_to_container]
+                :end-before: [END upload_blob_to_container]
+                :language: python
+                :dedent: 8
+                :caption: Upload blob to the container.
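+
+        A minimal usage sketch (the file and blob names are placeholders;
+        assumes ``container`` is an existing ``ContainerClient``):
+
+        .. code-block:: python
+
+            with open("report.csv", "rb") as data:
+                blob_client = container.upload_blob("report.csv", data, overwrite=True)
+            print(blob_client.url)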
+        """
+        if isinstance(name, BlobProperties):
+            warnings.warn(
+                "The use of a 'BlobProperties' instance for param name is deprecated. " +
+                "Please use 'BlobProperties.name' or any other str input type instead.",
+                DeprecationWarning
+            )
+        blob = self.get_blob_client(name)
+        kwargs.setdefault('merge_span', True)
+        timeout = kwargs.pop('timeout', None)
+        encoding = kwargs.pop('encoding', 'UTF-8')
+        blob.upload_blob(
+            data,
+            blob_type=blob_type,
+            length=length,
+            metadata=metadata,
+            timeout=timeout,
+            encoding=encoding,
+            **kwargs
+        )
+        return blob
+
+    @distributed_trace
+    def delete_blob(
+        self, blob: str,
+        delete_snapshots: Optional[str] = None,
+        **kwargs: Any
+    ) -> None:
+        """Marks the specified blob or snapshot for deletion.
+
+        The blob is later deleted during garbage collection.
+        Note that in order to delete a blob, you must delete all of its
+        snapshots. You can delete both at the same time with the delete_blob
+        operation.
+
+        If a delete retention policy is enabled for the service, then this operation soft deletes the blob or snapshot
+        and retains the blob or snapshot for the specified number of days.
+        After the specified number of days, the blob's data is removed from the service during garbage collection.
+        A soft-deleted blob or snapshot is accessible through :func:`list_blobs()` with the `include=["deleted"]`
+        option. A soft-deleted blob or snapshot can be restored using :func:`~azure.storage.blob.BlobClient.undelete()`.
+
+        :param str blob: The blob with which to interact.
+        :param str delete_snapshots:
+            Required if the blob has associated snapshots. Values include:
+             - "only": Deletes only the blobs snapshots.
+             - "include": Deletes the blob along with all snapshots.
+        :keyword str version_id:
+            The version id parameter is an opaque DateTime
+            value that, when present, specifies the version of the blob to delete.
+
+            .. versionadded:: 12.4.0
+
+            This keyword argument was introduced in API version '2019-12-12'.
+
+        :keyword lease:
+            Required if the blob has an active lease. Value can be a BlobLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword str if_tags_match_condition:
+            Specify a SQL where clause on blob tags to operate only on blobs with a matching value,
+            e.g. ``\"\\\"tagname\\\"='my tag'\"``
+
+            .. versionadded:: 12.4.0
+
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :rtype: None
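+
+        A minimal usage sketch (the blob name is a placeholder; assumes
+        ``container`` is an existing ``ContainerClient``):
+
+        .. code-block:: python
+
+            # Delete the blob together with any snapshots it may have.
+            container.delete_blob("report.csv", delete_snapshots="include")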
+        """
+        if isinstance(blob, BlobProperties):
+            warnings.warn(
+                "The use of a 'BlobProperties' instance for param blob is deprecated. " +
+                "Please use 'BlobProperties.name' or any other str input type instead.",
+                DeprecationWarning
+            )
+        blob_client = self.get_blob_client(blob) # type: ignore
+        kwargs.setdefault('merge_span', True)
+        timeout = kwargs.pop('timeout', None)
+        blob_client.delete_blob( # type: ignore
+            delete_snapshots=delete_snapshots,
+            timeout=timeout,
+            **kwargs)
+
+    @overload
+    def download_blob(
+        self, blob: str,
+        offset: Optional[int] = None,
+        length: Optional[int] = None,
+        *,
+        encoding: str,
+        **kwargs: Any
+    ) -> StorageStreamDownloader[str]:
+        ...
+
+    @overload
+    def download_blob(
+        self, blob: str,
+        offset: Optional[int] = None,
+        length: Optional[int] = None,
+        *,
+        encoding: None = None,
+        **kwargs: Any
+    ) -> StorageStreamDownloader[bytes]:
+        ...
+
+    @distributed_trace
+    def download_blob(
+        self, blob: str,
+        offset: Optional[int] = None,
+        length: Optional[int] = None,
+        *,
+        encoding: Union[str, None] = None,
+        **kwargs: Any
+    ) -> Union[StorageStreamDownloader[str], StorageStreamDownloader[bytes]]:
+        """Downloads a blob to the StorageStreamDownloader. The readall() method must
+        be used to read all the content or readinto() must be used to download the blob into
+        a stream. Using chunks() returns an iterator which allows the user to iterate over the content in chunks.
+
+        :param str blob: The blob with which to interact.
+        :param int offset:
+            Start of byte range to use for downloading a section of the blob.
+            Must be set if length is provided.
+        :param int length:
+            Number of bytes to read from the stream. This is optional, but
+            should be supplied for optimal performance.
+        :keyword str version_id:
+            The version id parameter is an opaque DateTime
+            value that, when present, specifies the version of the blob to download.
+
+            .. versionadded:: 12.4.0
+
+            This keyword argument was introduced in API version '2019-12-12'.
+
+        :keyword bool validate_content:
+            If true, calculates an MD5 hash for each chunk of the blob. The storage
+            service checks the hash of the content that has arrived with the hash
+            that was sent. This is primarily valuable for detecting bitflips on
+            the wire if using http instead of https, as https (the default) will
+            already validate. Note that this MD5 hash is not stored with the
+            blob. Also note that if enabled, the memory-efficient upload algorithm
+            will not be used because computing the MD5 hash requires buffering
+            entire blocks, and doing so defeats the purpose of the memory-efficient algorithm.
+        :keyword lease:
+            Required if the blob has an active lease. If specified, download_blob only
+            succeeds if the blob's lease is active and matches this ID. Value can be a
+            BlobLeaseClient object or the lease ID as a string.
+        :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword str if_tags_match_condition:
+            Specify a SQL where clause on blob tags to operate only on blobs with a matching value,
+            e.g. ``\"\\\"tagname\\\"='my tag'\"``
+
+            .. versionadded:: 12.4.0
+
+        :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk:
+            Encrypts the data on the service-side with the given key.
+            Use of customer-provided keys must be done over HTTPS.
+            As the encryption key itself is provided in the request,
+            a secure connection must be established to transfer the key.
+        :keyword int max_concurrency:
+            The number of parallel connections with which to download.
+        :keyword str encoding:
+            Encoding to decode the downloaded bytes. Default is None, i.e. no decoding.
+        :keyword progress_hook:
+            A callback to track the progress of a long running download. The signature is
+            function(current: int, total: int) where current is the number of bytes transferred
+            so far, and total is the total size of the download.
+        :paramtype progress_hook: Callable[[int, int], None]
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__. This method may make multiple calls to the service and
+            the timeout will apply to each call individually.
+        :returns: A streaming object (StorageStreamDownloader)
+        :rtype: ~azure.storage.blob.StorageStreamDownloader
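+
+        A minimal usage sketch (the blob name is a placeholder; assumes
+        ``container`` is an existing ``ContainerClient``):
+
+        .. code-block:: python
+
+            downloader = container.download_blob("report.csv")
+            content = downloader.readall()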
+        """
+        if isinstance(blob, BlobProperties):
+            warnings.warn(
+                "The use of a 'BlobProperties' instance for param blob is deprecated. " +
+                "Please use 'BlobProperties.name' or any other str input type instead.",
+                DeprecationWarning
+            )
+        blob_client = self.get_blob_client(blob) # type: ignore
+        kwargs.setdefault('merge_span', True)
+        return blob_client.download_blob(
+            offset=offset,
+            length=length,
+            encoding=encoding,
+            **kwargs)
+
+    @distributed_trace
+    def delete_blobs(  # pylint: disable=delete-operation-wrong-return-type
+        self, *blobs: Union[str, Dict[str, Any], BlobProperties],
+        **kwargs: Any
+    ) -> Iterator["HttpResponse"]:
+        """Marks the specified blobs or snapshots for deletion.
+
+        The blobs are later deleted during garbage collection.
+        Note that in order to delete blobs, you must delete all of their
+        snapshots. You can delete both at the same time with the delete_blobs operation.
+
+        If a delete retention policy is enabled for the service, then this operation soft deletes the blobs or snapshots
+        and retains the blobs or snapshots for the specified number of days.
+        After the specified number of days, the blobs' data is removed from the service during garbage collection.
+        Soft-deleted blobs or snapshots are accessible through :func:`list_blobs()` with the `include=["deleted"]`
+        option. Soft-deleted blobs or snapshots can be restored using :func:`~azure.storage.blob.BlobClient.undelete()`.
+
+        The maximum number of blobs that can be deleted in a single request is 256.
+
+        :param blobs:
+            The blobs to delete. This can be a single blob, or multiple values can
+            be supplied, where each value is either the name of the blob (str) or BlobProperties.
+
+            .. note::
+                When the blob type is dict, here's a list of keys, value rules.
+
+                blob name:
+                    key: 'name', value type: str
+                snapshot you want to delete:
+                    key: 'snapshot', value type: str
+                version id:
+                    key: 'version_id', value type: str
+                whether to delete snapshots when deleting blob:
+                    key: 'delete_snapshots', value: 'include' or 'only'
+                whether the blob has been modified:
+                    key: 'if_modified_since', 'if_unmodified_since', value type: datetime
+                etag:
+                    key: 'etag', value type: str
+                match the etag or not:
+                    key: 'match_condition', value type: MatchConditions
+                tags match condition:
+                    key: 'if_tags_match_condition', value type: str
+                lease:
+                    key: 'lease_id', value type: Union[str, LeaseClient]
+                timeout for subrequest:
+                    key: 'timeout', value type: int
+
+        :type blobs: Union[str, Dict[str, Any], BlobProperties]
+        :keyword str delete_snapshots:
+            Required if a blob has associated snapshots. Values include:
+             - "only": Deletes only the blobs snapshots.
+             - "include": Deletes the blob along with all snapshots.
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str if_tags_match_condition:
+            Specify a SQL where clause on blob tags to operate only on blobs with a matching value,
+            e.g. ``\"\\\"tagname\\\"='my tag'\"``
+
+            .. versionadded:: 12.4.0
+
+        :keyword bool raise_on_any_failure:
+            A boolean flag which defaults to True. When set, an exception is
+            raised if any single operation in the batch fails.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :return: An iterator of responses, one for each blob in order
+        :rtype: Iterator[~azure.core.pipeline.transport.HttpResponse]
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/blob_samples_common.py
+                :start-after: [START delete_multiple_blobs]
+                :end-before: [END delete_multiple_blobs]
+                :language: python
+                :dedent: 8
+                :caption: Deleting multiple blobs.
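+
+        A minimal usage sketch (the blob names are placeholders; assumes
+        ``container`` is an existing ``ContainerClient``):
+
+        .. code-block:: python
+
+            responses = container.delete_blobs("blob1", "blob2", raise_on_any_failure=False)
+            for response in responses:
+                print(response.status_code)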
+        """
+        if len(blobs) == 0:
+            return iter([])
+        if self._is_localhost:
+            kwargs['url_prepend'] = self.account_name
+
+        reqs, options = _generate_delete_blobs_options(
+            self._query_str,
+            self.container_name,
+            self._client,
+            *blobs,
+            **kwargs
+        )
+
+        return self._batch_send(*reqs, **options)
+
+    @distributed_trace
+    def set_standard_blob_tier_blobs(
+        self, standard_blob_tier: Optional[Union[str, "StandardBlobTier"]],
+        *blobs: Union[str, Dict[str, Any], BlobProperties],
+        **kwargs: Any
+    ) -> Iterator["HttpResponse"]:
+        """This operation sets the tier on block blobs.
+
+        A block blob's tier determines Hot/Cool/Archive storage type.
+        This operation does not update the blob's ETag.
+
+        The maximum number of blobs that can be updated in a single request is 256.
+
+        :param standard_blob_tier:
+            Indicates the tier to be set on all blobs. Options include 'Hot', 'Cool',
+            'Archive'. The hot tier is optimized for storing data that is accessed
+            frequently. The cool storage tier is optimized for storing data that
+            is infrequently accessed and stored for at least a month. The archive
+            tier is optimized for storing data that is rarely accessed and stored
+            for at least six months with flexible latency requirements.
+
+            .. note::
+                To set a different tier on each blob, set this positional parameter to None;
+                the blob tier on each BlobProperties instance will then be used.
+
+        :type standard_blob_tier: str or ~azure.storage.blob.StandardBlobTier
+        :param blobs:
+            The blobs with which to interact. This can be a single blob, or multiple values can
+            be supplied, where each value is either the name of the blob (str) or BlobProperties.
+
+            .. note::
+                When the blob type is dict, here's a list of keys, value rules.
+
+                blob name:
+                    key: 'name', value type: str
+                standard blob tier:
+                    key: 'blob_tier', value type: StandardBlobTier
+                rehydrate priority:
+                    key: 'rehydrate_priority', value type: RehydratePriority
+                lease:
+                    key: 'lease_id', value type: Union[str, LeaseClient]
+                snapshot:
+                    key: "snapshot", value type: str
+                version id:
+                    key: "version_id", value type: str
+                tags match condition:
+                    key: 'if_tags_match_condition', value type: str
+                timeout for subrequest:
+                    key: 'timeout', value type: int
+
+        :type blobs: str or dict(str, Any) or ~azure.storage.blob.BlobProperties
+        :keyword ~azure.storage.blob.RehydratePriority rehydrate_priority:
+            Indicates the priority with which to rehydrate an archived blob
+        :keyword str if_tags_match_condition:
+            Specify a SQL where clause on blob tags to operate only on blobs with a matching value.
+            e.g. ``\"\\\"tagname\\\"='my tag'\"``
+
+            .. versionadded:: 12.4.0
+
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :keyword bool raise_on_any_failure:
+            Whether to raise an exception if any single operation in the batch fails.
+            Defaults to True.
+        :return: An iterator of responses, one for each blob in order
+        :rtype: Iterator[~azure.core.pipeline.transport.HttpResponse]
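+
+        .. admonition:: Example:
+
+            A minimal sketch, assuming an existing ``container_client``; the blob names are
+            hypothetical:
+
+            .. code-block:: python
+
+                # Archive three blobs in a single batch request and inspect each
+                # sub-response instead of raising on the first failure.
+                responses = container_client.set_standard_blob_tier_blobs(
+                    "Archive",
+                    "blob1", "blob2", "blob3",
+                    raise_on_any_failure=False
+                )
+                for response in responses:
+                    print(response.status_code)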
+        """
+        if self._is_localhost:
+            kwargs['url_prepend'] = self.account_name
+        reqs, options = _generate_set_tiers_options(
+            self._query_str,
+            self.container_name,
+            standard_blob_tier,
+            self._client,
+            *blobs,
+            **kwargs)
+
+        return self._batch_send(*reqs, **options)
+
+    @distributed_trace
+    def set_premium_page_blob_tier_blobs(
+        self, premium_page_blob_tier: Optional[Union[str, "PremiumPageBlobTier"]],
+        *blobs: Union[str, Dict[str, Any], BlobProperties],
+        **kwargs: Any
+    ) -> Iterator["HttpResponse"]:
+        """Sets the page blob tiers on all blobs. This API is only supported for page blobs on premium accounts.
+
+        The maximum number of blobs that can be updated in a single request is 256.
+
+        :param premium_page_blob_tier:
+            A page blob tier value to set the blob to. The tier correlates to the size of the
+            blob and number of allowed IOPS. This is only applicable to page blobs on
+            premium storage accounts.
+
+            .. note::
+                To set a different tier on each blob, set this positional parameter to None.
+                The blob tier specified on each BlobProperties (or dict) will then be used.
+
+        :type premium_page_blob_tier: ~azure.storage.blob.PremiumPageBlobTier
+        :param blobs:
+            The blobs with which to interact. This can be a single blob, or multiple values can
+            be supplied, where each value is the name of the blob (str), a dict, or BlobProperties.
+
+            .. note::
+                When a blob is specified as a dict, the supported keys and value types are:
+
+                blob name:
+                    key: 'name', value type: str
+                premium blob tier:
+                    key: 'blob_tier', value type: PremiumPageBlobTier
+                lease:
+                    key: 'lease_id', value type: Union[str, LeaseClient]
+                timeout for subrequest:
+                    key: 'timeout', value type: int
+
+        :type blobs: str or dict(str, Any) or ~azure.storage.blob.BlobProperties
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :keyword bool raise_on_any_failure:
+            Whether to raise an exception if any single operation in the batch fails.
+            Defaults to True.
+        :return: An iterator of responses, one for each blob in order
+        :rtype: Iterator[~azure.core.pipeline.transport.HttpResponse]
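+
+        .. admonition:: Example:
+
+            A minimal sketch, assuming ``container_client`` targets a premium storage account
+            and the page blob names are hypothetical:
+
+            .. code-block:: python
+
+                from azure.storage.blob import PremiumPageBlobTier
+
+                # Move two page blobs to the P10 tier in one batch request.
+                container_client.set_premium_page_blob_tier_blobs(
+                    PremiumPageBlobTier.P10,
+                    "disk1.vhd", "disk2.vhd"
+                )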
+        """
+        if self._is_localhost:
+            kwargs['url_prepend'] = self.account_name
+        reqs, options = _generate_set_tiers_options(
+            self._query_str,
+            self.container_name,
+            premium_page_blob_tier,
+            self._client,
+            *blobs,
+            **kwargs)
+
+        return self._batch_send(*reqs, **options)
+
+    def get_blob_client(
+        self, blob: str,
+        snapshot: Optional[str] = None,
+        *,
+        version_id: Optional[str] = None
+    ) -> BlobClient:
+        """Get a client to interact with the specified blob.
+
+        The blob need not already exist.
+
+        :param str blob:
+            The blob with which to interact.
+        :param str snapshot:
+            The optional blob snapshot on which to operate. This can be the snapshot ID string
+            or the response returned from :func:`~BlobClient.create_snapshot()`.
+        :keyword str version_id: The version id parameter is an opaque DateTime value that, when present,
+            specifies the version of the blob to operate on.
+        :returns: A BlobClient.
+        :rtype: ~azure.storage.blob.BlobClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/blob_samples_containers.py
+                :start-after: [START get_blob_client]
+                :end-before: [END get_blob_client]
+                :language: python
+                :dedent: 8
+                :caption: Get the blob client.
+        """
+        if isinstance(blob, BlobProperties):
+            warnings.warn(
+                "The use of a 'BlobProperties' instance for param blob is deprecated. " +
+                "Please use 'BlobProperties.name' or any other str input type instead.",
+                DeprecationWarning
+            )
+            blob_name = blob.get('name')
+        else:
+            blob_name = blob
+        _pipeline = Pipeline(
+            transport=TransportWrapper(self._pipeline._transport), # pylint: disable = protected-access
+            policies=self._pipeline._impl_policies # pylint: disable = protected-access
+        )
+        return BlobClient(
+            self.url, container_name=self.container_name, blob_name=blob_name, snapshot=snapshot,
+            credential=self.credential, api_version=self.api_version, _configuration=self._config,
+            _pipeline=_pipeline, _location_mode=self._location_mode, _hosts=self._hosts,
+            require_encryption=self.require_encryption, encryption_version=self.encryption_version,
+            key_encryption_key=self.key_encryption_key, key_resolver_function=self.key_resolver_function,
+            version_id=version_id)
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/blob/_container_client_helpers.py b/.venv/lib/python3.12/site-packages/azure/storage/blob/_container_client_helpers.py
new file mode 100644
index 00000000..82edd48d
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/blob/_container_client_helpers.py
@@ -0,0 +1,266 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+
+from typing import Any, Dict, List, Optional, Tuple, TYPE_CHECKING, Union
+from urllib.parse import quote, urlparse
+
+from azure.core import MatchConditions
+from azure.core.pipeline.transport import HttpRequest
+from ._blob_client_helpers import _generic_delete_blob_options
+from ._generated import AzureBlobStorage
+from ._models import BlobProperties
+from ._shared.base_client import parse_query
+
+if TYPE_CHECKING:
+    from azure.storage.blob import RehydratePriority
+    from urllib.parse import ParseResult
+    from ._generated.models import LeaseAccessConditions, ModifiedAccessConditions
+    from ._models import PremiumPageBlobTier, StandardBlobTier
+
+
+def _parse_url(account_url: str, container_name: str) -> Tuple["ParseResult", Any]:
+    try:
+        if not account_url.lower().startswith('http'):
+            account_url = "https://" + account_url
+    except AttributeError as exc:
+        raise ValueError("Container URL must be a string.") from exc
+    parsed_url = urlparse(account_url.rstrip('/'))
+    if not container_name:
+        raise ValueError("Please specify a container name.")
+    if not parsed_url.netloc:
+        raise ValueError(f"Invalid URL: {account_url}")
+
+    _, sas_token = parse_query(parsed_url.query)
+
+    return parsed_url, sas_token
+
+def _format_url(container_name: Union[bytes, str], hostname: str, scheme: str, query_str: str) -> str:
+    if isinstance(container_name, str):
+        container_name = container_name.encode('UTF-8')
+    return f"{scheme}://{hostname}/{quote(container_name)}{query_str}"
+
+# This code is a copy from _generated.
+# Once Autorest is able to provide request preparation this code should be removed.
+def _generate_delete_blobs_subrequest_options(
+    client: AzureBlobStorage,
+    snapshot: Optional[str] = None,
+    version_id: Optional[str] = None,
+    delete_snapshots: Optional[str] = None,
+    lease_access_conditions: Optional["LeaseAccessConditions"] = None,
+    modified_access_conditions: Optional["ModifiedAccessConditions"] = None,
+    **kwargs
+) -> Tuple[Dict[str, Any], Dict[str, Any]]:
+    lease_id = None
+    if lease_access_conditions is not None:
+        lease_id = lease_access_conditions.lease_id
+    if_modified_since = None
+    if modified_access_conditions is not None:
+        if_modified_since = modified_access_conditions.if_modified_since
+    if_unmodified_since = None
+    if modified_access_conditions is not None:
+        if_unmodified_since = modified_access_conditions.if_unmodified_since
+    if_match = None
+    if modified_access_conditions is not None:
+        if_match = modified_access_conditions.if_match
+    if_none_match = None
+    if modified_access_conditions is not None:
+        if_none_match = modified_access_conditions.if_none_match
+    if_tags = None
+    if modified_access_conditions is not None:
+        if_tags = modified_access_conditions.if_tags
+
+    # Construct parameters
+    timeout = kwargs.pop('timeout', None)
+    query_parameters = {}
+    if snapshot is not None:
+        query_parameters['snapshot'] = client._serialize.query("snapshot", snapshot, 'str')  # pylint: disable=protected-access
+    if version_id is not None:
+        query_parameters['versionid'] = client._serialize.query("version_id", version_id, 'str')  # pylint: disable=protected-access
+    if timeout is not None:
+        query_parameters['timeout'] = client._serialize.query("timeout", timeout, 'int', minimum=0)  # pylint: disable=protected-access
+
+    # Construct headers
+    header_parameters = {}
+    if delete_snapshots is not None:
+        header_parameters['x-ms-delete-snapshots'] = client._serialize.header(  # pylint: disable=protected-access
+            "delete_snapshots", delete_snapshots, 'DeleteSnapshotsOptionType')
+    if lease_id is not None:
+        header_parameters['x-ms-lease-id'] = client._serialize.header(  # pylint: disable=protected-access
+            "lease_id", lease_id, 'str')
+    if if_modified_since is not None:
+        header_parameters['If-Modified-Since'] = client._serialize.header(  # pylint: disable=protected-access
+            "if_modified_since", if_modified_since, 'rfc-1123')
+    if if_unmodified_since is not None:
+        header_parameters['If-Unmodified-Since'] = client._serialize.header(  # pylint: disable=protected-access
+            "if_unmodified_since", if_unmodified_since, 'rfc-1123')
+    if if_match is not None:
+        header_parameters['If-Match'] = client._serialize.header(  # pylint: disable=protected-access
+            "if_match", if_match, 'str')
+    if if_none_match is not None:
+        header_parameters['If-None-Match'] = client._serialize.header(  # pylint: disable=protected-access
+            "if_none_match", if_none_match, 'str')
+    if if_tags is not None:
+        header_parameters['x-ms-if-tags'] = client._serialize.header("if_tags", if_tags, 'str')  # pylint: disable=protected-access
+
+    return query_parameters, header_parameters
+
+def _generate_delete_blobs_options(
+    query_str: str,
+    container_name: str,
+    client: AzureBlobStorage,
+    *blobs: Union[str, Dict[str, Any], BlobProperties],
+    **kwargs: Any
+) -> Tuple[List[HttpRequest], Dict[str, Any]]:
+    timeout = kwargs.pop('timeout', None)
+    raise_on_any_failure = kwargs.pop('raise_on_any_failure', True)
+    delete_snapshots = kwargs.pop('delete_snapshots', None)
+    if_modified_since = kwargs.pop('if_modified_since', None)
+    if_unmodified_since = kwargs.pop('if_unmodified_since', None)
+    if_tags_match_condition = kwargs.pop('if_tags_match_condition', None)
+    url_prepend = kwargs.pop('url_prepend', None)
+    kwargs.update({'raise_on_any_failure': raise_on_any_failure,
+                    'sas': query_str.replace('?', '&'),
+                    'timeout': '&timeout=' + str(timeout) if timeout else "",
+                    'path': container_name,
+                    'restype': 'restype=container&'
+                    })
+
+    reqs = []
+    for blob in blobs:
+        if not isinstance(blob, str):
+            blob_name = blob.get('name')
+            options = _generic_delete_blob_options(
+                snapshot=blob.get('snapshot'),
+                version_id=blob.get('version_id'),
+                delete_snapshots=delete_snapshots or blob.get('delete_snapshots'),
+                lease=blob.get('lease_id'),
+                if_modified_since=if_modified_since or blob.get('if_modified_since'),
+                if_unmodified_since=if_unmodified_since or blob.get('if_unmodified_since'),
+                etag=blob.get('etag'),
+                if_tags_match_condition=if_tags_match_condition or blob.get('if_tags_match_condition'),
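+                # When an etag is supplied, fall back to IfNotModified if no explicit
+                # match_condition was given; without an etag, send no match condition.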
+                match_condition=blob.get('match_condition') or MatchConditions.IfNotModified if blob.get('etag')
+                else None,
+                timeout=blob.get('timeout'),
+            )
+        else:
+            blob_name = blob
+            options = _generic_delete_blob_options(
+                delete_snapshots=delete_snapshots,
+                if_modified_since=if_modified_since,
+                if_unmodified_since=if_unmodified_since,
+                if_tags_match_condition=if_tags_match_condition
+            )
+
+        query_parameters, header_parameters = _generate_delete_blobs_subrequest_options(client, **options)
+
+        req = HttpRequest(
+            "DELETE",
+            (f"{'/' + quote(url_prepend) if url_prepend else ''}/"
+             f"{quote(container_name)}/{quote(str(blob_name), safe='/~')}{query_str}"),
+            headers=header_parameters
+        )
+
+        req.format_parameters(query_parameters)
+        reqs.append(req)
+
+    return reqs, kwargs
+
+# This code is a copy from _generated.
+# Once Autorest is able to provide request preparation this code should be removed.
+def _generate_set_tiers_subrequest_options(
+    client: AzureBlobStorage,
+    tier: Optional[Union["PremiumPageBlobTier", "StandardBlobTier", str]],
+    snapshot: Optional[str] = None,
+    version_id: Optional[str] = None,
+    rehydrate_priority: Optional["RehydratePriority"] = None,
+    lease_access_conditions: Optional["LeaseAccessConditions"] = None,
+    **kwargs: Any
+) -> Tuple[Dict[str, Any], Dict[str, Any]]:
+    if not tier:
+        raise ValueError("A blob tier must be specified")
+    if snapshot and version_id:
+        raise ValueError("Snapshot and version_id cannot be set at the same time")
+    if_tags = kwargs.pop('if_tags', None)
+
+    lease_id = None
+    if lease_access_conditions is not None:
+        lease_id = lease_access_conditions.lease_id
+
+    comp = "tier"
+    timeout = kwargs.pop('timeout', None)
+    # Construct parameters
+    query_parameters = {}
+    if snapshot is not None:
+        query_parameters['snapshot'] = client._serialize.query("snapshot", snapshot, 'str')  # pylint: disable=protected-access
+    if version_id is not None:
+        query_parameters['versionid'] = client._serialize.query("version_id", version_id, 'str')  # pylint: disable=protected-access
+    if timeout is not None:
+        query_parameters['timeout'] = client._serialize.query("timeout", timeout, 'int', minimum=0)  # pylint: disable=protected-access
+    query_parameters['comp'] = client._serialize.query("comp", comp, 'str')  # pylint: disable=protected-access
+
+    # Construct headers
+    header_parameters = {}
+    header_parameters['x-ms-access-tier'] = client._serialize.header("tier", tier, 'str')  # pylint: disable=protected-access
+    if rehydrate_priority is not None:
+        header_parameters['x-ms-rehydrate-priority'] = client._serialize.header(  # pylint: disable=protected-access
+            "rehydrate_priority", rehydrate_priority, 'str')
+    if lease_id is not None:
+        header_parameters['x-ms-lease-id'] = client._serialize.header("lease_id", lease_id, 'str')  # pylint: disable=protected-access
+    if if_tags is not None:
+        header_parameters['x-ms-if-tags'] = client._serialize.header("if_tags", if_tags, 'str')  # pylint: disable=protected-access
+
+    return query_parameters, header_parameters
+
+def _generate_set_tiers_options(
+    query_str: str,
+    container_name: str,
+    blob_tier: Optional[Union["PremiumPageBlobTier", "StandardBlobTier", str]],
+    client: AzureBlobStorage,
+    *blobs: Union[str, Dict[str, Any], BlobProperties],
+    **kwargs: Any
+) -> Tuple[List[HttpRequest], Dict[str, Any]]:
+    timeout = kwargs.pop('timeout', None)
+    raise_on_any_failure = kwargs.pop('raise_on_any_failure', True)
+    rehydrate_priority = kwargs.pop('rehydrate_priority', None)
+    if_tags = kwargs.pop('if_tags_match_condition', None)
+    url_prepend = kwargs.pop('url_prepend', None)
+    kwargs.update({'raise_on_any_failure': raise_on_any_failure,
+                    'sas': query_str.replace('?', '&'),
+                    'timeout': '&timeout=' + str(timeout) if timeout else "",
+                    'path': container_name,
+                    'restype': 'restype=container&'
+                    })
+
+    reqs = []
+    for blob in blobs:
+        if not isinstance(blob, str):
+            blob_name = blob.get('name')
+            tier = blob_tier or blob.get('blob_tier')
+            query_parameters, header_parameters = _generate_set_tiers_subrequest_options(
+                client=client,
+                tier=tier,
+                snapshot=blob.get('snapshot'),
+                version_id=blob.get('version_id'),
+                rehydrate_priority=rehydrate_priority or blob.get('rehydrate_priority'),
+                lease_access_conditions=blob.get('lease_id'),
+                if_tags=if_tags or blob.get('if_tags_match_condition'),
+                timeout=timeout or blob.get('timeout')
+            )
+        else:
+            blob_name = blob
+            query_parameters, header_parameters = _generate_set_tiers_subrequest_options(
+                client, blob_tier, rehydrate_priority=rehydrate_priority, if_tags=if_tags)
+
+        req = HttpRequest(
+            "PUT",
+            (f"{'/' + quote(url_prepend) if url_prepend else ''}/"
+             f"{quote(container_name)}/{quote(str(blob_name), safe='/~')}{query_str}"),
+            headers=header_parameters
+        )
+        req.format_parameters(query_parameters)
+        reqs.append(req)
+
+    return reqs, kwargs
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/blob/_deserialize.py b/.venv/lib/python3.12/site-packages/azure/storage/blob/_deserialize.py
new file mode 100644
index 00000000..b6ee9160
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/blob/_deserialize.py
@@ -0,0 +1,234 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+
+from typing import Any, Dict, List, Optional, Tuple, TYPE_CHECKING
+from urllib.parse import unquote
+from xml.etree.ElementTree import Element
+
+from ._models import (
+    BlobAnalyticsLogging,
+    BlobProperties,
+    BlobType,
+    ContainerProperties,
+    ContentSettings,
+    CopyProperties,
+    CorsRule,
+    ImmutabilityPolicy,
+    LeaseProperties,
+    Metrics,
+    ObjectReplicationPolicy,
+    ObjectReplicationRule,
+    RetentionPolicy,
+    StaticWebsite
+)
+from ._shared.models import get_enum_value
+from ._shared.response_handlers import deserialize_metadata
+
+if TYPE_CHECKING:
+    from azure.core.pipeline import PipelineResponse
+    from ._generated.models import (
+        BlobItemInternal,
+        BlobTags,
+        PageList,
+        StorageServiceProperties,
+        StorageServiceStats,
+    )
+    from ._shared.models import LocationMode
+
+def deserialize_pipeline_response_into_cls(cls_method, response: "PipelineResponse", obj: Any, headers: Dict[str, Any]):
+    try:
+        deserialized_response = response.http_response
+    except AttributeError:
+        deserialized_response = response
+    return cls_method(deserialized_response, obj, headers)
+
+
+def deserialize_blob_properties(response: "PipelineResponse", obj: Any, headers: Dict[str, Any]) -> BlobProperties:
+    blob_properties = BlobProperties(
+        metadata=deserialize_metadata(response, obj, headers),
+        object_replication_source_properties=deserialize_ors_policies(response.http_response.headers),
+        **headers
+    )
+    if 'Content-Range' in headers:
+        if 'x-ms-blob-content-md5' in headers:
+            blob_properties.content_settings.content_md5 = headers['x-ms-blob-content-md5']
+        else:
+            blob_properties.content_settings.content_md5 = None
+    return blob_properties
+
+
+def deserialize_ors_policies(policy_dictionary: Optional[Dict[str, str]]) -> Optional[List[ObjectReplicationPolicy]]:
+
+    if policy_dictionary is None:
+        return None
+    # For source blobs (blobs that have policy ids and rule ids applied to them),
+    # the header will be formatted as "x-ms-or-<policy_id>_<rule_id>: {Complete, Failed}".
+    # The value of this header is the status of the replication.
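+    # For example, {'x-ms-or-<policy GUID>_<rule GUID>': 'Complete'} parses into a single
+    # ObjectReplicationPolicy whose rules list holds one ObjectReplicationRule with that
+    # rule id and a status of 'Complete'.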
+    or_policy_status_headers = {key: val for key, val in policy_dictionary.items()
+                                if 'or-' in key and key != 'x-ms-or-policy-id'}
+
+    parsed_result: Dict[str, List[ObjectReplicationRule]] = {}
+
+    for key, val in or_policy_status_headers.items():
+        # list blobs gives or-policy_rule and get blob properties gives x-ms-or-policy_rule
+        policy_and_rule_ids = key.split('or-')[1].split('_')
+        policy_id = policy_and_rule_ids[0]
+        rule_id = policy_and_rule_ids[1]
+
+        # If we are seeing this policy for the first time, create a new list to store rule_id -> result
+        parsed_result[policy_id] = parsed_result.get(policy_id) or []
+        parsed_result[policy_id].append(ObjectReplicationRule(rule_id=rule_id, status=val))
+
+    result_list = [ObjectReplicationPolicy(policy_id=k, rules=v) for k, v in parsed_result.items()]
+
+    return result_list
+
+
+def deserialize_blob_stream(
+    response: "PipelineResponse",
+    obj: Any,
+    headers: Dict[str, Any]
+) -> Tuple["LocationMode", Any]:
+    blob_properties = deserialize_blob_properties(response, obj, headers)
+    obj.properties = blob_properties
+    return response.http_response.location_mode, obj
+
+
+def deserialize_container_properties(
+    response: "PipelineResponse",
+    obj: Any,
+    headers: Dict[str, Any]
+) -> ContainerProperties:
+    metadata = deserialize_metadata(response, obj, headers)
+    container_properties = ContainerProperties(
+        metadata=metadata,
+        **headers
+    )
+    return container_properties
+
+
+def get_page_ranges_result(ranges: "PageList") -> Tuple[List[Dict[str, int]], List[Dict[str, int]]]:
+    page_range = []
+    clear_range = []
+    if ranges.page_range:
+        page_range = [{'start': b.start, 'end': b.end} for b in ranges.page_range]
+    if ranges.clear_range:
+        clear_range = [{'start': b.start, 'end': b.end} for b in ranges.clear_range]
+    return page_range, clear_range
+
+
+def service_stats_deserialize(generated: "StorageServiceStats") -> Dict[str, Any]:
+    status = None
+    last_sync_time = None
+    if generated.geo_replication is not None:
+        status = generated.geo_replication.status
+        last_sync_time = generated.geo_replication.last_sync_time
+    return {
+        'geo_replication': {
+            'status': status,
+            'last_sync_time': last_sync_time
+        }
+    }
+
+def service_properties_deserialize(generated: "StorageServiceProperties") -> Dict[str, Any]:
+    cors_list = None
+    if generated.cors is not None:
+        cors_list = [CorsRule._from_generated(cors) for cors in generated.cors]  # pylint: disable=protected-access
+    return {
+        'analytics_logging': BlobAnalyticsLogging._from_generated(generated.logging),  # pylint: disable=protected-access
+        'hour_metrics': Metrics._from_generated(generated.hour_metrics),  # pylint: disable=protected-access
+        'minute_metrics': Metrics._from_generated(generated.minute_metrics),  # pylint: disable=protected-access
+        'cors': cors_list,
+        'target_version': generated.default_service_version,
+        'delete_retention_policy': RetentionPolicy._from_generated(generated.delete_retention_policy),  # pylint: disable=protected-access
+        'static_website': StaticWebsite._from_generated(generated.static_website),  # pylint: disable=protected-access
+    }
+
+
+def get_blob_properties_from_generated_code(generated: "BlobItemInternal") -> BlobProperties:
+    blob = BlobProperties()
+    if generated.name.encoded and generated.name.content is not None:
+        blob.name = unquote(generated.name.content)
+    else:
+        blob.name = generated.name.content  # type: ignore
+    blob_type = get_enum_value(generated.properties.blob_type)
+    blob.blob_type = BlobType(blob_type)
+    blob.etag = generated.properties.etag
+    blob.deleted = generated.deleted
+    blob.snapshot = generated.snapshot
+    blob.is_append_blob_sealed = generated.properties.is_sealed
+    blob.metadata = generated.metadata.additional_properties if generated.metadata else {}  # type: ignore [assignment]
+    blob.encrypted_metadata = generated.metadata.encrypted if generated.metadata else None
+    blob.lease = LeaseProperties._from_generated(generated)  # pylint: disable=protected-access
+    blob.copy = CopyProperties._from_generated(generated)  # pylint: disable=protected-access
+    blob.last_modified = generated.properties.last_modified
+    blob.creation_time = generated.properties.creation_time  # type: ignore [assignment]
+    blob.content_settings = ContentSettings._from_generated(generated)  # pylint: disable=protected-access
+    blob.size = generated.properties.content_length  # type: ignore [assignment]
+    blob.page_blob_sequence_number = generated.properties.blob_sequence_number
+    blob.server_encrypted = generated.properties.server_encrypted  # type: ignore [assignment]
+    blob.encryption_scope = generated.properties.encryption_scope
+    blob.deleted_time = generated.properties.deleted_time
+    blob.remaining_retention_days = generated.properties.remaining_retention_days
+    blob.blob_tier = generated.properties.access_tier  # type: ignore [assignment]
+    blob.rehydrate_priority = generated.properties.rehydrate_priority
+    blob.blob_tier_inferred = generated.properties.access_tier_inferred
+    blob.archive_status = generated.properties.archive_status
+    blob.blob_tier_change_time = generated.properties.access_tier_change_time
+    blob.version_id = generated.version_id
+    blob.is_current_version = generated.is_current_version
+    blob.tag_count = generated.properties.tag_count
+    blob.tags = parse_tags(generated.blob_tags)
+    blob.object_replication_source_properties = deserialize_ors_policies(generated.object_replication_metadata)
+    blob.last_accessed_on = generated.properties.last_accessed_on
+    blob.immutability_policy = ImmutabilityPolicy._from_generated(generated)  # pylint: disable=protected-access
+    blob.has_legal_hold = generated.properties.legal_hold
+    blob.has_versions_only = generated.has_versions_only
+    return blob
+
+def parse_tags(generated_tags: Optional["BlobTags"]) -> Optional[Dict[str, str]]:
+    """Deserialize a list of BlobTag objects into a dict.
+
+    :param Optional[BlobTags] generated_tags:
+        A list containing the BlobTag objects from generated code.
+    :returns: A dictionary of the BlobTag objects.
+    :rtype: Optional[Dict[str, str]]
+    """
+    if generated_tags:
+        tag_dict = {t.key: t.value for t in generated_tags.blob_tag_set}
+        return tag_dict
+    return None
+
+
+def load_single_xml_node(element: Element, name: str) -> Optional[Element]:
+    return element.find(name)
+
+
+def load_many_xml_nodes(
+    element: Element,
+    name: str,
+    wrapper: Optional[str] = None
+) -> List[Optional[Element]]:
+    found_element: Optional[Element] = element
+    if wrapper:
+        found_element = load_single_xml_node(element, wrapper)
+    if found_element is None:
+        return []
+    return list(found_element.findall(name))
+
+
+def load_xml_string(element: Element, name: str) -> Optional[str]:
+    node = element.find(name)
+    if node is None or not node.text:
+        return None
+    return node.text
+
+
+def load_xml_int(element: Element, name: str) -> Optional[int]:
+    node = element.find(name)
+    if node is None or not node.text:
+        return None
+    return int(node.text)
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/blob/_download.py b/.venv/lib/python3.12/site-packages/azure/storage/blob/_download.py
new file mode 100644
index 00000000..090c226c
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/blob/_download.py
@@ -0,0 +1,933 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+import codecs
+import sys
+import threading
+import time
+import warnings
+from io import BytesIO, StringIO
+from typing import (
+    Any, Callable, cast, Dict, Generator,
+    Generic, IO, Iterator, List, Optional,
+    overload, Tuple, TypeVar, Union, TYPE_CHECKING
+)
+
+from azure.core.exceptions import DecodeError, HttpResponseError, IncompleteReadError
+from azure.core.tracing.common import with_current_context
+
+from ._shared.request_handlers import validate_and_format_range_headers
+from ._shared.response_handlers import parse_length_from_content_range, process_storage_error
+from ._deserialize import deserialize_blob_properties, get_page_ranges_result
+from ._encryption import (
+    adjust_blob_size_for_encryption,
+    decrypt_blob,
+    get_adjusted_download_range_and_offset,
+    is_encryption_v2,
+    parse_encryption_data
+)
+
+if TYPE_CHECKING:
+    from codecs import IncrementalDecoder
+    from ._encryption import _EncryptionData
+    from ._generated import AzureBlobStorage
+    from ._generated.operations import BlobOperations
+    from ._models import BlobProperties
+    from ._shared.models import StorageConfiguration
+
+
+T = TypeVar('T', bytes, str)
+
+
+def process_range_and_offset(
+    start_range: int,
+    end_range: int,
+    length: Optional[int],
+    encryption_options: Dict[str, Any],
+    encryption_data: Optional["_EncryptionData"]
+) -> Tuple[Tuple[int, int], Tuple[int, int]]:
+    start_offset, end_offset = 0, 0
+    if encryption_options.get("key") is not None or encryption_options.get("resolver") is not None:
+        return get_adjusted_download_range_and_offset(
+            start_range,
+            end_range,
+            length,
+            encryption_data)
+
+    return (start_range, end_range), (start_offset, end_offset)
+
+
+def process_content(data: Any, start_offset: int, end_offset: int, encryption: Dict[str, Any]) -> bytes:
+    if data is None:
+        raise ValueError("Response cannot be None.")
+
+    content = b"".join(list(data))
+
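+    # Note the evaluation order: this condition reads as
+    # (content and key is not None) or (resolver is not None), so decryption is
+    # attempted whenever a key resolver is configured, even for empty content.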
+    if content and encryption.get("key") is not None or encryption.get("resolver") is not None:
+        try:
+            return decrypt_blob(
+                encryption.get("required") or False,
+                encryption.get("key"),
+                encryption.get("resolver"),
+                content,
+                start_offset,
+                end_offset,
+                data.response.headers,
+            )
+        except Exception as error:
+            raise HttpResponseError(message="Decryption failed.", response=data.response, error=error) from error
+    return content
+
+
+class _ChunkDownloader(object):  # pylint: disable=too-many-instance-attributes
+    def __init__(
+        self,
+        client: "BlobOperations",
+        total_size: int,
+        chunk_size: int,
+        current_progress: int,
+        start_range: int,
+        end_range: int,
+        validate_content: bool,
+        encryption_options: Dict[str, Any],
+        encryption_data: Optional["_EncryptionData"] = None,
+        stream: Any = None,
+        parallel: Optional[int] = None,
+        non_empty_ranges: Optional[List[Dict[str, Any]]] = None,
+        progress_hook: Optional[Callable[[int, Optional[int]], None]] = None,
+        **kwargs: Any
+    ) -> None:
+        self.client = client
+        self.non_empty_ranges = non_empty_ranges
+
+        # Information on the download range/chunk size
+        self.chunk_size = chunk_size
+        self.total_size = total_size
+        self.start_index = start_range
+        self.end_index = end_range
+
+        # The destination that we will write to
+        self.stream = stream
+        self.stream_lock = threading.Lock() if parallel else None
+        self.progress_lock = threading.Lock() if parallel else None
+        self.progress_hook = progress_hook
+
+        # For a parallel download, the stream is always seekable, so we note down the current position
+        # in order to seek to the right place when out-of-order chunks come in
+        self.stream_start = stream.tell() if parallel else 0
+
+        # Download progress so far
+        self.progress_total = current_progress
+
+        # Encryption
+        self.encryption_options = encryption_options
+        self.encryption_data = encryption_data
+
+        # Parameters for each get operation
+        self.validate_content = validate_content
+        self.request_options = kwargs
+
+    def _calculate_range(self, chunk_start: int) -> Tuple[int, int]:
+        if chunk_start + self.chunk_size > self.end_index:
+            chunk_end = self.end_index
+        else:
+            chunk_end = chunk_start + self.chunk_size
+        return chunk_start, chunk_end
+
+    def get_chunk_offsets(self) -> Generator[int, None, None]:
+        index = self.start_index
+        while index < self.end_index:
+            yield index
+            index += self.chunk_size
+
+    def process_chunk(self, chunk_start: int) -> None:
+        chunk_start, chunk_end = self._calculate_range(chunk_start)
+        chunk_data, _ = self._download_chunk(chunk_start, chunk_end - 1)
+        length = chunk_end - chunk_start
+        if length > 0:
+            self._write_to_stream(chunk_data, chunk_start)
+            self._update_progress(length)
+
+    def yield_chunk(self, chunk_start: int) -> Tuple[bytes, int]:
+        chunk_start, chunk_end = self._calculate_range(chunk_start)
+        return self._download_chunk(chunk_start, chunk_end - 1)
+
+    def _update_progress(self, length: int) -> None:
+        if self.progress_lock:
+            with self.progress_lock:  # pylint: disable=not-context-manager
+                self.progress_total += length
+        else:
+            self.progress_total += length
+
+        if self.progress_hook:
+            self.progress_hook(self.progress_total, self.total_size)
+
+    def _write_to_stream(self, chunk_data: bytes, chunk_start: int) -> None:
+        if self.stream_lock:
+            with self.stream_lock:  # pylint: disable=not-context-manager
+                self.stream.seek(self.stream_start + (chunk_start - self.start_index))
+                self.stream.write(chunk_data)
+        else:
+            self.stream.write(chunk_data)
+
+    def _do_optimize(self, given_range_start: int, given_range_end: int) -> bool:
+        # If we have no page range list stored, then assume there's data everywhere for that page blob
+        # or it's a block blob or append blob
+        if self.non_empty_ranges is None:
+            return False
+
+        for source_range in self.non_empty_ranges:
+            # Case 1: As the range list is sorted, if we've reached such a source_range
+            # we've checked all the appropriate source_range already and haven't found any overlapping.
+            # so the given range doesn't have any data and download optimization could be applied.
+            # given range:		|   |
+            # source range:			       |   |
+            if given_range_end < source_range['start']:  # pylint:disable=no-else-return
+                return True
+            # Case 2: the given range comes after source_range, continue checking.
+            # given range:				|   |
+            # source range:	|   |
+            elif source_range['end'] < given_range_start:
+                pass
+            # Case 3: source_range and given range overlap somehow, no need to optimize.
+            else:
+                return False
+        # Went through all src_ranges, but nothing overlapped. Optimization will be applied.
+        return True
+
+    def _download_chunk(self, chunk_start: int, chunk_end: int) -> Tuple[bytes, int]:
+        if self.encryption_options is None:
+            raise ValueError("Required argument is missing: encryption_options")
+        download_range, offset = process_range_and_offset(
+            chunk_start, chunk_end, chunk_end, self.encryption_options, self.encryption_data
+        )
+
+        # No need to download the empty chunk from server if there's no data in the chunk to be downloaded.
+        # Do optimize and create empty chunk locally if condition is met.
+        if self._do_optimize(download_range[0], download_range[1]):
+            content_length = download_range[1] - download_range[0] + 1
+            chunk_data = b"\x00" * content_length
+        else:
+            range_header, range_validation = validate_and_format_range_headers(
+                download_range[0],
+                download_range[1],
+                check_content_md5=self.validate_content
+            )
+
+            retry_active = True
+            retry_total = 3
+            while retry_active:
+                response: Any = None
+                try:
+                    _, response = self.client.download(
+                        range=range_header,
+                        range_get_content_md5=range_validation,
+                        validate_content=self.validate_content,
+                        data_stream_total=self.total_size,
+                        download_stream_current=self.progress_total,
+                        **self.request_options
+                    )
+                except HttpResponseError as error:
+                    process_storage_error(error)
+
+                try:
+                    chunk_data = process_content(response, offset[0], offset[1], self.encryption_options)
+                    retry_active = False
+                except (IncompleteReadError, HttpResponseError, DecodeError) as error:
+                    retry_total -= 1
+                    if retry_total <= 0:
+                        raise HttpResponseError(error, error=error) from error
+                    time.sleep(1)
+            content_length = response.content_length
+
+            # This makes sure that if_match is set so that we can validate
+            # that subsequent downloads are to an unmodified blob
+            if self.request_options.get("modified_access_conditions"):
+                self.request_options["modified_access_conditions"].if_match = response.properties.etag
+
+        return chunk_data, content_length
+
+
+class _ChunkIterator(object):
+    """Iterator for chunks in blob download stream."""
+
+    def __init__(self, size: int, content: bytes, downloader: Optional[_ChunkDownloader], chunk_size: int) -> None:
+        self.size = size
+        self._chunk_size = chunk_size
+        self._current_content = content
+        self._iter_downloader = downloader
+        self._iter_chunks: Optional[Generator[int, None, None]] = None
+        self._complete = size == 0
+
+    def __len__(self) -> int:
+        return self.size
+
+    def __iter__(self) -> Iterator[bytes]:
+        return self
+
+    # Iterate through responses.
+    def __next__(self) -> bytes:
+        if self._complete:
+            raise StopIteration("Download complete")
+        if not self._iter_downloader:
+            # cut the data obtained from initial GET into chunks
+            if len(self._current_content) > self._chunk_size:
+                return self._get_chunk_data()
+            self._complete = True
+            return self._current_content
+
+        if not self._iter_chunks:
+            self._iter_chunks = self._iter_downloader.get_chunk_offsets()
+
+        # initial GET result still has more than _chunk_size bytes of data
+        if len(self._current_content) >= self._chunk_size:
+            return self._get_chunk_data()
+
+        try:
+            next_chunk = next(self._iter_chunks)
+            self._current_content += self._iter_downloader.yield_chunk(next_chunk)[0]
+        except StopIteration as e:
+            self._complete = True
+            if self._current_content:
+                return self._current_content
+            raise e
+
+        # the current content from the first get is still there but smaller than chunk size
+        # therefore we want to make sure its also included
+        return self._get_chunk_data()
+
+    next = __next__  # Python 2 compatibility.
+
+    def _get_chunk_data(self) -> bytes:
+        chunk_data = self._current_content[: self._chunk_size]
+        self._current_content = self._current_content[self._chunk_size:]
+        return chunk_data
+
+
+class StorageStreamDownloader(Generic[T]):  # pylint: disable=too-many-instance-attributes
+    """
+    A streaming object to download from Azure Storage.
+    """
+
+    name: str
+    """The name of the blob being downloaded."""
+    container: str
+    """The name of the container where the blob is."""
+    properties: "BlobProperties"
+    """The properties of the blob being downloaded. If only a range of the data is being
+    downloaded, this will be reflected in the properties."""
+    size: int
+    """The size of the total data in the stream. This will be the byte range if specified,
+    otherwise the total size of the blob."""
+
+    def __init__(
+        self,
+        clients: "AzureBlobStorage" = None,  # type: ignore [assignment]
+        config: "StorageConfiguration" = None,  # type: ignore [assignment]
+        start_range: Optional[int] = None,
+        end_range: Optional[int] = None,
+        validate_content: bool = None,  # type: ignore [assignment]
+        encryption_options: Dict[str, Any] = None,  # type: ignore [assignment]
+        max_concurrency: int = 1,
+        name: str = None,  # type: ignore [assignment]
+        container: str = None,  # type: ignore [assignment]
+        encoding: Optional[str] = None,
+        download_cls: Optional[Callable] = None,
+        **kwargs: Any
+    ) -> None:
+        self.name = name
+        self.container = container
+        self.size = 0
+
+        self._clients = clients
+        self._config = config
+        self._start_range = start_range
+        self._end_range = end_range
+        self._max_concurrency = max_concurrency
+        self._encoding = encoding
+        self._validate_content = validate_content
+        self._encryption_options = encryption_options or {}
+        self._progress_hook = kwargs.pop('progress_hook', None)
+        self._request_options = kwargs
+        self._response = None
+        self._location_mode = None
+        self._current_content: Union[str, bytes] = b''
+        self._file_size = 0
+        self._non_empty_ranges = None
+        self._encryption_data: Optional["_EncryptionData"] = None
+
+        # The content download offset, after any processing (decryption), in bytes
+        self._download_offset = 0
+        # The raw download offset, before processing (decryption), in bytes
+        self._raw_download_offset = 0
+        # The offset the stream has been read to in bytes or chars depending on mode
+        self._read_offset = 0
+        # The offset into current_content that has been consumed in bytes or chars depending on mode
+        self._current_content_offset = 0
+
+        self._text_mode: Optional[bool] = None
+        self._decoder: Optional["IncrementalDecoder"] = None
+        # Whether the current content is the first chunk of download content or not
+        self._first_chunk = True
+        self._download_start = self._start_range or 0
+
+        # The cls is passed in via download_cls to avoid conflicting arg name with Generic.__new__
+        # but needs to be changed to cls in the request options.
+        self._request_options['cls'] = download_cls
+
+        if self._encryption_options.get("key") is not None or self._encryption_options.get("resolver") is not None:
+            self._get_encryption_data_request()
+
+        # The service only provides transactional MD5s for chunks under 4MB.
+        # If validate_content is on, get only self._config.max_chunk_get_size for the first
+        # chunk so a transactional MD5 can be retrieved.
+        first_get_size = (
+            self._config.max_single_get_size if not self._validate_content else self._config.max_chunk_get_size
+        )
+        initial_request_start = self._download_start
+        if self._end_range is not None and self._end_range - initial_request_start < first_get_size:
+            initial_request_end = self._end_range
+        else:
+            initial_request_end = initial_request_start + first_get_size - 1
+
+        self._initial_range, self._initial_offset = process_range_and_offset(
+            initial_request_start,
+            initial_request_end,
+            self._end_range,
+            self._encryption_options,
+            self._encryption_data
+        )
+
+        self._response = self._initial_request()
+        self.properties = cast("BlobProperties", self._response.properties)
+        self.properties.name = self.name
+        self.properties.container = self.container
+
+        # Set the content length to the download size instead of the size of the last range
+        self.properties.size = self.size
+        self.properties.content_range = (f"bytes {self._download_start}-"
+                                         f"{self._end_range if self._end_range is not None else self._file_size - 1}/"
+                                         f"{self._file_size}")
+
+        # Overwrite the content MD5 as it is the MD5 for the last range instead
+        # of the stored MD5
+        # TODO: Set to the stored MD5 when the service returns this
+        self.properties.content_md5 = None  # type: ignore [attr-defined]
+
+    def __len__(self):
+        return self.size
+
+    def _get_encryption_data_request(self) -> None:
+        # Save current request cls
+        download_cls = self._request_options.pop('cls', None)
+        # Adjust cls for get_properties
+        self._request_options['cls'] = deserialize_blob_properties
+
+        properties = cast("BlobProperties", self._clients.blob.get_properties(**self._request_options))
+        # This will return None if there is no encryption metadata or there are parsing errors.
+        # That is acceptable here, the proper error will be caught and surfaced when attempting
+        # to decrypt the blob.
+        self._encryption_data = parse_encryption_data(properties.metadata)
+
+        # Restore cls for download
+        self._request_options['cls'] = download_cls
+
+    @property
+    def _download_complete(self):
+        if is_encryption_v2(self._encryption_data):
+            return self._download_offset >= self.size
+        return self._raw_download_offset >= self.size
+
+    def _initial_request(self):
+        range_header, range_validation = validate_and_format_range_headers(
+            self._initial_range[0],
+            self._initial_range[1],
+            start_range_required=False,
+            end_range_required=False,
+            check_content_md5=self._validate_content
+        )
+
+        retry_active = True
+        retry_total = 3
+        while retry_active:
+            try:
+                location_mode, response = cast(Tuple[Optional[str], Any], self._clients.blob.download(
+                    range=range_header,
+                    range_get_content_md5=range_validation,
+                    validate_content=self._validate_content,
+                    data_stream_total=None,
+                    download_stream_current=0,
+                    **self._request_options
+                ))
+
+                # Check the location we read from to ensure we use the same one
+                # for subsequent requests.
+                self._location_mode = location_mode
+
+                # Parse the total file size and adjust the download size if ranges
+                # were specified
+                self._file_size = parse_length_from_content_range(response.properties.content_range)
+                if self._file_size is None:
+                    raise ValueError("Required Content-Range response header is missing or malformed.")
+                # Remove any extra encryption data size from blob size
+                self._file_size = adjust_blob_size_for_encryption(self._file_size, self._encryption_data)
+
+                if self._end_range is not None and self._start_range is not None:
+                    # Use the end range index unless it is over the end of the file
+                    self.size = min(self._file_size - self._start_range, self._end_range - self._start_range + 1)
+                elif self._start_range is not None:
+                    self.size = self._file_size - self._start_range
+                else:
+                    self.size = self._file_size
+
+            except HttpResponseError as error:
+                if self._start_range is None and error.response and error.response.status_code == 416:
+                    # Get range will fail on an empty file. If the user did not
+                    # request a range, do a regular get request in order to get
+                    # any properties.
+                    try:
+                        _, response = self._clients.blob.download(
+                            validate_content=self._validate_content,
+                            data_stream_total=0,
+                            download_stream_current=0,
+                            **self._request_options
+                        )
+                    except HttpResponseError as e:
+                        process_storage_error(e)
+
+                    # Set the download size to empty
+                    self.size = 0
+                    self._file_size = 0
+                else:
+                    process_storage_error(error)
+
+            try:
+                if self.size == 0:
+                    self._current_content = b""
+                else:
+                    self._current_content = process_content(
+                        response,
+                        self._initial_offset[0],
+                        self._initial_offset[1],
+                        self._encryption_options
+                    )
+                retry_active = False
+            except (IncompleteReadError, HttpResponseError, DecodeError) as error:
+                retry_total -= 1
+                if retry_total <= 0:
+                    raise HttpResponseError(error, error=error) from error
+                time.sleep(1)
+        self._download_offset += len(self._current_content)
+        self._raw_download_offset += response.content_length
+
+        # get page ranges to optimize downloading sparse page blob
+        if response.properties.blob_type == 'PageBlob':
+            try:
+                page_ranges = self._clients.page_blob.get_page_ranges()
+                self._non_empty_ranges = get_page_ranges_result(page_ranges)[0]
+            # according to the REST API documentation:
+            # in a highly fragmented page blob with a large number of writes,
+            # a Get Page Ranges request can fail due to an internal server timeout.
+            # thus, if the page blob is not sparse, it's ok for it to fail
+            except HttpResponseError:
+                pass
+
+        if not self._download_complete and self._request_options.get("modified_access_conditions"):
+            self._request_options["modified_access_conditions"].if_match = response.properties.etag
+
+        return response
+
+    def chunks(self) -> Iterator[bytes]:
+        """
+        Iterate over chunks in the download stream. Note, the iterator returned will
+        iterate over the entire download content, regardless of any data that was
+        previously read.
+
+        NOTE: If the stream has been partially read, some data may be re-downloaded by the iterator.
+
+        :returns: An iterator of the chunks in the download stream.
+        :rtype: Iterator[bytes]
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/blob_samples_hello_world.py
+                :start-after: [START download_a_blob_in_chunk]
+                :end-before: [END download_a_blob_in_chunk]
+                :language: python
+                :dedent: 12
+                :caption: Download a blob using chunks().
+        """
+        if self._text_mode:
+            raise ValueError("Stream has been partially read in text mode. chunks is not supported in text mode.")
+        if self._encoding:
+            warnings.warn("Encoding is ignored with chunks as only bytes are supported.")
+
+        iter_downloader = None
+        # If we still have the first chunk buffered, use it. Otherwise, download all content again
+        if not self._first_chunk or not self._download_complete:
+            if self._first_chunk:
+                start = self._download_start + len(self._current_content)
+                current_progress = len(self._current_content)
+            else:
+                start = self._download_start
+                current_progress = 0
+
+            end = self._download_start + self.size
+
+            iter_downloader = _ChunkDownloader(
+                client=self._clients.blob,
+                non_empty_ranges=self._non_empty_ranges,
+                total_size=self.size,
+                chunk_size=self._config.max_chunk_get_size,
+                current_progress=current_progress,
+                start_range=start,
+                end_range=end,
+                validate_content=self._validate_content,
+                encryption_options=self._encryption_options,
+                encryption_data=self._encryption_data,
+                use_location=self._location_mode,
+                **self._request_options
+            )
+
+        initial_content = self._current_content if self._first_chunk else b''
+        return _ChunkIterator(
+            size=self.size,
+            content=cast(bytes, initial_content),
+            downloader=iter_downloader,
+            chunk_size=self._config.max_chunk_get_size)
+
+    @overload
+    def read(self, size: int = -1) -> T:
+        ...
+
+    @overload
+    def read(self, *, chars: Optional[int] = None) -> T:
+        ...
+
+    # pylint: disable-next=too-many-statements,too-many-branches
+    def read(self, size: int = -1, *, chars: Optional[int] = None) -> T:
+        """
+        Read the specified number of bytes or chars from the stream. If `encoding`
+        was specified on `download_blob`, it is recommended to use the
+        chars parameter to read a specific number of chars to avoid decoding
+        errors. If size/chars is unspecified or negative, all bytes will be read.
+
+        :param int size:
+            The number of bytes to download from the stream. Leave unspecified
+            or set negative to download all bytes.
+        :keyword Optional[int] chars:
+            The number of chars to download from the stream. Leave unspecified
+            or set negative to download all chars. Note, this can only be used
+            when encoding is specified on `download_blob`.
+        :returns:
+            The requested data as bytes or a string if encoding was specified. If
+            the return value is empty, there is no more data to read.
+        :rtype: T
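+
+        A minimal usage sketch (the ``blob_client`` name is illustrative and
+        not part of this module)::
+
+            stream = blob_client.download_blob(encoding="utf-8")
+            first = stream.read(chars=100)  # read up to 100 characters
+            rest = stream.read()            # read the remainder as text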
+        """
+        if size > -1 and self._encoding:
+            warnings.warn(
+                "Size parameter specified with text encoding enabled. It is recommended to use chars "
+                "to read a specific number of characters instead."
+            )
+        if size > -1 and chars is not None:
+            raise ValueError("Cannot specify both size and chars.")
+        if not self._encoding and chars is not None:
+            raise ValueError("Must specify encoding to read chars.")
+        if self._text_mode and size > -1:
+            raise ValueError("Stream has been partially read in text mode. Please use chars.")
+        if self._text_mode is False and chars is not None:
+            raise ValueError("Stream has been partially read in bytes mode. Please use size.")
+
+        # Empty blob or already read to the end
+        if (size == 0 or chars == 0 or
+                (self._download_complete and self._current_content_offset >= len(self._current_content))):
+            return b'' if not self._encoding else ''  # type: ignore [return-value]
+
+        if not self._text_mode and chars is not None and self._encoding is not None:
+            self._text_mode = True
+            self._decoder = codecs.getincrementaldecoder(self._encoding)('strict')
+            self._current_content = self._decoder.decode(
+                cast(bytes, self._current_content), final=self._download_complete)
+        elif self._text_mode is None:
+            self._text_mode = False
+
+        output_stream: Union[BytesIO, StringIO]
+        if self._text_mode:
+            output_stream = StringIO()
+            size = sys.maxsize if chars is None or chars <= 0 else chars
+        else:
+            output_stream = BytesIO()
+            size = size if size > 0 else sys.maxsize
+        readall = size == sys.maxsize
+        count = 0
+
+        # Start by reading from current_content
+        start = self._current_content_offset
+        length = min(len(self._current_content) - self._current_content_offset, size - count)
+        read = output_stream.write(self._current_content[start:start + length])  # type: ignore [arg-type]
+
+        count += read
+        self._current_content_offset += read
+        self._read_offset += read
+        self._check_and_report_progress()
+
+        remaining = size - count
+        if remaining > 0 and not self._download_complete:
+            # Create a downloader that can download the rest of the file
+            start = self._download_start + self._download_offset
+            end = self._download_start + self.size
+
+            parallel = self._max_concurrency > 1
+            downloader = _ChunkDownloader(
+                client=self._clients.blob,
+                non_empty_ranges=self._non_empty_ranges,
+                total_size=self.size,
+                chunk_size=self._config.max_chunk_get_size,
+                current_progress=self._read_offset,
+                start_range=start,
+                end_range=end,
+                stream=output_stream,
+                parallel=parallel,
+                validate_content=self._validate_content,
+                encryption_options=self._encryption_options,
+                encryption_data=self._encryption_data,
+                use_location=self._location_mode,
+                progress_hook=self._progress_hook,
+                **self._request_options
+            )
+            self._first_chunk = False
+
+            # When reading all data, have the downloader read everything into the stream.
+            # Else, read one chunk at a time (using the downloader as an iterator) until
+            # the requested size is reached.
+            chunks_iter = downloader.get_chunk_offsets()
+            if readall and not self._text_mode:
+                # Only do parallel if there is more than one chunk left to download
+                if parallel and (self.size - self._download_offset) > self._config.max_chunk_get_size:
+                    import concurrent.futures
+                    with concurrent.futures.ThreadPoolExecutor(self._max_concurrency) as executor:
+                        list(executor.map(
+                            with_current_context(downloader.process_chunk),
+                            downloader.get_chunk_offsets()
+                        ))
+                else:
+                    for next_chunk in chunks_iter:
+                        downloader.process_chunk(next_chunk)
+
+                self._complete_read()
+
+            else:
+                while (chunk := next(chunks_iter, None)) is not None and remaining > 0:
+                    chunk_data, content_length = downloader.yield_chunk(chunk)
+                    self._download_offset += len(chunk_data)
+                    self._raw_download_offset += content_length
+                    if self._text_mode and self._decoder is not None:
+                        self._current_content = self._decoder.decode(chunk_data, final=self._download_complete)
+                    else:
+                        self._current_content = chunk_data
+
+                    if remaining < len(self._current_content):
+                        read = output_stream.write(self._current_content[:remaining])  # type: ignore [arg-type]
+                    else:
+                        read = output_stream.write(self._current_content)  # type: ignore [arg-type]
+
+                    self._current_content_offset = read
+                    self._read_offset += read
+                    remaining -= read
+                    self._check_and_report_progress()
+
+        data = output_stream.getvalue()
+        if not self._text_mode and self._encoding:
+            try:
+                # This is technically incorrect to do, but we have it for backwards compatibility.
+                data = cast(bytes, data).decode(self._encoding)
+            except UnicodeDecodeError:
+                warnings.warn(
+                    "Encountered a decoding error while decoding blob data from a partial read. "
+                    "Try using the `chars` keyword instead to read in text mode."
+                )
+                raise
+
+        return data  # type: ignore [return-value]
+
+    def readall(self) -> T:
+        """
+        Read the entire contents of this blob.
+        This operation is blocking until all data is downloaded.
+
+        :returns: The requested data as bytes or a string if encoding was specified.
+        :rtype: T
+        """
+        return self.read()
+
+    def readinto(self, stream: IO[bytes]) -> int:
+        """Download the contents of this file to a stream.
+
+        :param IO[bytes] stream:
+            The stream to download to. This can be an open file-handle,
+            or any writable stream. The stream must be seekable if the download
+            uses more than one parallel connection.
+        :returns: The number of bytes read.
+        :rtype: int
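+
+        A minimal usage sketch (``blob_client`` is illustrative)::
+
+            with open("output.bin", "wb") as handle:
+                bytes_read = blob_client.download_blob().readinto(handle)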
+        """
+        if self._text_mode:
+            raise ValueError("Stream has been partially read in text mode. readinto is not supported in text mode.")
+        if self._encoding:
+            warnings.warn("Encoding is ignored with readinto as only byte streams are supported.")
+
+        # The stream must be seekable if parallel download is required
+        parallel = self._max_concurrency > 1
+        if parallel:
+            error_message = "Target stream handle must be seekable."
+            if sys.version_info >= (3,) and not stream.seekable():
+                raise ValueError(error_message)
+
+            try:
+                stream.seek(stream.tell())
+            except (NotImplementedError, AttributeError) as exc:
+                raise ValueError(error_message) from exc
+
+        # If some data has been streamed using `read`, only stream the remaining data
+        remaining_size = self.size - self._read_offset
+        # Already read to the end
+        if remaining_size <= 0:
+            return 0
+
+        # Write the current content to the user stream
+        current_remaining = len(self._current_content) - self._current_content_offset
+        start = self._current_content_offset
+        count = stream.write(cast(bytes, self._current_content[start:start + current_remaining]))
+
+        self._current_content_offset += count
+        self._read_offset += count
+        if self._progress_hook:
+            self._progress_hook(self._read_offset, self.size)
+
+        # If all the data was already downloaded/buffered
+        if self._download_complete:
+            return remaining_size
+
+        data_start = self._download_start + self._read_offset
+        data_end = self._download_start + self.size
+
+        downloader = _ChunkDownloader(
+            client=self._clients.blob,
+            non_empty_ranges=self._non_empty_ranges,
+            total_size=self.size,
+            chunk_size=self._config.max_chunk_get_size,
+            current_progress=self._read_offset,
+            start_range=data_start,
+            end_range=data_end,
+            stream=stream,
+            parallel=parallel,
+            validate_content=self._validate_content,
+            encryption_options=self._encryption_options,
+            encryption_data=self._encryption_data,
+            use_location=self._location_mode,
+            progress_hook=self._progress_hook,
+            **self._request_options
+        )
+        if parallel:
+            import concurrent.futures
+            with concurrent.futures.ThreadPoolExecutor(self._max_concurrency) as executor:
+                list(executor.map(
+                        with_current_context(downloader.process_chunk),
+                        downloader.get_chunk_offsets()
+                    ))
+        else:
+            for chunk in downloader.get_chunk_offsets():
+                downloader.process_chunk(chunk)
+
+        self._complete_read()
+        return remaining_size
+
+    def _complete_read(self):
+        """Adjusts all offsets to the end of the download."""
+        self._download_offset = self.size
+        self._raw_download_offset = self.size
+        self._read_offset = self.size
+        self._current_content_offset = len(self._current_content)
+
+    def _check_and_report_progress(self):
+        """Reports progress if necessary."""
+        # Only report progress at the end of each chunk and use download_offset to always report
+        # progress in terms of (approximate) byte count.
+        if self._progress_hook and self._current_content_offset == len(self._current_content):
+            self._progress_hook(self._download_offset, self.size)
+
+    def content_as_bytes(self, max_concurrency=1):
+        """DEPRECATED: Download the contents of this file.
+
+        This operation is blocking until all data is downloaded.
+
+        This method is deprecated, use :func:`readall` instead.
+
+        :param int max_concurrency:
+            The number of parallel connections with which to download.
+        :returns: The contents of the file as bytes.
+        :rtype: bytes
+        """
+        warnings.warn(
+            "content_as_bytes is deprecated, use readall instead",
+            DeprecationWarning
+        )
+        if self._text_mode:
+            raise ValueError("Stream has been partially read in text mode. "
+                             "content_as_bytes is not supported in text mode.")
+
+        self._max_concurrency = max_concurrency
+        return self.readall()
+
+    def content_as_text(self, max_concurrency=1, encoding="UTF-8"):
+        """DEPRECATED: Download the contents of this blob, and decode as text.
+
+        This operation is blocking until all data is downloaded.
+
+        This method is deprecated, use :func:`readall` instead.
+
+        :param int max_concurrency:
+            The number of parallel connections with which to download.
+        :param str encoding:
+            The encoding to use to decode the downloaded bytes. Default is UTF-8.
+        :returns: The content of the file as a str.
+        :rtype: str
+        """
+        warnings.warn(
+            "content_as_text is deprecated, use readall instead",
+            DeprecationWarning
+        )
+        if self._text_mode:
+            raise ValueError("Stream has been partially read in text mode. "
+                             "content_as_text is not supported in text mode.")
+
+        self._max_concurrency = max_concurrency
+        self._encoding = encoding
+        return self.readall()
+
+    def download_to_stream(self, stream, max_concurrency=1):
+        """DEPRECATED: Download the contents of this blob to a stream.
+
+        This method is deprecated, use :func:`readinto` instead.
+
+        :param IO[T] stream:
+            The stream to download to. This can be an open file-handle,
+            or any writable stream. The stream must be seekable if the download
+            uses more than one parallel connection.
+        :param int max_concurrency:
+            The number of parallel connections with which to download.
+        :returns: The properties of the downloaded blob.
+        :rtype: Any
+        """
+        warnings.warn(
+            "download_to_stream is deprecated, use readinto instead",
+            DeprecationWarning
+        )
+        if self._text_mode:
+            raise ValueError("Stream has been partially read in text mode. "
+                             "download_to_stream is not supported in text mode.")
+
+        self._max_concurrency = max_concurrency
+        self.readinto(stream)
+        return self.properties
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/blob/_encryption.py b/.venv/lib/python3.12/site-packages/azure/storage/blob/_encryption.py
new file mode 100644
index 00000000..42f5c51d
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/blob/_encryption.py
@@ -0,0 +1,1127 @@
+# pylint: disable=too-many-lines
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+
+import math
+import os
+import sys
+import warnings
+from collections import OrderedDict
+from io import BytesIO
+from json import (
+    dumps,
+    loads,
+)
+from typing import Any, Callable, Dict, IO, Optional, Tuple, TYPE_CHECKING
+from typing import OrderedDict as TypedOrderedDict
+from typing_extensions import Protocol
+
+from cryptography.hazmat.backends import default_backend
+from cryptography.hazmat.primitives.ciphers import Cipher
+from cryptography.hazmat.primitives.ciphers.aead import AESGCM
+from cryptography.hazmat.primitives.ciphers.algorithms import AES
+from cryptography.hazmat.primitives.ciphers.modes import CBC
+from cryptography.hazmat.primitives.padding import PKCS7
+
+from azure.core.exceptions import HttpResponseError
+from azure.core.utils import CaseInsensitiveDict
+
+from ._version import VERSION
+from ._shared import decode_base64_to_bytes, encode_base64
+
+if TYPE_CHECKING:
+    from azure.core.pipeline import PipelineResponse
+    from cryptography.hazmat.primitives.ciphers import AEADEncryptionContext
+    from cryptography.hazmat.primitives.padding import PaddingContext
+
+
+_ENCRYPTION_PROTOCOL_V1 = '1.0'
+_ENCRYPTION_PROTOCOL_V2 = '2.0'
+_ENCRYPTION_PROTOCOL_V2_1 = '2.1'
+_VALID_ENCRYPTION_PROTOCOLS = [_ENCRYPTION_PROTOCOL_V1, _ENCRYPTION_PROTOCOL_V2, _ENCRYPTION_PROTOCOL_V2_1]
+_ENCRYPTION_V2_PROTOCOLS = [_ENCRYPTION_PROTOCOL_V2, _ENCRYPTION_PROTOCOL_V2_1]
+_GCM_REGION_DATA_LENGTH = 4 * 1024 * 1024
+_GCM_NONCE_LENGTH = 12
+_GCM_TAG_LENGTH = 16
+
+_ERROR_OBJECT_INVALID = \
+    '{0} does not define a complete interface. Value of {1} is either missing or invalid.'
+
+_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION = (
+    'The require_encryption flag is set, but encryption is not supported'
+    ' for this method.')
+
+
+class KeyEncryptionKey(Protocol):
+
+    def wrap_key(self, key: bytes) -> bytes:
+        ...
+
+    def unwrap_key(self, key: bytes, algorithm: str) -> bytes:
+        ...
+
+    def get_kid(self) -> str:
+        ...
+
+    def get_key_wrap_algorithm(self) -> str:
+        ...
+
+
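+# A minimal sketch (not part of this module) of a KeyEncryptionKey
+# implementation backed by the AES Key Wrap primitives from 'cryptography'.
+# The class name and kid below are illustrative:
+#
+#     from cryptography.hazmat.primitives.keywrap import aes_key_wrap, aes_key_unwrap
+#
+#     class LocalAESKeyWrapKEK:
+#         def __init__(self, kek: bytes, kid: str) -> None:
+#             self._kek = kek  # a 16-, 24- or 32-byte AES key
+#             self._kid = kid
+#
+#         def wrap_key(self, key: bytes) -> bytes:
+#             return aes_key_wrap(self._kek, key)
+#
+#         def unwrap_key(self, key: bytes, algorithm: str) -> bytes:
+#             return aes_key_unwrap(self._kek, key)
+#
+#         def get_kid(self) -> str:
+#             return self._kid
+#
+#         def get_key_wrap_algorithm(self) -> str:
+#             return 'A256KW'
+
+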
+def _validate_not_none(param_name: str, param: Any):
+    if param is None:
+        raise ValueError(f'{param_name} should not be None.')
+
+
+def _validate_key_encryption_key_wrap(kek: KeyEncryptionKey):
+    # Note that None is not callable and so will fail the second clause of each check.
+    if not hasattr(kek, 'wrap_key') or not callable(kek.wrap_key):
+        raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'wrap_key'))
+    if not hasattr(kek, 'get_kid') or not callable(kek.get_kid):
+        raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'get_kid'))
+    if not hasattr(kek, 'get_key_wrap_algorithm') or not callable(kek.get_key_wrap_algorithm):
+        raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'get_key_wrap_algorithm'))
+
+
+class StorageEncryptionMixin(object):
+    def _configure_encryption(self, kwargs: Dict[str, Any]):
+        self.require_encryption = kwargs.get("require_encryption", False)
+        self.encryption_version = kwargs.get("encryption_version", "1.0")
+        self.key_encryption_key = kwargs.get("key_encryption_key")
+        self.key_resolver_function = kwargs.get("key_resolver_function")
+        if self.key_encryption_key and self.encryption_version == '1.0':
+            warnings.warn("This client has been configured to use encryption with version 1.0. " +
+                          "Version 1.0 is deprecated and no longer considered secure. It is highly " +
+                          "recommended that you switch to using version 2.0. The version can be " +
+                          "specified using the 'encryption_version' keyword.")
+
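+# A minimal sketch (illustrative, not part of this module) of how these
+# keyword arguments are typically supplied on a client; 'conn_str' and 'kek'
+# are assumed to exist:
+#
+#     from azure.storage.blob import BlobClient
+#
+#     blob_client = BlobClient.from_connection_string(
+#         conn_str, container_name="c", blob_name="b",
+#         require_encryption=True,
+#         encryption_version='2.0',
+#         key_encryption_key=kek,  # implements the KeyEncryptionKey protocol
+#     )
+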
+
+class _EncryptionAlgorithm(object):
+    """
+    Specifies which client encryption algorithm is used.
+    """
+    AES_CBC_256 = 'AES_CBC_256'
+    AES_GCM_256 = 'AES_GCM_256'
+
+
+class _WrappedContentKey:
+    """
+    Represents the envelope key details stored on the service.
+    """
+
+    def __init__(self, algorithm: str, encrypted_key: bytes, key_id: str) -> None:
+        """
+        :param str algorithm:
+            The algorithm used for wrapping.
+        :param bytes encrypted_key:
+            The encrypted content-encryption-key.
+        :param str key_id:
+            The key-encryption-key identifier string.
+        """
+        _validate_not_none('algorithm', algorithm)
+        _validate_not_none('encrypted_key', encrypted_key)
+        _validate_not_none('key_id', key_id)
+
+        self.algorithm = algorithm
+        self.encrypted_key = encrypted_key
+        self.key_id = key_id
+
+
+class _EncryptedRegionInfo:
+    """
+    Represents the length of encryption elements.
+    This is only used for Encryption V2.
+    """
+
+    def __init__(self, data_length: int, nonce_length: int, tag_length: int) -> None:
+        """
+        :param int data_length:
+            The length of the encryption region data (not including nonce + tag).
+        :param int nonce_length:
+            The length of nonce used when encrypting.
+        :param int tag_length:
+            The length of the encryption tag.
+        """
+        _validate_not_none('data_length', data_length)
+        _validate_not_none('nonce_length', nonce_length)
+        _validate_not_none('tag_length', tag_length)
+
+        self.data_length = data_length
+        self.nonce_length = nonce_length
+        self.tag_length = tag_length
+
+
+class _EncryptionAgent:
+    """
+    Represents the encryption agent stored on the service.
+    It consists of the encryption protocol version and encryption algorithm used.
+    """
+
+    def __init__(self, encryption_algorithm: _EncryptionAlgorithm, protocol: str) -> None:
+        """
+        :param _EncryptionAlgorithm encryption_algorithm:
+            The algorithm used for encrypting the message contents.
+        :param str protocol:
+            The protocol version used for encryption.
+        """
+        _validate_not_none('encryption_algorithm', encryption_algorithm)
+        _validate_not_none('protocol', protocol)
+
+        self.encryption_algorithm = str(encryption_algorithm)
+        self.protocol = protocol
+
+
+class _EncryptionData:
+    """
+    Represents the encryption data that is stored on the service.
+    """
+
+    def __init__(
+        self, content_encryption_IV: Optional[bytes],
+        encrypted_region_info: Optional[_EncryptedRegionInfo],
+        encryption_agent: _EncryptionAgent,
+        wrapped_content_key: _WrappedContentKey,
+        key_wrapping_metadata: Dict[str, Any]
+    ) -> None:
+        """
+        :param Optional[bytes] content_encryption_IV:
+            The content encryption initialization vector.
+            Required for AES-CBC (V1).
+        :param Optional[_EncryptedRegionInfo] encrypted_region_info:
+            The info about the authenticated block sizes.
+            Required for AES-GCM (V2).
+        :param _EncryptionAgent encryption_agent:
+            The encryption agent.
+        :param _WrappedContentKey wrapped_content_key:
+            An object that stores the wrapping algorithm, the key identifier,
+            and the encrypted key bytes.
+        :param Dict[str, Any] key_wrapping_metadata:
+            A dict containing metadata related to the key wrapping.
+        """
+        _validate_not_none('encryption_agent', encryption_agent)
+        _validate_not_none('wrapped_content_key', wrapped_content_key)
+
+        # Validate we have the right matching optional parameter for the specified algorithm
+        if encryption_agent.encryption_algorithm == _EncryptionAlgorithm.AES_CBC_256:
+            _validate_not_none('content_encryption_IV', content_encryption_IV)
+        elif encryption_agent.encryption_algorithm == _EncryptionAlgorithm.AES_GCM_256:
+            _validate_not_none('encrypted_region_info', encrypted_region_info)
+        else:
+            raise ValueError("Invalid encryption algorithm.")
+
+        self.content_encryption_IV = content_encryption_IV
+        self.encrypted_region_info = encrypted_region_info
+        self.encryption_agent = encryption_agent
+        self.wrapped_content_key = wrapped_content_key
+        self.key_wrapping_metadata = key_wrapping_metadata
+
+
+class GCMBlobEncryptionStream:
+    """
+    A stream that performs AES-GCM encryption on the given data as
+    it's streamed. Data is read and encrypted in regions. The stream
+    will use the same encryption key and will generate a guaranteed unique
+    nonce for each encryption region.
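+
+    A minimal usage sketch (key and plaintext are illustrative)::
+
+        key = os.urandom(32)
+        stream = GCMBlobEncryptionStream(key, BytesIO(b"plaintext"))
+        ciphertext = stream.read()  # nonce + ciphertext + tag per 4 MiB region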
+    """
+    def __init__(
+        self, content_encryption_key: bytes,
+        data_stream: IO[bytes],
+    ) -> None:
+        """
+        :param bytes content_encryption_key: The encryption key to use.
+        :param IO[bytes] data_stream: The data stream to read data from.
+        """
+        self.content_encryption_key = content_encryption_key
+        self.data_stream = data_stream
+
+        self.offset = 0
+        self.current = b''
+        self.nonce_counter = 0
+
+    def read(self, size: int = -1) -> bytes:
+        """
+        Read data from the stream. Specify -1 to read all available data.
+
+        :param int size: The amount of data to read. Defaults to -1 for all data.
+        :return: The bytes read.
+        :rtype: bytes
+        """
+        result = BytesIO()
+        remaining = sys.maxsize if size == -1 else size
+
+        while remaining > 0:
+            # Start by reading from current
+            if len(self.current) > 0:
+                read = min(remaining, len(self.current))
+                result.write(self.current[:read])
+
+                self.current = self.current[read:]
+                self.offset += read
+                remaining -= read
+
+            if remaining > 0:
+                # Read one region of data and encrypt it
+                data = self.data_stream.read(_GCM_REGION_DATA_LENGTH)
+                if len(data) == 0:
+                    # No more data to read
+                    break
+
+                self.current = encrypt_data_v2(data, self.nonce_counter, self.content_encryption_key)
+                # IMPORTANT: Must increment the nonce each time.
+                self.nonce_counter += 1
+
+        return result.getvalue()
+
+
+def encrypt_data_v2(data: bytes, nonce: int, key: bytes) -> bytes:
+    """
+    Encrypts the given data using the given nonce and key using AES-GCM.
+    The result includes the data in the form: nonce + ciphertext + tag.
+
+    :param bytes data: The raw data to encrypt.
+    :param int nonce: The nonce to use for encryption.
+    :param bytes key: The encryption key to use for encryption.
+    :return: The encrypted bytes in the form: nonce + ciphertext + tag.
+    :rtype: bytes
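+
+    A minimal sketch of the output layout (illustrative only)::
+
+        blob = encrypt_data_v2(b"data", 0, os.urandom(32))
+        nonce = blob[:_GCM_NONCE_LENGTH]
+        ciphertext_and_tag = blob[_GCM_NONCE_LENGTH:]  # tag is the final 16 bytes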
+    """
+    nonce_bytes = nonce.to_bytes(_GCM_NONCE_LENGTH, 'big')
+    aesgcm = AESGCM(key)
+
+    # Returns ciphertext + tag
+    ciphertext_with_tag = aesgcm.encrypt(nonce_bytes, data, None)
+    return nonce_bytes + ciphertext_with_tag
+
+
+def is_encryption_v2(encryption_data: Optional[_EncryptionData]) -> bool:
+    """
+    Determine whether the given encryption data signifies version 2.0 or 2.1.
+
+    :param Optional[_EncryptionData] encryption_data: The encryption data. Will return False if this is None.
+    :return: True, if the encryption data indicates encryption V2, false otherwise.
+    :rtype: bool
+    """
+    # If encryption_data is None, assume no encryption
+    return bool(encryption_data and (encryption_data.encryption_agent.protocol in _ENCRYPTION_V2_PROTOCOLS))
+
+
+def modify_user_agent_for_encryption(
+        user_agent: str,
+        moniker: str,
+        encryption_version: str,
+        request_options: Dict[str, Any]
+    ) -> None:
+    """
+    Modifies the request options to contain a user agent string updated with encryption information.
+    Adds azstorage-clientsideencryption/<version> immediately preceding the SDK descriptor.
+
+    :param str user_agent: The existing User Agent to modify.
+    :param str moniker: The specific SDK moniker. The modification will immediately precede azsdk-python-{moniker}.
+    :param str encryption_version: The version of encryption being used.
+    :param Dict[str, Any] request_options: The request options to add the user agent override to.
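+
+    For example (values illustrative), a user agent of
+    ``azsdk-python-storage-blob/12.x Python/3.12`` becomes
+    ``azstorage-clientsideencryption/2.0 azsdk-python-storage-blob/12.x Python/3.12``.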
+    """
+    # If the user has specified user_agent_overwrite=True, don't make any modifications
+    if request_options.get('user_agent_overwrite'):
+        return
+
+    # If the feature flag is already present, don't add it again
+    feature_flag = f"azstorage-clientsideencryption/{encryption_version}"
+    if feature_flag in user_agent:
+        return
+
+    index = user_agent.find(f"azsdk-python-{moniker}")
+    user_agent = f"{user_agent[:index]}{feature_flag} {user_agent[index:]}"
+    # Since we are using user_agent_overwrite=True, we must prepend the user's user_agent if there is one
+    if request_options.get('user_agent'):
+        user_agent = f"{request_options.get('user_agent')} {user_agent}"
+
+    request_options['user_agent'] = user_agent
+    request_options['user_agent_overwrite'] = True
+
+
+def get_adjusted_upload_size(length: int, encryption_version: str) -> int:
+    """
+    Get the adjusted size of the blob upload which accounts for
+    extra encryption data (padding OR nonce + tag).
+
+    :param int length: The plaintext data length.
+    :param str encryption_version: The version of encryption being used.
+    :return: The new upload size to use.
+    :rtype: int
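+
+    For example (V2), a 5 MiB upload spans math.ceil(5 / 4) = 2 regions of
+    4 MiB each, so 2 * (12 + 16) = 56 bytes of nonce/tag overhead are added.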
+    """
+    if encryption_version == _ENCRYPTION_PROTOCOL_V1:
+        return length + (16 - (length % 16))
+
+    if encryption_version == _ENCRYPTION_PROTOCOL_V2:
+        encryption_data_length = _GCM_NONCE_LENGTH + _GCM_TAG_LENGTH
+        regions = math.ceil(length / _GCM_REGION_DATA_LENGTH)
+        return length + (regions * encryption_data_length)
+
+    raise ValueError("Invalid encryption version specified.")
+
+
+def get_adjusted_download_range_and_offset(
+        start: int,
+        end: int,
+        length: Optional[int],
+        encryption_data: Optional[_EncryptionData]) -> Tuple[Tuple[int, int], Tuple[int, int]]:
+    """
+    Gets the new download range and offsets into the decrypted data for
+    the given user-specified range. The new download range will include all
+    the data needed to decrypt the user-provided range and will include only
+    full encryption regions.
+
+    The offsets returned will be the offsets needed to fetch the user-requested
+    data out of the full decrypted data. The end offset is different based on the
+    encryption version. For V1, the end offset is offset from the end whereas for
+    V2, the end offset is the ending index into the stream.
+    V1: decrypted_data[start_offset : len(decrypted_data) - end_offset]
+    V2: decrypted_data[start_offset : end_offset]
+
+    :param int start: The user-requested start index.
+    :param int end: The user-requested end index.
+    :param Optional[int] length: The user-requested length. Only used for V1.
+    :param Optional[_EncryptionData] encryption_data: The encryption data to determine version and sizes.
+    :return: (new start, new end), (start offset, end offset)
+    :rtype: Tuple[Tuple[int, int], Tuple[int, int]]
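+
+    A worked V2 example (4 MiB data regions, 12-byte nonce, 16-byte tag, so a
+    4194332-byte encryption region): a request for bytes [10, 20] falls entirely
+    in region 0, so the new download range is (0, 4194331) and the offsets into
+    the decrypted data are (10, 21), i.e. decrypted_data[10:21].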
+    """
+    start_offset, end_offset = 0, 0
+    if encryption_data is None:
+        return (start, end), (start_offset, end_offset)
+
+    if encryption_data.encryption_agent.protocol == _ENCRYPTION_PROTOCOL_V1:
+        if start is not None:
+            # Align the start of the range along a 16 byte block
+            start_offset = start % 16
+            start -= start_offset
+
+            # Include an extra 16 bytes for the IV if necessary
+            # Because of the previous offsetting, start_range will always
+            # be a multiple of 16.
+            if start > 0:
+                start_offset += 16
+                start -= 16
+
+        if length is not None:
+            # Align the end of the range along a 16 byte block
+            end_offset = 15 - (end % 16)
+            end += end_offset
+
+    elif encryption_data.encryption_agent.protocol in _ENCRYPTION_V2_PROTOCOLS:
+        start_offset, end_offset = 0, end
+
+        if encryption_data.encrypted_region_info is None:
+            raise ValueError("Missing required metadata for Encryption V2")
+
+        nonce_length = encryption_data.encrypted_region_info.nonce_length
+        data_length = encryption_data.encrypted_region_info.data_length
+        tag_length = encryption_data.encrypted_region_info.tag_length
+        region_length = nonce_length + data_length + tag_length
+        requested_length = end - start
+
+        if start is not None:
+            # Find which data region the start is in
+            region_num = start // data_length
+            # The start of the data region is different from the start of the encryption region
+            data_start = region_num * data_length
+            region_start = region_num * region_length
+            # Offset is based on data region
+            start_offset = start - data_start
+            # New start is the start of the encryption region
+            start = region_start
+
+        if end is not None:
+            # Find which data region the end is in
+            region_num = end // data_length
+            end_offset = start_offset + requested_length + 1
+            # New end is the end of the encryption region
+            end = (region_num * region_length) + region_length - 1
+
+    return (start, end), (start_offset, end_offset)
+
+
+def parse_encryption_data(metadata: Dict[str, Any]) -> Optional[_EncryptionData]:
+    """
+    Parses the encryption data out of the given blob metadata. If metadata does
+    not exist or there are parsing errors, this function will just return None.
+
+    :param Dict[str, Any] metadata: The blob metadata parsed from the response.
+    :return: The encryption data or None
+    :rtype: Optional[_EncryptionData]
+    """
+    try:
+        # Use a case-insensitive dict, as the metadata key lookup needs to be case-insensitive
+        case_insensitive_metadata = CaseInsensitiveDict(metadata)
+        return _dict_to_encryption_data(loads(case_insensitive_metadata['encryptiondata']))
+    except:  # pylint: disable=bare-except
+        return None
+
+
+def adjust_blob_size_for_encryption(size: int, encryption_data: Optional[_EncryptionData]) -> int:
+    """
+    Adjusts the given blob size for encryption by subtracting the size of
+    the encryption data (nonce + tag). This only has an effect for encryption V2.
+
+    :param int size: The original blob size.
+    :param Optional[_EncryptionData] encryption_data: The encryption data to determine version and sizes.
+    :return: The new blob size.
+    :rtype: int
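+
+    For example (V2), an encrypted blob of 4194332 bytes holds exactly one
+    encryption region, so 12 + 16 = 28 bytes of metadata are subtracted and the
+    adjusted plaintext size is 4194304.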
+    """
+    if (encryption_data is not None and
+        encryption_data.encrypted_region_info is not None and
+        is_encryption_v2(encryption_data)):
+
+        nonce_length = encryption_data.encrypted_region_info.nonce_length
+        data_length = encryption_data.encrypted_region_info.data_length
+        tag_length = encryption_data.encrypted_region_info.tag_length
+        region_length = nonce_length + data_length + tag_length
+
+        num_regions = math.ceil(size / region_length)
+        metadata_size = num_regions * (nonce_length + tag_length)
+        return size - metadata_size
+
+    return size
+
+
+def _generate_encryption_data_dict(
+        kek: KeyEncryptionKey,
+        cek: bytes,
+        iv: Optional[bytes],
+        version: str
+    ) -> TypedOrderedDict[str, Any]:
+    """
+    Generates and returns the encryption metadata as a dict.
+
+    :param KeyEncryptionKey kek: The key encryption key. See calling functions for more information.
+    :param bytes cek: The content encryption key.
+    :param Optional[bytes] iv: The initialization vector. Only required for AES-CBC.
+    :param str version: The client encryption version used.
+    :return: A dict containing all the encryption metadata.
+    :rtype: Dict[str, Any]
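+
+    For V2, the result is shaped roughly as follows (values illustrative)::
+
+        {
+            "WrappedContentKey": {"KeyId": ..., "EncryptedKey": ..., "Algorithm": ...},
+            "EncryptionAgent": {"Protocol": "2.0", "EncryptionAlgorithm": "AES_GCM_256"},
+            "EncryptedRegionInfo": {"DataLength": 4194304, "NonceLength": 12},
+            "KeyWrappingMetadata": {"EncryptionLibrary": "Python <version>"}
+        }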
+    """
+    # Encrypt the cek.
+    if version == _ENCRYPTION_PROTOCOL_V1:
+        wrapped_cek = kek.wrap_key(cek)
+    # For V2, we include the encryption version in the wrapped key.
+    elif version == _ENCRYPTION_PROTOCOL_V2:
+        # We must pad the version to 8 bytes for AES Keywrap algorithms
+        to_wrap = _ENCRYPTION_PROTOCOL_V2.encode().ljust(8, b'\0') + cek
+        wrapped_cek = kek.wrap_key(to_wrap)
+    else:
+        raise ValueError("Invalid encryption version specified.")
+
+    # Build the encryption_data dict.
+    # Use OrderedDict to comply with Java's ordering requirement.
+    wrapped_content_key = OrderedDict()
+    wrapped_content_key['KeyId'] = kek.get_kid()
+    wrapped_content_key['EncryptedKey'] = encode_base64(wrapped_cek)
+    wrapped_content_key['Algorithm'] = kek.get_key_wrap_algorithm()
+
+    encryption_agent = OrderedDict()
+    encryption_agent['Protocol'] = version
+
+    if version == _ENCRYPTION_PROTOCOL_V1:
+        encryption_agent['EncryptionAlgorithm'] = _EncryptionAlgorithm.AES_CBC_256
+
+    elif version == _ENCRYPTION_PROTOCOL_V2:
+        encryption_agent['EncryptionAlgorithm'] = _EncryptionAlgorithm.AES_GCM_256
+
+        encrypted_region_info = OrderedDict()
+        encrypted_region_info['DataLength'] = _GCM_REGION_DATA_LENGTH
+        encrypted_region_info['NonceLength'] = _GCM_NONCE_LENGTH
+
+    encryption_data_dict: TypedOrderedDict[str, Any] = OrderedDict()
+    encryption_data_dict['WrappedContentKey'] = wrapped_content_key
+    encryption_data_dict['EncryptionAgent'] = encryption_agent
+    if version == _ENCRYPTION_PROTOCOL_V1:
+        encryption_data_dict['ContentEncryptionIV'] = encode_base64(iv)
+    elif version == _ENCRYPTION_PROTOCOL_V2:
+        encryption_data_dict['EncryptedRegionInfo'] = encrypted_region_info
+    encryption_data_dict['KeyWrappingMetadata'] = OrderedDict({'EncryptionLibrary': 'Python ' + VERSION})
+
+    return encryption_data_dict
+
+
+def _dict_to_encryption_data(encryption_data_dict: Dict[str, Any]) -> _EncryptionData:
+    """
+    Converts the specified dictionary to an EncryptionData object for
+    eventual use in decryption.
+
+    :param dict encryption_data_dict:
+        The dictionary containing the encryption data.
+    :return: an _EncryptionData object built from the dictionary.
+    :rtype: _EncryptionData
+    """
+    try:
+        protocol = encryption_data_dict['EncryptionAgent']['Protocol']
+        if protocol not in _VALID_ENCRYPTION_PROTOCOLS:
+            raise ValueError("Unsupported encryption version.")
+    except KeyError as exc:
+        raise ValueError("Unsupported encryption version.") from exc
+    wrapped_content_key = encryption_data_dict['WrappedContentKey']
+    wrapped_content_key = _WrappedContentKey(wrapped_content_key['Algorithm'],
+                                             decode_base64_to_bytes(wrapped_content_key['EncryptedKey']),
+                                             wrapped_content_key['KeyId'])
+
+    encryption_agent = encryption_data_dict['EncryptionAgent']
+    encryption_agent = _EncryptionAgent(encryption_agent['EncryptionAlgorithm'],
+                                        encryption_agent['Protocol'])
+
+    if 'KeyWrappingMetadata' in encryption_data_dict:
+        key_wrapping_metadata = encryption_data_dict['KeyWrappingMetadata']
+    else:
+        key_wrapping_metadata = None
+
+    # AES-CBC only
+    encryption_iv = None
+    if 'ContentEncryptionIV' in encryption_data_dict:
+        encryption_iv = decode_base64_to_bytes(encryption_data_dict['ContentEncryptionIV'])
+
+    # AES-GCM only
+    region_info = None
+    if 'EncryptedRegionInfo' in encryption_data_dict:
+        encrypted_region_info = encryption_data_dict['EncryptedRegionInfo']
+        region_info = _EncryptedRegionInfo(encrypted_region_info['DataLength'],
+                                           encrypted_region_info['NonceLength'],
+                                           _GCM_TAG_LENGTH)
+
+    encryption_data = _EncryptionData(encryption_iv,
+                                      region_info,
+                                      encryption_agent,
+                                      wrapped_content_key,
+                                      key_wrapping_metadata)
+
+    return encryption_data
+
+
+def _generate_AES_CBC_cipher(cek: bytes, iv: bytes) -> Cipher:
+    """
+    Generates and returns an encryption cipher for AES CBC using the given cek and iv.
+
+    :param bytes cek: The content encryption key for the cipher.
+    :param bytes iv: The initialization vector for the cipher.
+    :return: A cipher for encrypting in AES256 CBC.
+    :rtype: ~cryptography.hazmat.primitives.ciphers.Cipher
+    """
+
+    backend = default_backend()
+    algorithm = AES(cek)
+    mode = CBC(iv)
+    return Cipher(algorithm, mode, backend)
+
+
+def _validate_and_unwrap_cek(
+    encryption_data: _EncryptionData,
+    key_encryption_key: Optional[KeyEncryptionKey] = None,
+    key_resolver: Optional[Callable[[str], KeyEncryptionKey]] = None
+) -> bytes:
+    """
+    Extracts and returns the content_encryption_key stored in the encryption_data object
+    and performs necessary validation on all parameters.
+    :param _EncryptionData encryption_data:
+        The encryption metadata of the retrieved value.
+    :param Optional[KeyEncryptionKey] key_encryption_key:
+        The user-provided key-encryption-key. Must implement the following methods:
+        unwrap_key(key, algorithm)
+            - Unwraps the specified key using the specified algorithm.
+        get_kid()
+            - Returns a string key id for this key-encryption-key.
+    :param Optional[Callable[[str], KeyEncryptionKey]] key_resolver:
+        A function that, given a key_id, will return a key_encryption_key. Please refer
+        to high-level service object instance variables for more details.
+    :return: The content_encryption_key stored in the encryption_data object.
+    :rtype: bytes
+    """
+
+    _validate_not_none('encrypted_key', encryption_data.wrapped_content_key.encrypted_key)
+
+    # Validate we have the right info for the specified version
+    if encryption_data.encryption_agent.protocol == _ENCRYPTION_PROTOCOL_V1:
+        _validate_not_none('content_encryption_IV', encryption_data.content_encryption_IV)
+    elif encryption_data.encryption_agent.protocol in _ENCRYPTION_V2_PROTOCOLS:
+        _validate_not_none('encrypted_region_info', encryption_data.encrypted_region_info)
+    else:
+        raise ValueError('Specified encryption version is not supported.')
+
+    content_encryption_key: Optional[bytes] = None
+
+    # If the resolver exists, give priority to the key it finds.
+    if key_resolver is not None:
+        key_encryption_key = key_resolver(encryption_data.wrapped_content_key.key_id)
+
+    if key_encryption_key is None:
+        raise ValueError("Unable to decrypt. key_resolver and key_encryption_key cannot both be None.")
+    if not hasattr(key_encryption_key, 'get_kid') or not callable(key_encryption_key.get_kid):
+        raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'get_kid'))
+    if not hasattr(key_encryption_key, 'unwrap_key') or not callable(key_encryption_key.unwrap_key):
+        raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'unwrap_key'))
+    if encryption_data.wrapped_content_key.key_id != key_encryption_key.get_kid():
+        raise ValueError('Provided or resolved key-encryption-key does not match the id of key used to encrypt.')
+    # Will throw an exception if the specified algorithm is not supported.
+    content_encryption_key = key_encryption_key.unwrap_key(
+        encryption_data.wrapped_content_key.encrypted_key,
+        encryption_data.wrapped_content_key.algorithm)
+
+    # For V2, the version is included with the cek. We need to validate it
+    # and remove it from the actual cek.
+    if encryption_data.encryption_agent.protocol in _ENCRYPTION_V2_PROTOCOLS:
+        version_2_bytes = encryption_data.encryption_agent.protocol.encode().ljust(8, b'\0')
+        cek_version_bytes = content_encryption_key[:len(version_2_bytes)]
+        if cek_version_bytes != version_2_bytes:
+            raise ValueError('The encryption metadata is not valid and may have been modified.')
+
+        # Remove version from the start of the cek.
+        content_encryption_key = content_encryption_key[len(version_2_bytes):]
+
+    _validate_not_none('content_encryption_key', content_encryption_key)
+
+    return content_encryption_key
+
+
+def _decrypt_message(
+    message: bytes,
+    encryption_data: _EncryptionData,
+    key_encryption_key: Optional[KeyEncryptionKey] = None,
+    resolver: Optional[Callable[[str], KeyEncryptionKey]] = None
+) -> bytes:
+    """
+    Decrypts the given ciphertext using AES256 in CBC mode with 128 bit padding.
+    Unwraps the content-encryption-key using the user-provided or resolved key-encryption-key (kek).
+    Returns the original plaintext.
+
+    :param bytes message:
+        The ciphertext to be decrypted.
+    :param _EncryptionData encryption_data:
+        The metadata associated with this ciphertext.
+    :param Optional[KeyEncryptionKey] key_encryption_key:
+        The user-provided key-encryption-key. Must implement the following methods:
+        unwrap_key(key, algorithm)
+            - Unwraps the specified key using the specified algorithm.
+        get_kid()
+            - Returns a string key id for this key-encryption-key.
+    :param Optional[Callable[[str], KeyEncryptionKey]] resolver:
+        The user-provided key resolver. Uses the kid string to return a key-encryption-key
+        implementing the interface defined above.
+    :return: The decrypted plaintext.
+    :rtype: bytes
+    """
+    _validate_not_none('message', message)
+    content_encryption_key = _validate_and_unwrap_cek(encryption_data, key_encryption_key, resolver)
+
+    if encryption_data.encryption_agent.protocol == _ENCRYPTION_PROTOCOL_V1:
+        if not encryption_data.content_encryption_IV:
+            raise ValueError("Missing required metadata for decryption.")
+
+        cipher = _generate_AES_CBC_cipher(content_encryption_key, encryption_data.content_encryption_IV)
+
+        # decrypt data
+        decryptor = cipher.decryptor()
+        decrypted_data = (decryptor.update(message) + decryptor.finalize())
+
+        # unpad data
+        unpadder = PKCS7(128).unpadder()
+        decrypted_data = (unpadder.update(decrypted_data) + unpadder.finalize())
+
+    elif encryption_data.encryption_agent.protocol in _ENCRYPTION_V2_PROTOCOLS:
+        block_info = encryption_data.encrypted_region_info
+        if not block_info or not block_info.nonce_length:
+            raise ValueError("Missing required metadata for decryption.")
+
+        if encryption_data.encrypted_region_info is None:
+            raise ValueError("Missing required metadata for Encryption V2")
+
+        nonce_length = int(encryption_data.encrypted_region_info.nonce_length)
+
+        # First bytes are the nonce
+        nonce = message[:nonce_length]
+        ciphertext_with_tag = message[nonce_length:]
+
+        aesgcm = AESGCM(content_encryption_key)
+        decrypted_data = aesgcm.decrypt(nonce, ciphertext_with_tag, None)
+
+    else:
+        raise ValueError('Specified encryption version is not supported.')
+
+    return decrypted_data
+
+
+def encrypt_blob(blob: bytes, key_encryption_key: KeyEncryptionKey, version: str) -> Tuple[str, bytes]:
+    """
+    Encrypts the given blob using the given encryption protocol version.
+    Wraps the generated content-encryption-key using the user-provided key-encryption-key (kek).
+    Returns a json-formatted string containing the encryption metadata. This method should
+    only be used when a blob is small enough for single shot upload. Encrypting larger blobs
+    is done as a part of the upload_data_chunks method.
+
+    :param bytes blob:
+        The blob to be encrypted.
+    :param KeyEncryptionKey key_encryption_key:
+        The user-provided key-encryption-key. Must implement the following methods:
+        wrap_key(key)
+            - Wraps the specified key using an algorithm of the user's choice.
+        get_key_wrap_algorithm()
+            - Returns the algorithm used to wrap the specified symmetric key.
+        get_kid()
+            - Returns a string key id for this key-encryption-key.
+    :param str version: The client encryption version to use.
+    :return: A tuple of json-formatted string containing the encryption metadata and the encrypted blob data.
+    :rtype: (str, bytes)
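+
+    A minimal usage sketch (``kek`` is an illustrative KeyEncryptionKey
+    implementation)::
+
+        metadata_json, ciphertext = encrypt_blob(b"data", kek, _ENCRYPTION_PROTOCOL_V2)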
+    """
+
+    _validate_not_none('blob', blob)
+    _validate_not_none('key_encryption_key', key_encryption_key)
+    _validate_key_encryption_key_wrap(key_encryption_key)
+
+    if version == _ENCRYPTION_PROTOCOL_V1:
+        # AES256 CBC uses 256 bit (32 byte) keys and always uses 16 byte blocks
+        content_encryption_key = os.urandom(32)
+        initialization_vector = os.urandom(16)
+
+        cipher = _generate_AES_CBC_cipher(content_encryption_key, initialization_vector)
+
+        # PKCS7 with 16 byte blocks ensures compatibility with AES.
+        padder = PKCS7(128).padder()
+        padded_data = padder.update(blob) + padder.finalize()
+
+        # Encrypt the data.
+        encryptor = cipher.encryptor()
+        encrypted_data = encryptor.update(padded_data) + encryptor.finalize()
+
+    elif version == _ENCRYPTION_PROTOCOL_V2:
+        # AES256 GCM uses 256 bit (32 byte) keys and a 12 byte nonce.
+        content_encryption_key = os.urandom(32)
+        initialization_vector = None
+
+        data = BytesIO(blob)
+        encryption_stream = GCMBlobEncryptionStream(content_encryption_key, data)
+
+        encrypted_data = encryption_stream.read()
+
+    else:
+        raise ValueError("Invalid encryption version specified.")
+
+    encryption_data = _generate_encryption_data_dict(key_encryption_key, content_encryption_key,
+                                                     initialization_vector, version)
+    encryption_data['EncryptionMode'] = 'FullBlob'
+
+    return dumps(encryption_data), encrypted_data
+
+
+def generate_blob_encryption_data(
+    key_encryption_key: Optional[KeyEncryptionKey],
+    version: str
+) -> Tuple[Optional[bytes], Optional[bytes], Optional[str]]:
+    """
+    Generates the encryption_metadata for the blob.
+
+    :param Optional[KeyEncryptionKey] key_encryption_key:
+        The key-encryption-key used to wrap the cek associated with this blob.
+    :param str version: The client encryption version to use.
+    :return: A tuple containing the cek and iv for this blob as well as the
+        serialized encryption metadata for the blob.
+    :rtype: (Optional[bytes], Optional[bytes], Optional[str])
+    """
+
+    encryption_data = None
+    content_encryption_key = None
+    initialization_vector = None
+    if key_encryption_key:
+        _validate_key_encryption_key_wrap(key_encryption_key)
+        content_encryption_key = os.urandom(32)
+        # Initialization vector only needed for V1
+        if version == _ENCRYPTION_PROTOCOL_V1:
+            initialization_vector = os.urandom(16)
+        encryption_data_dict = _generate_encryption_data_dict(key_encryption_key,
+                                                              content_encryption_key,
+                                                              initialization_vector,
+                                                              version)
+        encryption_data_dict['EncryptionMode'] = 'FullBlob'
+        encryption_data = dumps(encryption_data_dict)
+
+    return content_encryption_key, initialization_vector, encryption_data
+
+
+def decrypt_blob(  # pylint: disable=too-many-locals,too-many-statements
+        require_encryption: bool,
+        key_encryption_key: Optional[KeyEncryptionKey],
+        key_resolver: Optional[Callable[[str], KeyEncryptionKey]],
+        content: bytes,
+        start_offset: int,
+        end_offset: int,
+        response_headers: Dict[str, Any]
+) -> bytes:
+    """
+    Decrypts the given blob contents and returns only the requested range.
+
+    :param bool require_encryption:
+        Whether the calling blob service requires objects to be decrypted.
+    :param Optional[KeyEncryptionKey] key_encryption_key:
+        The user-provided key-encryption-key. Must implement the following methods:
+        unwrap_key(key, algorithm)
+            - Unwraps the specified key using the specified algorithm.
+        get_kid()
+            - Returns a string key id for this key-encryption-key.
+    :param key_resolver:
+        The user-provided key resolver. Uses the kid string to return a key-encryption-key
+        implementing the interface defined above.
+    :type key_resolver: Optional[Callable[[str], KeyEncryptionKey]]
+    :param bytes content:
+        The encrypted blob content.
+    :param int start_offset:
+        The adjusted offset from the beginning of the *decrypted* content for the caller's data.
+    :param int end_offset:
+        The adjusted offset from the end of the *decrypted* content for the caller's data
+        (for V2, the ending index into the decrypted content).
+    :param Dict[str, Any] response_headers:
+        A dictionary of response headers from the download request. Expected to include the
+        'x-ms-meta-encryptiondata' header if the blob was encrypted.
+    :return: The decrypted blob content.
+    :rtype: bytes
+    """
+    try:
+        encryption_data = _dict_to_encryption_data(loads(response_headers['x-ms-meta-encryptiondata']))
+    except Exception as exc:  # pylint: disable=broad-except
+        if require_encryption:
+            raise ValueError(
+                'Encryption required, but received data does not contain appropriate metadata. ' + \
+                'Data was either not encrypted or metadata has been lost.') from exc
+
+        return content
+
+    algorithm = encryption_data.encryption_agent.encryption_algorithm
+    if algorithm not in (_EncryptionAlgorithm.AES_CBC_256, _EncryptionAlgorithm.AES_GCM_256):
+        raise ValueError('Specified encryption algorithm is not supported.')
+
+    version = encryption_data.encryption_agent.protocol
+    if version not in _VALID_ENCRYPTION_PROTOCOLS:
+        raise ValueError('Specified encryption version is not supported.')
+
+    content_encryption_key = _validate_and_unwrap_cek(encryption_data, key_encryption_key, key_resolver)
+
+    if version == _ENCRYPTION_PROTOCOL_V1:
+        blob_type = response_headers['x-ms-blob-type']
+
+        iv: Optional[bytes] = None
+        unpad = False
+        if 'content-range' in response_headers:
+            content_range = response_headers['content-range']
+            # Format: 'bytes x-y/size'
+
+            # Ignore the word 'bytes'
+            content_range = content_range.split(' ')
+
+            content_range = content_range[1].split('-')
+            content_range = content_range[1].split('/')
+            end_range = int(content_range[0])
+            blob_size = int(content_range[1])
+
+            if start_offset >= 16:
+                iv = content[:16]
+                content = content[16:]
+                start_offset -= 16
+            else:
+                iv = encryption_data.content_encryption_IV
+
+            if end_range == blob_size - 1:
+                unpad = True
+        else:
+            unpad = True
+            iv = encryption_data.content_encryption_IV
+
+        if blob_type == 'PageBlob':
+            unpad = False
+
+        if iv is None:
+            raise ValueError("Missing required metadata for Encryption V1")
+
+        cipher = _generate_AES_CBC_cipher(content_encryption_key, iv)
+        decryptor = cipher.decryptor()
+
+        content = decryptor.update(content) + decryptor.finalize()
+        if unpad:
+            unpadder = PKCS7(128).unpadder()
+            content = unpadder.update(content) + unpadder.finalize()
+
+        return content[start_offset: len(content) - end_offset]
+
+    if version in _ENCRYPTION_V2_PROTOCOLS:
+        # We assume the content contains only full encryption regions
+        total_size = len(content)
+        offset = 0
+
+        if encryption_data.encrypted_region_info is None:
+            raise ValueError("Missing required metadata for Encryption V2")
+
+        nonce_length = encryption_data.encrypted_region_info.nonce_length
+        data_length = encryption_data.encrypted_region_info.data_length
+        tag_length = encryption_data.encrypted_region_info.tag_length
+        region_length = nonce_length + data_length + tag_length
+
+        decrypted_content = bytearray()
+        while offset < total_size:
+            # Process one encryption region at a time
+            process_size = min(region_length, total_size)
+            encrypted_region = content[offset:offset + process_size]
+
+            # First bytes are the nonce
+            nonce = encrypted_region[:nonce_length]
+            ciphertext_with_tag = encrypted_region[nonce_length:]
+
+            aesgcm = AESGCM(content_encryption_key)
+            decrypted_data = aesgcm.decrypt(nonce, ciphertext_with_tag, None)
+            decrypted_content.extend(decrypted_data)
+
+            offset += process_size
+
+        # Read the caller requested data from the decrypted content
+        return decrypted_content[start_offset:end_offset]
+
+    raise ValueError('Specified encryption version is not supported.')
+
+
+def get_blob_encryptor_and_padder(
+    cek: Optional[bytes],
+    iv: Optional[bytes],
+    should_pad: bool
+) -> Tuple[Optional["AEADEncryptionContext"], Optional["PaddingContext"]]:
+    encryptor = None
+    padder = None
+
+    if cek is not None and iv is not None:
+        cipher = _generate_AES_CBC_cipher(cek, iv)
+        encryptor = cipher.encryptor()
+        padder = PKCS7(128).padder() if should_pad else None
+
+    return encryptor, padder
+
+
+def encrypt_queue_message(message: str, key_encryption_key: KeyEncryptionKey, version: str) -> str:
+    """
+    Encrypts the given plain text message using the given protocol version.
+    Wraps the generated content-encryption-key using the user-provided key-encryption-key (kek).
+    Returns a json-formatted string containing the encrypted message and the encryption metadata.
+
+    :param str message:
+        The plain text message to be encrypted.
+    :param KeyEncryptionKey key_encryption_key:
+        The user-provided key-encryption-key. Must implement the following methods:
+        wrap_key(key)
+            - Wraps the specified key using an algorithm of the user's choice.
+        get_key_wrap_algorithm()
+            - Returns the algorithm used to wrap the specified symmetric key.
+        get_kid()
+            - Returns a string key id for this key-encryption-key.
+    :param str version: The client encryption version to use.
+    :return: A json-formatted string containing the encrypted message and the encryption metadata.
+    :rtype: str
+    """
+
+    _validate_not_none('message', message)
+    _validate_not_none('key_encryption_key', key_encryption_key)
+    _validate_key_encryption_key_wrap(key_encryption_key)
+
+    # Queue encoding functions all return unicode strings, and encryption should
+    # operate on binary strings.
+    message_as_bytes: bytes = message.encode('utf-8')
+
+    if version == _ENCRYPTION_PROTOCOL_V1:
+        # AES256 CBC uses 256 bit (32 byte) keys and always with 16 byte blocks
+        content_encryption_key = os.urandom(32)
+        initialization_vector = os.urandom(16)
+
+        cipher = _generate_AES_CBC_cipher(content_encryption_key, initialization_vector)
+
+        # PKCS7 with 16 byte blocks ensures compatibility with AES.
+        padder = PKCS7(128).padder()
+        padded_data = padder.update(message_as_bytes) + padder.finalize()
+
+        # Encrypt the data.
+        encryptor = cipher.encryptor()
+        encrypted_data = encryptor.update(padded_data) + encryptor.finalize()
+
+    elif version == _ENCRYPTION_PROTOCOL_V2:
+        # AES256 GCM uses 256 bit (32 byte) keys and a 12 byte nonce.
+        content_encryption_key = os.urandom(32)
+        initialization_vector = None
+
+        # The nonce MUST be different for each key
+        nonce = os.urandom(12)
+        aesgcm = AESGCM(content_encryption_key)
+
+        # Returns ciphertext + tag
+        ciphertext_with_tag = aesgcm.encrypt(nonce, message_as_bytes, None)
+        encrypted_data = nonce + ciphertext_with_tag
+
+    else:
+        raise ValueError("Invalid encryption version specified.")
+
+    # Build the dictionary structure.
+    queue_message = {'EncryptedMessageContents': encode_base64(encrypted_data),
+                     'EncryptionData': _generate_encryption_data_dict(key_encryption_key,
+                                                                      content_encryption_key,
+                                                                      initialization_vector,
+                                                                      version)}
+
+    return dumps(queue_message)
+
+
+def decrypt_queue_message(
+    message: str,
+    response: "PipelineResponse",
+    require_encryption: bool,
+    key_encryption_key: Optional[KeyEncryptionKey],
+    resolver: Optional[Callable[[str], KeyEncryptionKey]]
+) -> str:
+    """
+    Returns the decrypted message contents from an EncryptedQueueMessage.
+    If no encryption metadata is present, the message is returned unaltered.
+
+    :param str message:
+        The JSON formatted QueueEncryptedMessage contents with all associated metadata.
+    :param PipelineResponse response:
+        The pipeline response, used to construct an error if decryption fails.
+    :param bool require_encryption:
+        If set, will enforce that the retrieved messages are encrypted and decrypt them.
+    :param Optional[KeyEncryptionKey] key_encryption_key:
+        The user-provided key-encryption-key. Must implement the following methods:
+        wrap_key(key)
+            - Wraps the specified key using an algorithm of the user's choice.
+        get_key_wrap_algorithm()
+            - Returns the algorithm used to wrap the specified symmetric key.
+        get_kid()
+            - Returns a string key id for this key-encryption-key.
+    :param Optional[Callable[[str], KeyEncryptionKey]] resolver:
+        The user-provided key resolver. Uses the kid string to return a key-encryption-key
+        implementing the interface defined above.
+    :return: The plain text message from the queue message.
+    :rtype: str
+    """
+    response = response.http_response
+
+    try:
+        deserialized_message: Dict[str, Any] = loads(message)
+
+        encryption_data = _dict_to_encryption_data(deserialized_message['EncryptionData'])
+        decoded_data = decode_base64_to_bytes(deserialized_message['EncryptedMessageContents'])
+    except (KeyError, ValueError) as exc:
+        # The message was not JSON formatted and so was not encrypted,
+        # the user provided a JSON formatted message that was not encrypted,
+        # or the metadata was malformed.
+        if require_encryption:
+            raise ValueError(
+                'Encryption required, but received message does not contain appropriate metadata. ' + \
+                'Message was either not encrypted or metadata was incorrect.') from exc
+
+        return message
+    try:
+        return _decrypt_message(decoded_data, encryption_data, key_encryption_key, resolver).decode('utf-8')
+    except Exception as error:
+        raise HttpResponseError(
+            message="Decryption failed.",
+            response=response,  # type: ignore[arg-type]
+            error=error) from error
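For context, the Encryption V2 branch above consumes the downloaded bytes one region at a time, each region laid out as nonce || ciphertext || tag. Below is a minimal sketch of that layout and its round trip, assuming only the `cryptography` package this module already imports; key and nonce sizes follow the constants used above, and all values are illustrative:

    import os
    from cryptography.hazmat.primitives.ciphers.aead import AESGCM

    key = os.urandom(32)    # 256-bit content encryption key
    nonce = os.urandom(12)  # 96-bit nonce, unique per region
    plaintext = b"example region payload"

    # AESGCM.encrypt returns the ciphertext with the 16-byte tag appended,
    # so a full region is nonce + ciphertext + tag.
    region = nonce + AESGCM(key).encrypt(nonce, plaintext, None)

    # Decryption mirrors the loop in decrypt_blob: split off the nonce,
    # then AESGCM verifies the tag while recovering the plaintext.
    recovered = AESGCM(key).decrypt(region[:12], region[12:], None)
    assert recovered == plaintext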
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/blob/_generated/__init__.py b/.venv/lib/python3.12/site-packages/azure/storage/blob/_generated/__init__.py
new file mode 100644
index 00000000..c57ce36e
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/blob/_generated/__init__.py
@@ -0,0 +1,29 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+# pylint: disable=wrong-import-position
+
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from ._patch import *  # pylint: disable=unused-wildcard-import
+
+from ._azure_blob_storage import AzureBlobStorage  # type: ignore
+
+try:
+    from ._patch import __all__ as _patch_all
+    from ._patch import *
+except ImportError:
+    _patch_all = []
+from ._patch import patch_sdk as _patch_sdk
+
+__all__ = [
+    "AzureBlobStorage",
+]
+__all__.extend([p for p in _patch_all if p not in __all__])  # pyright: ignore
+
+_patch_sdk()
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/blob/_generated/_azure_blob_storage.py b/.venv/lib/python3.12/site-packages/azure/storage/blob/_generated/_azure_blob_storage.py
new file mode 100644
index 00000000..a429b713
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/blob/_generated/_azure_blob_storage.py
@@ -0,0 +1,119 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+from copy import deepcopy
+from typing import Any
+from typing_extensions import Self
+
+from azure.core import PipelineClient
+from azure.core.pipeline import policies
+from azure.core.rest import HttpRequest, HttpResponse
+
+from . import models as _models
+from ._configuration import AzureBlobStorageConfiguration
+from ._serialization import Deserializer, Serializer
+from .operations import (
+    AppendBlobOperations,
+    BlobOperations,
+    BlockBlobOperations,
+    ContainerOperations,
+    PageBlobOperations,
+    ServiceOperations,
+)
+
+
+class AzureBlobStorage:  # pylint: disable=client-accepts-api-version-keyword
+    """AzureBlobStorage.
+
+    :ivar service: ServiceOperations operations
+    :vartype service: azure.storage.blob.operations.ServiceOperations
+    :ivar container: ContainerOperations operations
+    :vartype container: azure.storage.blob.operations.ContainerOperations
+    :ivar blob: BlobOperations operations
+    :vartype blob: azure.storage.blob.operations.BlobOperations
+    :ivar page_blob: PageBlobOperations operations
+    :vartype page_blob: azure.storage.blob.operations.PageBlobOperations
+    :ivar append_blob: AppendBlobOperations operations
+    :vartype append_blob: azure.storage.blob.operations.AppendBlobOperations
+    :ivar block_blob: BlockBlobOperations operations
+    :vartype block_blob: azure.storage.blob.operations.BlockBlobOperations
+    :param url: The URL of the service account, container, or blob that is the target of the
+     desired operation. Required.
+    :type url: str
+    :param base_url: Service URL. Required. Default value is "".
+    :type base_url: str
+    :keyword version: Specifies the version of the operation to use for this request. Default value
+     is "2025-01-05". Note that overriding this default value may result in unsupported behavior.
+    :paramtype version: str
+    """
+
+    def __init__(  # pylint: disable=missing-client-constructor-parameter-credential
+        self, url: str, base_url: str = "", **kwargs: Any
+    ) -> None:
+        self._config = AzureBlobStorageConfiguration(url=url, **kwargs)
+        _policies = kwargs.pop("policies", None)
+        if _policies is None:
+            _policies = [
+                policies.RequestIdPolicy(**kwargs),
+                self._config.headers_policy,
+                self._config.user_agent_policy,
+                self._config.proxy_policy,
+                policies.ContentDecodePolicy(**kwargs),
+                self._config.redirect_policy,
+                self._config.retry_policy,
+                self._config.authentication_policy,
+                self._config.custom_hook_policy,
+                self._config.logging_policy,
+                policies.DistributedTracingPolicy(**kwargs),
+                policies.SensitiveHeaderCleanupPolicy(**kwargs) if self._config.redirect_policy else None,
+                self._config.http_logging_policy,
+            ]
+        self._client: PipelineClient = PipelineClient(base_url=base_url, policies=_policies, **kwargs)
+
+        client_models = {k: v for k, v in _models.__dict__.items() if isinstance(v, type)}
+        self._serialize = Serializer(client_models)
+        self._deserialize = Deserializer(client_models)
+        self._serialize.client_side_validation = False
+        self.service = ServiceOperations(self._client, self._config, self._serialize, self._deserialize)
+        self.container = ContainerOperations(self._client, self._config, self._serialize, self._deserialize)
+        self.blob = BlobOperations(self._client, self._config, self._serialize, self._deserialize)
+        self.page_blob = PageBlobOperations(self._client, self._config, self._serialize, self._deserialize)
+        self.append_blob = AppendBlobOperations(self._client, self._config, self._serialize, self._deserialize)
+        self.block_blob = BlockBlobOperations(self._client, self._config, self._serialize, self._deserialize)
+
+    def _send_request(self, request: HttpRequest, *, stream: bool = False, **kwargs: Any) -> HttpResponse:
+        """Runs the network request through the client's chained policies.
+
+        >>> from azure.core.rest import HttpRequest
+        >>> request = HttpRequest("GET", "https://www.example.org/")
+        >>> request
+        <HttpRequest [GET], url: 'https://www.example.org/'>
+        >>> response = client._send_request(request)
+        >>> response
+        <HttpResponse: 200 OK>
+
+        For more information on this code flow, see https://aka.ms/azsdk/dpcodegen/python/send_request
+
+        :param request: The network request you want to make. Required.
+        :type request: ~azure.core.rest.HttpRequest
+        :keyword bool stream: Whether the response payload will be streamed. Defaults to False.
+        :return: The response of your network call. Does not do error handling on your response.
+        :rtype: ~azure.core.rest.HttpResponse
+        """
+
+        request_copy = deepcopy(request)
+        request_copy.url = self._client.format_url(request_copy.url)
+        return self._client.send_request(request_copy, stream=stream, **kwargs)  # type: ignore
+
+    def close(self) -> None:
+        self._client.close()
+
+    def __enter__(self) -> Self:
+        self._client.__enter__()
+        return self
+
+    def __exit__(self, *exc_details: Any) -> None:
+        self._client.__exit__(*exc_details)
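The generated client is normally driven through the public blob client wrappers, but it can be exercised directly. A minimal sketch, with a placeholder account URL (an unauthenticated request will only succeed against resources that permit anonymous access):

    from azure.core.rest import HttpRequest
    from azure.storage.blob._generated import AzureBlobStorage

    account_url = "https://myaccount.blob.core.windows.net"  # placeholder
    with AzureBlobStorage(url=account_url, base_url=account_url) as client:
        # Run a raw request through the client's chained pipeline policies.
        request = HttpRequest("GET", f"{account_url}/?comp=list&maxresults=5")
        response = client._send_request(request)
        print(response.status_code)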
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/blob/_generated/_configuration.py b/.venv/lib/python3.12/site-packages/azure/storage/blob/_generated/_configuration.py
new file mode 100644
index 00000000..2af7d1d2
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/blob/_generated/_configuration.py
@@ -0,0 +1,51 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+from typing import Any, Literal
+
+from azure.core.pipeline import policies
+
+VERSION = "unknown"
+
+
+class AzureBlobStorageConfiguration:  # pylint: disable=too-many-instance-attributes
+    """Configuration for AzureBlobStorage.
+
+    Note that all parameters used to create this instance are saved as instance
+    attributes.
+
+    :param url: The URL of the service account, container, or blob that is the target of the
+     desired operation. Required.
+    :type url: str
+    :keyword version: Specifies the version of the operation to use for this request. Default value
+     is "2025-01-05". Note that overriding this default value may result in unsupported behavior.
+    :paramtype version: str
+    """
+
+    def __init__(self, url: str, **kwargs: Any) -> None:
+        version: Literal["2025-01-05"] = kwargs.pop("version", "2025-01-05")
+
+        if url is None:
+            raise ValueError("Parameter 'url' must not be None.")
+
+        self.url = url
+        self.version = version
+        kwargs.setdefault("sdk_moniker", "azureblobstorage/{}".format(VERSION))
+        self.polling_interval = kwargs.get("polling_interval", 30)
+        self._configure(**kwargs)
+
+    def _configure(self, **kwargs: Any) -> None:
+        self.user_agent_policy = kwargs.get("user_agent_policy") or policies.UserAgentPolicy(**kwargs)
+        self.headers_policy = kwargs.get("headers_policy") or policies.HeadersPolicy(**kwargs)
+        self.proxy_policy = kwargs.get("proxy_policy") or policies.ProxyPolicy(**kwargs)
+        self.logging_policy = kwargs.get("logging_policy") or policies.NetworkTraceLoggingPolicy(**kwargs)
+        self.http_logging_policy = kwargs.get("http_logging_policy") or policies.HttpLoggingPolicy(**kwargs)
+        self.custom_hook_policy = kwargs.get("custom_hook_policy") or policies.CustomHookPolicy(**kwargs)
+        self.redirect_policy = kwargs.get("redirect_policy") or policies.RedirectPolicy(**kwargs)
+        self.retry_policy = kwargs.get("retry_policy") or policies.RetryPolicy(**kwargs)
+        self.authentication_policy = kwargs.get("authentication_policy")
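Each policy resolved in `_configure` can be replaced by passing a keyword argument of the same name, so a caller can swap one default without rebuilding the rest of the pipeline. A sketch with illustrative retry settings and a placeholder URL:

    from azure.core.pipeline import policies
    from azure.storage.blob._generated import AzureBlobStorage

    client = AzureBlobStorage(
        url="https://myaccount.blob.core.windows.net",  # placeholder
        retry_policy=policies.RetryPolicy(retry_total=3),
    )
    # The override is kept on the configuration object.
    print(client._config.retry_policy)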
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/blob/_generated/_patch.py b/.venv/lib/python3.12/site-packages/azure/storage/blob/_generated/_patch.py
new file mode 100644
index 00000000..f7dd3251
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/blob/_generated/_patch.py
@@ -0,0 +1,20 @@
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------
+"""Customize generated code here.
+
+Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize
+"""
+from typing import List
+
+__all__: List[str] = []  # Add all objects you want publicly available to users at this package level
+
+
+def patch_sdk():
+    """Do not remove from this file.
+
+    `patch_sdk` is a last resort escape hatch that allows you to do customizations
+    you can't accomplish using the techniques described in
+    https://aka.ms/azsdk/python/dpcodegen/python/customize
+    """
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/blob/_generated/_serialization.py b/.venv/lib/python3.12/site-packages/azure/storage/blob/_generated/_serialization.py
new file mode 100644
index 00000000..a066e16a
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/blob/_generated/_serialization.py
@@ -0,0 +1,2050 @@
+# pylint: disable=too-many-lines
+# --------------------------------------------------------------------------
+#
+# Copyright (c) Microsoft Corporation. All rights reserved.
+#
+# The MIT License (MIT)
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the ""Software""), to
+# deal in the Software without restriction, including without limitation the
+# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+# sell copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+# --------------------------------------------------------------------------
+
+# pyright: reportUnnecessaryTypeIgnoreComment=false
+
+from base64 import b64decode, b64encode
+import calendar
+import datetime
+import decimal
+import email
+from enum import Enum
+import json
+import logging
+import re
+import sys
+import codecs
+from typing import (
+    Dict,
+    Any,
+    cast,
+    Optional,
+    Union,
+    AnyStr,
+    IO,
+    Mapping,
+    Callable,
+    MutableMapping,
+    List,
+)
+
+try:
+    from urllib import quote  # type: ignore
+except ImportError:
+    from urllib.parse import quote
+import xml.etree.ElementTree as ET
+
+import isodate  # type: ignore
+from typing_extensions import Self
+
+from azure.core.exceptions import DeserializationError, SerializationError
+from azure.core.serialization import NULL as CoreNull
+
+_BOM = codecs.BOM_UTF8.decode(encoding="utf-8")
+
+JSON = MutableMapping[str, Any]
+
+
+class RawDeserializer:
+
+    # Accept "text" because we're open minded people...
+    JSON_REGEXP = re.compile(r"^(application|text)/([a-z+.]+\+)?json$")
+
+    # Name used in context
+    CONTEXT_NAME = "deserialized_data"
+
+    @classmethod
+    def deserialize_from_text(cls, data: Optional[Union[AnyStr, IO]], content_type: Optional[str] = None) -> Any:
+        """Decode data according to content-type.
+
+        Accepts a stream of data as well, but it will be loaded into memory at once for now.
+
+        If no content-type is given, returns the string version (not bytes, not a stream).
+
+        :param data: Input, could be bytes or stream (will be decoded with UTF8) or text
+        :type data: str or bytes or IO
+        :param str content_type: The content type.
+        :return: The deserialized data.
+        :rtype: object
+        """
+        if hasattr(data, "read"):
+            # Assume a stream
+            data = cast(IO, data).read()
+
+        if isinstance(data, bytes):
+            data_as_str = data.decode(encoding="utf-8-sig")
+        else:
+            # Explain to mypy the correct type.
+            data_as_str = cast(str, data)
+
+            # Remove Byte Order Mark if present in string
+            data_as_str = data_as_str.lstrip(_BOM)
+
+        if content_type is None:
+            return data
+
+        if cls.JSON_REGEXP.match(content_type):
+            try:
+                return json.loads(data_as_str)
+            except ValueError as err:
+                raise DeserializationError("JSON is invalid: {}".format(err), err) from err
+        elif "xml" in (content_type or []):
+            try:
+
+                try:
+                    if isinstance(data, unicode):  # type: ignore
+                        # If I'm Python 2.7 and unicode XML will scream if I try a "fromstring" on unicode string
+                        data_as_str = data_as_str.encode(encoding="utf-8")  # type: ignore
+                except NameError:
+                    pass
+
+                return ET.fromstring(data_as_str)  # nosec
+            except ET.ParseError as err:
+                # It might be because the server has an issue, and returned JSON with
+                # content-type XML....
+                # So let's try a JSON load, and if it's still broken
+                # let's flow the initial exception
+                def _json_attempt(data):
+                    try:
+                        return True, json.loads(data)
+                    except ValueError:
+                        return False, None  # Don't care about this one
+
+                success, json_result = _json_attempt(data)
+                if success:
+                    return json_result
+                # If i'm here, it's not JSON, it's not XML, let's scream
+                # and raise the last context in this block (the XML exception)
+                # The function hack is because Py2.7 messes up with exception
+                # context otherwise.
+                _LOGGER.critical("Wasn't XML not JSON, failing")
+                raise DeserializationError("XML is invalid") from err
+        elif content_type.startswith("text/"):
+            return data_as_str
+        raise DeserializationError("Cannot deserialize content-type: {}".format(content_type))
+
+    @classmethod
+    def deserialize_from_http_generics(cls, body_bytes: Optional[Union[AnyStr, IO]], headers: Mapping) -> Any:
+        """Deserialize from HTTP response.
+
+        Use bytes and headers so as NOT to depend on requests/aiohttp or any other
+        specific implementation.
+        Headers will be tested for "content-type".
+
+        :param bytes body_bytes: The body of the response.
+        :param dict headers: The headers of the response.
+        :returns: The deserialized data.
+        :rtype: object
+        """
+        # Try to use content-type from headers if available
+        content_type = None
+        if "content-type" in headers:
+            content_type = headers["content-type"].split(";")[0].strip().lower()
+        # Ouch, this server did not declare what it sent...
+        # Let's guess it's JSON...
+        # Also, since Autorest was considering that an empty body was a valid JSON,
+        # need that test as well....
+        else:
+            content_type = "application/json"
+
+        if body_bytes:
+            return cls.deserialize_from_text(body_bytes, content_type)
+        return None
+
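+# Illustrative usage (hypothetical inputs): deserialize_from_http_generics
+# picks a decoder from the content-type header, defaulting to JSON when the
+# server omits one:
+#
+#     RawDeserializer.deserialize_from_http_generics(
+#         b'{"name": "blob1"}', {"content-type": "application/json"})
+#     # -> {'name': 'blob1'}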
+
+_LOGGER = logging.getLogger(__name__)
+
+try:
+    _long_type = long  # type: ignore
+except NameError:
+    _long_type = int
+
+TZ_UTC = datetime.timezone.utc
+
+_FLATTEN = re.compile(r"(?<!\\)\.")
+
+
+def attribute_transformer(key, attr_desc, value):  # pylint: disable=unused-argument
+    """A key transformer that returns the Python attribute.
+
+    :param str key: The attribute name
+    :param dict attr_desc: The attribute metadata
+    :param object value: The value
+    :returns: A key using attribute name
+    :rtype: str
+    """
+    return (key, value)
+
+
+def full_restapi_key_transformer(key, attr_desc, value):  # pylint: disable=unused-argument
+    """A key transformer that returns the full RestAPI key path.
+
+    :param str key: The attribute name
+    :param dict attr_desc: The attribute metadata
+    :param object value: The value
+    :returns: A list of keys using RestAPI syntax.
+    :rtype: list
+    """
+    keys = _FLATTEN.split(attr_desc["key"])
+    return ([_decode_attribute_map_key(k) for k in keys], value)
+
+
+def last_restapi_key_transformer(key, attr_desc, value):
+    """A key transformer that returns the last RestAPI key.
+
+    :param str key: The attribute name
+    :param dict attr_desc: The attribute metadata
+    :param object value: The value
+    :returns: The last RestAPI key.
+    :rtype: str
+    """
+    key, value = full_restapi_key_transformer(key, attr_desc, value)
+    return (key[-1], value)
+
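+# Worked example for the three transformers above (hypothetical inputs):
+# given key="content_length", attr_desc={"key": "properties.contentLength"},
+# and value=1024:
+#   attribute_transformer        -> ("content_length", 1024)
+#   full_restapi_key_transformer -> (["properties", "contentLength"], 1024)
+#   last_restapi_key_transformer -> ("contentLength", 1024)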
+
+def _create_xml_node(tag, prefix=None, ns=None):
+    """Create a XML node.
+
+    :param str tag: The tag name
+    :param str prefix: The prefix
+    :param str ns: The namespace
+    :return: The XML node
+    :rtype: xml.etree.ElementTree.Element
+    """
+    if prefix and ns:
+        ET.register_namespace(prefix, ns)
+    if ns:
+        return ET.Element("{" + ns + "}" + tag)
+    return ET.Element(tag)
+
+
+class Model:
+    """Mixin for all client request body/response body models to support
+    serialization and deserialization.
+    """
+
+    _subtype_map: Dict[str, Dict[str, Any]] = {}
+    _attribute_map: Dict[str, Dict[str, Any]] = {}
+    _validation: Dict[str, Dict[str, Any]] = {}
+
+    def __init__(self, **kwargs: Any) -> None:
+        self.additional_properties: Optional[Dict[str, Any]] = {}
+        for k in kwargs:  # pylint: disable=consider-using-dict-items
+            if k not in self._attribute_map:
+                _LOGGER.warning("%s is not a known attribute of class %s and will be ignored", k, self.__class__)
+            elif k in self._validation and self._validation[k].get("readonly", False):
+                _LOGGER.warning("Readonly attribute %s will be ignored in class %s", k, self.__class__)
+            else:
+                setattr(self, k, kwargs[k])
+
+    def __eq__(self, other: Any) -> bool:
+        """Compare objects by comparing all attributes.
+
+        :param object other: The object to compare
+        :returns: True if objects are equal
+        :rtype: bool
+        """
+        if isinstance(other, self.__class__):
+            return self.__dict__ == other.__dict__
+        return False
+
+    def __ne__(self, other: Any) -> bool:
+        """Compare objects by comparing all attributes.
+
+        :param object other: The object to compare
+        :returns: True if objects are not equal
+        :rtype: bool
+        """
+        return not self.__eq__(other)
+
+    def __str__(self) -> str:
+        return str(self.__dict__)
+
+    @classmethod
+    def enable_additional_properties_sending(cls) -> None:
+        cls._attribute_map["additional_properties"] = {"key": "", "type": "{object}"}
+
+    @classmethod
+    def is_xml_model(cls) -> bool:
+        try:
+            cls._xml_map  # type: ignore
+        except AttributeError:
+            return False
+        return True
+
+    @classmethod
+    def _create_xml_node(cls):
+        """Create XML node.
+
+        :returns: The XML node
+        :rtype: xml.etree.ElementTree.Element
+        """
+        try:
+            xml_map = cls._xml_map  # type: ignore
+        except AttributeError:
+            xml_map = {}
+
+        return _create_xml_node(xml_map.get("name", cls.__name__), xml_map.get("prefix", None), xml_map.get("ns", None))
+
+    def serialize(self, keep_readonly: bool = False, **kwargs: Any) -> JSON:
+        """Return the JSON that would be sent to server from this model.
+
+        This is an alias to `as_dict(full_restapi_key_transformer, keep_readonly=False)`.
+
+        If you want XML serialization, you can pass the kwargs is_xml=True.
+
+        :param bool keep_readonly: If you want to serialize the readonly attributes
+        :returns: A dict JSON compatible object
+        :rtype: dict
+        """
+        serializer = Serializer(self._infer_class_models())
+        return serializer._serialize(  # type: ignore # pylint: disable=protected-access
+            self, keep_readonly=keep_readonly, **kwargs
+        )
+
+    def as_dict(
+        self,
+        keep_readonly: bool = True,
+        key_transformer: Callable[[str, Dict[str, Any], Any], Any] = attribute_transformer,
+        **kwargs: Any
+    ) -> JSON:
+        """Return a dict that can be serialized using json.dump.
+
+        Advanced usage might optionally use a callback as parameter:
+
+        .. code:: python
+
+            def my_key_transformer(key, attr_desc, value):
+                return key
+
+        Key is the attribute name used in Python. Attr_desc
+        is a dict of metadata. Currently contains 'type' with the
+        msrest type and 'key' with the RestAPI encoded key.
+        Value is the current value in this object.
+
+        The string returned will be used to serialize the key.
+        If the return type is a list, this is considered hierarchical
+        result dict.
+
+        See the three examples in this file:
+
+        - attribute_transformer
+        - full_restapi_key_transformer
+        - last_restapi_key_transformer
+
+        If you want XML serialization, you can pass the kwargs is_xml=True.
+
+        :param bool keep_readonly: If you want to serialize the readonly attributes
+        :param function key_transformer: A key transformer function.
+        :returns: A dict JSON compatible object
+        :rtype: dict
+        """
+        serializer = Serializer(self._infer_class_models())
+        return serializer._serialize(  # type: ignore # pylint: disable=protected-access
+            self, key_transformer=key_transformer, keep_readonly=keep_readonly, **kwargs
+        )
+
+    @classmethod
+    def _infer_class_models(cls):
+        try:
+            str_models = cls.__module__.rsplit(".", 1)[0]
+            models = sys.modules[str_models]
+            client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
+            if cls.__name__ not in client_models:
+                raise ValueError("Not Autorest generated code")
+        except Exception:  # pylint: disable=broad-exception-caught
+            # Assume it's not Autorest generated (tests?). Add ourselves as dependencies.
+            client_models = {cls.__name__: cls}
+        return client_models
+
+    @classmethod
+    def deserialize(cls, data: Any, content_type: Optional[str] = None) -> Self:
+        """Parse a str using the RestAPI syntax and return a model.
+
+        :param str data: A str using RestAPI structure. JSON by default.
+        :param str content_type: JSON by default, set application/xml if XML.
+        :returns: An instance of this model
+        :raises DeserializationError: if something went wrong
+        :rtype: Self
+        """
+        deserializer = Deserializer(cls._infer_class_models())
+        return deserializer(cls.__name__, data, content_type=content_type)  # type: ignore
+
+    @classmethod
+    def from_dict(
+        cls,
+        data: Any,
+        key_extractors: Optional[Callable[[str, Dict[str, Any], Any], Any]] = None,
+        content_type: Optional[str] = None,
+    ) -> Self:
+        """Parse a dict using given key extractor return a model.
+
+        By default, considers the key extractors
+        (rest_key_case_insensitive_extractor, attribute_key_case_insensitive_extractor
+        and last_rest_key_case_insensitive_extractor).
+
+        :param dict data: A dict using RestAPI structure
+        :param function key_extractors: A key extractor function.
+        :param str content_type: JSON by default, set application/xml if XML.
+        :returns: An instance of this model
+        :raises DeserializationError: if something went wrong
+        :rtype: Self
+        """
+        deserializer = Deserializer(cls._infer_class_models())
+        deserializer.key_extractors = (  # type: ignore
+            [  # type: ignore
+                attribute_key_case_insensitive_extractor,
+                rest_key_case_insensitive_extractor,
+                last_rest_key_case_insensitive_extractor,
+            ]
+            if key_extractors is None
+            else key_extractors
+        )
+        return deserializer(cls.__name__, data, content_type=content_type)  # type: ignore
+
+    @classmethod
+    def _flatten_subtype(cls, key, objects):
+        if "_subtype_map" not in cls.__dict__:
+            return {}
+        result = dict(cls._subtype_map[key])
+        for valuetype in cls._subtype_map[key].values():
+            result.update(objects[valuetype]._flatten_subtype(key, objects))  # pylint: disable=protected-access
+        return result
+
+    @classmethod
+    def _classify(cls, response, objects):
+        """Check the class _subtype_map for any child classes.
+        We want to ignore any inherited _subtype_maps.
+
+        :param dict response: The initial data
+        :param dict objects: The class objects
+        :returns: The class to be used
+        :rtype: class
+        """
+        for subtype_key in cls.__dict__.get("_subtype_map", {}).keys():
+            subtype_value = None
+
+            if not isinstance(response, ET.Element):
+                rest_api_response_key = cls._get_rest_key_parts(subtype_key)[-1]
+                subtype_value = response.get(rest_api_response_key, None) or response.get(subtype_key, None)
+            else:
+                subtype_value = xml_key_extractor(subtype_key, cls._attribute_map[subtype_key], response)
+            if subtype_value:
+                # Try to match base class. Can be class name only
+                # (bug to fix in Autorest to support x-ms-discriminator-name)
+                if cls.__name__ == subtype_value:
+                    return cls
+                flatten_mapping_type = cls._flatten_subtype(subtype_key, objects)
+                try:
+                    return objects[flatten_mapping_type[subtype_value]]  # type: ignore
+                except KeyError:
+                    _LOGGER.warning(
+                        "Subtype value %s has no mapping, use base class %s.",
+                        subtype_value,
+                        cls.__name__,
+                    )
+                    break
+            else:
+                _LOGGER.warning("Discriminator %s is absent or null, use base class %s.", subtype_key, cls.__name__)
+                break
+        return cls
+
+    @classmethod
+    def _get_rest_key_parts(cls, attr_key):
+        """Get the RestAPI key of this attr, split it and decode part
+        :param str attr_key: Attribute key must be in attribute_map.
+        :returns: A list of RestAPI part
+        :rtype: list
+        """
+        rest_split_key = _FLATTEN.split(cls._attribute_map[attr_key]["key"])
+        return [_decode_attribute_map_key(key_part) for key_part in rest_split_key]
+
+
+def _decode_attribute_map_key(key):
+    """This decode a key in an _attribute_map to the actual key we want to look at
+    inside the received data.
+
+    :param str key: A key string from the generated code
+    :returns: The decoded key
+    :rtype: str
+    """
+    return key.replace("\\.", ".")
+
+
+class Serializer:  # pylint: disable=too-many-public-methods
+    """Request object model serializer."""
+
+    basic_types = {str: "str", int: "int", bool: "bool", float: "float"}
+
+    _xml_basic_types_serializers = {"bool": lambda x: str(x).lower()}
+    days = {0: "Mon", 1: "Tue", 2: "Wed", 3: "Thu", 4: "Fri", 5: "Sat", 6: "Sun"}
+    months = {
+        1: "Jan",
+        2: "Feb",
+        3: "Mar",
+        4: "Apr",
+        5: "May",
+        6: "Jun",
+        7: "Jul",
+        8: "Aug",
+        9: "Sep",
+        10: "Oct",
+        11: "Nov",
+        12: "Dec",
+    }
+    validation = {
+        "min_length": lambda x, y: len(x) < y,
+        "max_length": lambda x, y: len(x) > y,
+        "minimum": lambda x, y: x < y,
+        "maximum": lambda x, y: x > y,
+        "minimum_ex": lambda x, y: x <= y,
+        "maximum_ex": lambda x, y: x >= y,
+        "min_items": lambda x, y: len(x) < y,
+        "max_items": lambda x, y: len(x) > y,
+        "pattern": lambda x, y: not re.match(y, x, re.UNICODE),
+        "unique": lambda x, y: len(x) != len(set(x)),
+        "multiple": lambda x, y: x % y != 0,
+    }
+
+    def __init__(self, classes: Optional[Mapping[str, type]] = None) -> None:
+        self.serialize_type = {
+            "iso-8601": Serializer.serialize_iso,
+            "rfc-1123": Serializer.serialize_rfc,
+            "unix-time": Serializer.serialize_unix,
+            "duration": Serializer.serialize_duration,
+            "date": Serializer.serialize_date,
+            "time": Serializer.serialize_time,
+            "decimal": Serializer.serialize_decimal,
+            "long": Serializer.serialize_long,
+            "bytearray": Serializer.serialize_bytearray,
+            "base64": Serializer.serialize_base64,
+            "object": self.serialize_object,
+            "[]": self.serialize_iter,
+            "{}": self.serialize_dict,
+        }
+        self.dependencies: Dict[str, type] = dict(classes) if classes else {}
+        self.key_transformer = full_restapi_key_transformer
+        self.client_side_validation = True
+
+    def _serialize(  # pylint: disable=too-many-nested-blocks, too-many-branches, too-many-statements, too-many-locals
+        self, target_obj, data_type=None, **kwargs
+    ):
+        """Serialize data into a string according to type.
+
+        :param object target_obj: The data to be serialized.
+        :param str data_type: The type to be serialized from.
+        :rtype: str, dict
+        :raises SerializationError: if serialization fails.
+        :returns: The serialized data.
+        """
+        key_transformer = kwargs.get("key_transformer", self.key_transformer)
+        keep_readonly = kwargs.get("keep_readonly", False)
+        if target_obj is None:
+            return None
+
+        attr_name = None
+        class_name = target_obj.__class__.__name__
+
+        if data_type:
+            return self.serialize_data(target_obj, data_type, **kwargs)
+
+        if not hasattr(target_obj, "_attribute_map"):
+            data_type = type(target_obj).__name__
+            if data_type in self.basic_types.values():
+                return self.serialize_data(target_obj, data_type, **kwargs)
+
+        # Force "is_xml" kwargs if we detect a XML model
+        try:
+            is_xml_model_serialization = kwargs["is_xml"]
+        except KeyError:
+            is_xml_model_serialization = kwargs.setdefault("is_xml", target_obj.is_xml_model())
+
+        serialized = {}
+        if is_xml_model_serialization:
+            serialized = target_obj._create_xml_node()  # pylint: disable=protected-access
+        try:
+            attributes = target_obj._attribute_map  # pylint: disable=protected-access
+            for attr, attr_desc in attributes.items():
+                attr_name = attr
+                if not keep_readonly and target_obj._validation.get(  # pylint: disable=protected-access
+                    attr_name, {}
+                ).get("readonly", False):
+                    continue
+
+                if attr_name == "additional_properties" and attr_desc["key"] == "":
+                    if target_obj.additional_properties is not None:
+                        serialized.update(target_obj.additional_properties)
+                    continue
+                try:
+
+                    orig_attr = getattr(target_obj, attr)
+                    if is_xml_model_serialization:
+                        pass  # Don't provide "transformer" for XML for now. Keep "orig_attr"
+                    else:  # JSON
+                        keys, orig_attr = key_transformer(attr, attr_desc.copy(), orig_attr)
+                        keys = keys if isinstance(keys, list) else [keys]
+
+                    kwargs["serialization_ctxt"] = attr_desc
+                    new_attr = self.serialize_data(orig_attr, attr_desc["type"], **kwargs)
+
+                    if is_xml_model_serialization:
+                        xml_desc = attr_desc.get("xml", {})
+                        xml_name = xml_desc.get("name", attr_desc["key"])
+                        xml_prefix = xml_desc.get("prefix", None)
+                        xml_ns = xml_desc.get("ns", None)
+                        if xml_desc.get("attr", False):
+                            if xml_ns:
+                                ET.register_namespace(xml_prefix, xml_ns)
+                                xml_name = "{{{}}}{}".format(xml_ns, xml_name)
+                            serialized.set(xml_name, new_attr)  # type: ignore
+                            continue
+                        if xml_desc.get("text", False):
+                            serialized.text = new_attr  # type: ignore
+                            continue
+                        if isinstance(new_attr, list):
+                            serialized.extend(new_attr)  # type: ignore
+                        elif isinstance(new_attr, ET.Element):
+                            # If the nested XML has no XML name,
+                            # we MUST replace the tag with the local tag, keeping the namespaces.
+                            if "name" not in getattr(orig_attr, "_xml_map", {}):
+                                splitted_tag = new_attr.tag.split("}")
+                                if len(splitted_tag) == 2:  # Namespace
+                                    new_attr.tag = "}".join([splitted_tag[0], xml_name])
+                                else:
+                                    new_attr.tag = xml_name
+                            serialized.append(new_attr)  # type: ignore
+                        else:  # That's a basic type
+                            # Integrate namespace if necessary
+                            local_node = _create_xml_node(xml_name, xml_prefix, xml_ns)
+                            local_node.text = str(new_attr)
+                            serialized.append(local_node)  # type: ignore
+                    else:  # JSON
+                        for k in reversed(keys):  # type: ignore
+                            new_attr = {k: new_attr}
+
+                        _new_attr = new_attr
+                        _serialized = serialized
+                        for k in keys:  # type: ignore
+                            if k not in _serialized:
+                                _serialized.update(_new_attr)  # type: ignore
+                            _new_attr = _new_attr[k]  # type: ignore
+                            _serialized = _serialized[k]
+                except ValueError as err:
+                    if isinstance(err, SerializationError):
+                        raise
+
+        except (AttributeError, KeyError, TypeError) as err:
+            msg = "Attribute {} in object {} cannot be serialized.\n{}".format(attr_name, class_name, str(target_obj))
+            raise SerializationError(msg) from err
+        return serialized
+
+    def body(self, data, data_type, **kwargs):
+        """Serialize data intended for a request body.
+
+        :param object data: The data to be serialized.
+        :param str data_type: The type to be serialized from.
+        :rtype: dict
+        :raises SerializationError: if serialization fails.
+        :raises ValueError: if data is None
+        :returns: The serialized request body
+        """
+
+        # Just in case this is a dict
+        internal_data_type_str = data_type.strip("[]{}")
+        internal_data_type = self.dependencies.get(internal_data_type_str, None)
+        try:
+            is_xml_model_serialization = kwargs["is_xml"]
+        except KeyError:
+            if internal_data_type and issubclass(internal_data_type, Model):
+                is_xml_model_serialization = kwargs.setdefault("is_xml", internal_data_type.is_xml_model())
+            else:
+                is_xml_model_serialization = False
+        if internal_data_type and not isinstance(internal_data_type, Enum):
+            try:
+                deserializer = Deserializer(self.dependencies)
+                # Since this is for serialization, it's almost certain the format is not JSON REST.
+                # We're not able to deal with additional properties for now.
+                deserializer.additional_properties_detection = False
+                if is_xml_model_serialization:
+                    deserializer.key_extractors = [  # type: ignore
+                        attribute_key_case_insensitive_extractor,
+                    ]
+                else:
+                    deserializer.key_extractors = [
+                        rest_key_case_insensitive_extractor,
+                        attribute_key_case_insensitive_extractor,
+                        last_rest_key_case_insensitive_extractor,
+                    ]
+                data = deserializer._deserialize(data_type, data)  # pylint: disable=protected-access
+            except DeserializationError as err:
+                raise SerializationError("Unable to build a model: " + str(err)) from err
+
+        return self._serialize(data, data_type, **kwargs)
+
+    def url(self, name, data, data_type, **kwargs):
+        """Serialize data intended for a URL path.
+
+        :param str name: The name of the URL path parameter.
+        :param object data: The data to be serialized.
+        :param str data_type: The type to be serialized from.
+        :rtype: str
+        :returns: The serialized URL path
+        :raises TypeError: if serialization fails.
+        :raises ValueError: if data is None
+        """
+        try:
+            output = self.serialize_data(data, data_type, **kwargs)
+            if data_type == "bool":
+                output = json.dumps(output)
+
+            if kwargs.get("skip_quote") is True:
+                output = str(output)
+                output = output.replace("{", quote("{")).replace("}", quote("}"))
+            else:
+                output = quote(str(output), safe="")
+        except SerializationError as exc:
+            raise TypeError("{} must be type {}.".format(name, data_type)) from exc
+        return output
+
+    def query(self, name, data, data_type, **kwargs):
+        """Serialize data intended for a URL query.
+
+        :param str name: The name of the query parameter.
+        :param object data: The data to be serialized.
+        :param str data_type: The type to be serialized from.
+        :rtype: str, list
+        :raises TypeError: if serialization fails.
+        :raises ValueError: if data is None
+        :returns: The serialized query parameter
+        """
+        try:
+            # Handle lists separately, since we don't want to encode the div separator
+            if data_type.startswith("["):
+                internal_data_type = data_type[1:-1]
+                do_quote = not kwargs.get("skip_quote", False)
+                return self.serialize_iter(data, internal_data_type, do_quote=do_quote, **kwargs)
+
+            # Not a list, regular serialization
+            output = self.serialize_data(data, data_type, **kwargs)
+            if data_type == "bool":
+                output = json.dumps(output)
+            if kwargs.get("skip_quote") is True:
+                output = str(output)
+            else:
+                output = quote(str(output), safe="")
+        except SerializationError as exc:
+            raise TypeError("{} must be type {}.".format(name, data_type)) from exc
+        return str(output)
+
+    def header(self, name, data, data_type, **kwargs):
+        """Serialize data intended for a request header.
+
+        :param str name: The name of the header.
+        :param object data: The data to be serialized.
+        :param str data_type: The type to be serialized from.
+        :rtype: str
+        :raises TypeError: if serialization fails.
+        :raises ValueError: if data is None
+        :returns: The serialized header
+        """
+        try:
+            if data_type in ["[str]"]:
+                data = ["" if d is None else d for d in data]
+
+            output = self.serialize_data(data, data_type, **kwargs)
+            if data_type == "bool":
+                output = json.dumps(output)
+        except SerializationError as exc:
+            raise TypeError("{} must be type {}.".format(name, data_type)) from exc
+        return str(output)
+
+    def serialize_data(self, data, data_type, **kwargs):
+        """Serialize generic data according to supplied data type.
+
+        :param object data: The data to be serialized.
+        :param str data_type: The type to be serialized from.
+        :raises AttributeError: if required data is None.
+        :raises ValueError: if data is None
+        :raises SerializationError: if serialization fails.
+        :returns: The serialized data.
+        :rtype: str, int, float, bool, dict, list
+        """
+        if data is None:
+            raise ValueError("No value for given attribute")
+
+        try:
+            if data is CoreNull:
+                return None
+            if data_type in self.basic_types.values():
+                return self.serialize_basic(data, data_type, **kwargs)
+
+            if data_type in self.serialize_type:
+                return self.serialize_type[data_type](data, **kwargs)
+
+            # If dependencies is empty, try with current data class
+            # It has to be a subclass of Enum anyway
+            enum_type = self.dependencies.get(data_type, data.__class__)
+            if issubclass(enum_type, Enum):
+                return Serializer.serialize_enum(data, enum_obj=enum_type)
+
+            iter_type = data_type[0] + data_type[-1]
+            if iter_type in self.serialize_type:
+                return self.serialize_type[iter_type](data, data_type[1:-1], **kwargs)
+
+        except (ValueError, TypeError) as err:
+            msg = "Unable to serialize value: {!r} as type: {!r}."
+            raise SerializationError(msg.format(data, data_type)) from err
+        return self._serialize(data, **kwargs)
+
+    @classmethod
+    def _get_custom_serializers(cls, data_type, **kwargs):  # pylint: disable=inconsistent-return-statements
+        custom_serializer = kwargs.get("basic_types_serializers", {}).get(data_type)
+        if custom_serializer:
+            return custom_serializer
+        if kwargs.get("is_xml", False):
+            return cls._xml_basic_types_serializers.get(data_type)
+
+    @classmethod
+    def serialize_basic(cls, data, data_type, **kwargs):
+        """Serialize basic builting data type.
+        Serializes objects to str, int, float or bool.
+
+        Possible kwargs:
+        - basic_types_serializers dict[str, callable] : If set, use the callable as serializer
+        - is_xml bool : If set, use xml_basic_types_serializers
+
+        :param obj data: Object to be serialized.
+        :param str data_type: Type of object in the iterable.
+        :rtype: str, int, float, bool
+        :return: serialized object
+        """
+        custom_serializer = cls._get_custom_serializers(data_type, **kwargs)
+        if custom_serializer:
+            return custom_serializer(data)
+        if data_type == "str":
+            return cls.serialize_unicode(data)
+        return eval(data_type)(data)  # nosec # pylint: disable=eval-used
+
+    @classmethod
+    def serialize_unicode(cls, data):
+        """Special handling for serializing unicode strings in Py2.
+        Encode to UTF-8 if unicode, otherwise handle as a str.
+
+        :param str data: Object to be serialized.
+        :rtype: str
+        :return: serialized object
+        """
+        try:  # If I received an enum, return its value
+            return data.value
+        except AttributeError:
+            pass
+
+        try:
+            if isinstance(data, unicode):  # type: ignore
+                # Don't change it, JSON and XML ElementTree are totally able
+                # to serialize correctly u'' strings
+                return data
+        except NameError:
+            return str(data)
+        return str(data)
+
+    def serialize_iter(self, data, iter_type, div=None, **kwargs):
+        """Serialize iterable.
+
+        Supported kwargs:
+        - serialization_ctxt dict : The current entry of _attribute_map, or same format.
+          serialization_ctxt['type'] should be same as data_type.
+        - is_xml bool : If set, serialize as XML
+
+        :param list data: Object to be serialized.
+        :param str iter_type: Type of object in the iterable.
+        :param str div: If set, this str will be used to combine the elements
+         in the iterable into a combined string. Default is 'None'.
+        :rtype: list, str
+        :return: serialized iterable
+        """
+        if isinstance(data, str):
+            raise SerializationError("Refuse str type as a valid iter type.")
+
+        serialization_ctxt = kwargs.get("serialization_ctxt", {})
+        is_xml = kwargs.get("is_xml", False)
+
+        serialized = []
+        for d in data:
+            try:
+                serialized.append(self.serialize_data(d, iter_type, **kwargs))
+            except ValueError as err:
+                if isinstance(err, SerializationError):
+                    raise
+                serialized.append(None)
+
+        if kwargs.get("do_quote", False):
+            serialized = ["" if s is None else quote(str(s), safe="") for s in serialized]
+
+        if div:
+            serialized = ["" if s is None else str(s) for s in serialized]
+            serialized = div.join(serialized)
+
+        if "xml" in serialization_ctxt or is_xml:
+            # XML serialization is more complicated
+            xml_desc = serialization_ctxt.get("xml", {})
+            xml_name = xml_desc.get("name")
+            if not xml_name:
+                xml_name = serialization_ctxt["key"]
+
+            # Create a wrap node if necessary (use the fact that Element and list have "append")
+            is_wrapped = xml_desc.get("wrapped", False)
+            node_name = xml_desc.get("itemsName", xml_name)
+            if is_wrapped:
+                final_result = _create_xml_node(xml_name, xml_desc.get("prefix", None), xml_desc.get("ns", None))
+            else:
+                final_result = []
+            # All list elements to "local_node"
+            for el in serialized:
+                if isinstance(el, ET.Element):
+                    el_node = el
+                else:
+                    el_node = _create_xml_node(node_name, xml_desc.get("prefix", None), xml_desc.get("ns", None))
+                    if el is not None:  # Otherwise it writes "None" :-p
+                        el_node.text = str(el)
+                final_result.append(el_node)
+            return final_result
+        return serialized
+
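+    # Usage sketch (illustrative): with ``div`` set, the serialized elements are
+    # joined into one string (the form used for comma-separated query values), e.g.
+    #     >>> Serializer().serialize_iter(["a", "b", "c"], "str", div=",")
+    #     'a,b,c'
+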
+    def serialize_dict(self, attr, dict_type, **kwargs):
+        """Serialize a dictionary of objects.
+
+        :param dict attr: Object to be serialized.
+        :param str dict_type: Type of object in the dictionary.
+        :rtype: dict
+        :return: serialized dictionary
+        """
+        serialization_ctxt = kwargs.get("serialization_ctxt", {})
+        serialized = {}
+        for key, value in attr.items():
+            try:
+                serialized[self.serialize_unicode(key)] = self.serialize_data(value, dict_type, **kwargs)
+            except ValueError as err:
+                if isinstance(err, SerializationError):
+                    raise
+                serialized[self.serialize_unicode(key)] = None
+
+        if "xml" in serialization_ctxt:
+            # XML serialization is more complicated
+            xml_desc = serialization_ctxt["xml"]
+            xml_name = xml_desc["name"]
+
+            final_result = _create_xml_node(xml_name, xml_desc.get("prefix", None), xml_desc.get("ns", None))
+            for key, value in serialized.items():
+                ET.SubElement(final_result, key).text = value
+            return final_result
+
+        return serialized
+
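+    # Usage sketch (illustrative): values are serialized as ``dict_type`` and
+    # keys pass through serialize_unicode, e.g.
+    #     >>> Serializer().serialize_dict({"retries": "3"}, "int")
+    #     {'retries': 3}
+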
+    def serialize_object(self, attr, **kwargs):  # pylint: disable=too-many-return-statements
+        """Serialize a generic object.
+        This will be handled as a dictionary. If object passed in is not
+        a basic type (str, int, float, dict, list) it will simply be
+        cast to str.
+
+        :param dict attr: Object to be serialized.
+        :rtype: dict or str
+        :return: serialized object
+        """
+        if attr is None:
+            return None
+        if isinstance(attr, ET.Element):
+            return attr
+        obj_type = type(attr)
+        if obj_type in self.basic_types:
+            return self.serialize_basic(attr, self.basic_types[obj_type], **kwargs)
+        if obj_type is _long_type:
+            return self.serialize_long(attr)
+        if obj_type is str:
+            return self.serialize_unicode(attr)
+        if obj_type is datetime.datetime:
+            return self.serialize_iso(attr)
+        if obj_type is datetime.date:
+            return self.serialize_date(attr)
+        if obj_type is datetime.time:
+            return self.serialize_time(attr)
+        if obj_type is datetime.timedelta:
+            return self.serialize_duration(attr)
+        if obj_type is decimal.Decimal:
+            return self.serialize_decimal(attr)
+
+        # If it's a model or I know this dependency, serialize as a Model
+        if obj_type in self.dependencies.values() or isinstance(attr, Model):
+            return self._serialize(attr)
+
+        if obj_type == dict:
+            serialized = {}
+            for key, value in attr.items():
+                try:
+                    serialized[self.serialize_unicode(key)] = self.serialize_object(value, **kwargs)
+                except ValueError:
+                    serialized[self.serialize_unicode(key)] = None
+            return serialized
+
+        if obj_type == list:
+            serialized = []
+            for obj in attr:
+                try:
+                    serialized.append(self.serialize_object(obj, **kwargs))
+                except ValueError:
+                    pass
+            return serialized
+        return str(attr)
+
+    @staticmethod
+    def serialize_enum(attr, enum_obj=None):
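+        """Serialize an Enum member or raw value into the enum's underlying value.
+
+        Falls back to a case-insensitive match on the enum values.
+
+        :param attr: Enum member or raw value to be serialized.
+        :param enum_obj: Enum type used to validate the value.
+        :return: serialized enum value
+        :raises SerializationError: if the value is not valid for the enum.
+        """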
+        try:
+            result = attr.value
+        except AttributeError:
+            result = attr
+        try:
+            enum_obj(result)  # type: ignore
+            return result
+        except ValueError as exc:
+            for enum_value in enum_obj:  # type: ignore
+                if enum_value.value.lower() == str(attr).lower():
+                    return enum_value.value
+            error = "{!r} is not valid value for enum {!r}"
+            raise SerializationError(error.format(attr, enum_obj)) from exc
+
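+    # Usage sketch (illustrative; ``Color`` is a hypothetical enum with member
+    # RED = "Red"): matching is case-insensitive on the enum values, e.g.
+    #     >>> Serializer.serialize_enum("red", enum_obj=Color)
+    #     'Red'
+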
+    @staticmethod
+    def serialize_bytearray(attr, **kwargs):  # pylint: disable=unused-argument
+        """Serialize bytearray into base-64 string.
+
+        :param bytearray attr: Object to be serialized.
+        :rtype: str
+        :return: serialized base64
+        """
+        return b64encode(attr).decode()
+
+    @staticmethod
+    def serialize_base64(attr, **kwargs):  # pylint: disable=unused-argument
+        """Serialize str into base-64 string.
+
+        :param bytes attr: Object to be serialized.
+        :rtype: str
+        :return: serialized base64
+        """
+        encoded = b64encode(attr).decode("ascii")
+        return encoded.strip("=").replace("+", "-").replace("/", "_")
+
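+    # Usage sketch (illustrative): the output is base64url without padding, e.g.
+    #     >>> Serializer.serialize_base64(b"hi")
+    #     'aGk'
+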
+    @staticmethod
+    def serialize_decimal(attr, **kwargs):  # pylint: disable=unused-argument
+        """Serialize Decimal object to float.
+
+        :param decimal attr: Object to be serialized.
+        :rtype: float
+        :return: serialized decimal
+        """
+        return float(attr)
+
+    @staticmethod
+    def serialize_long(attr, **kwargs):  # pylint: disable=unused-argument
+        """Serialize long (Py2) or int (Py3).
+
+        :param int attr: Object to be serialized.
+        :rtype: int/long
+        :return: serialized long
+        """
+        return _long_type(attr)
+
+    @staticmethod
+    def serialize_date(attr, **kwargs):  # pylint: disable=unused-argument
+        """Serialize Date object into ISO-8601 formatted string.
+
+        :param Date attr: Object to be serialized.
+        :rtype: str
+        :return: serialized date
+        """
+        if isinstance(attr, str):
+            attr = isodate.parse_date(attr)
+        t = "{:04}-{:02}-{:02}".format(attr.year, attr.month, attr.day)
+        return t
+
+    @staticmethod
+    def serialize_time(attr, **kwargs):  # pylint: disable=unused-argument
+        """Serialize Time object into ISO-8601 formatted string.
+
+        :param datetime.time attr: Object to be serialized.
+        :rtype: str
+        :return: serialized time
+        """
+        if isinstance(attr, str):
+            attr = isodate.parse_time(attr)
+        t = "{:02}:{:02}:{:02}".format(attr.hour, attr.minute, attr.second)
+        if attr.microsecond:
+            t += ".{:02}".format(attr.microsecond)
+        return t
+
+    @staticmethod
+    def serialize_duration(attr, **kwargs):  # pylint: disable=unused-argument
+        """Serialize TimeDelta object into ISO-8601 formatted string.
+
+        :param TimeDelta attr: Object to be serialized.
+        :rtype: str
+        :return: serialized duration
+        """
+        if isinstance(attr, str):
+            attr = isodate.parse_duration(attr)
+        return isodate.duration_isoformat(attr)
+
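+    # Usage sketch (illustrative):
+    #     >>> import datetime
+    #     >>> Serializer.serialize_duration(datetime.timedelta(hours=1, minutes=30))
+    #     'PT1H30M'
+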
+    @staticmethod
+    def serialize_rfc(attr, **kwargs):  # pylint: disable=unused-argument
+        """Serialize Datetime object into RFC-1123 formatted string.
+
+        :param Datetime attr: Object to be serialized.
+        :rtype: str
+        :raises TypeError: if format invalid.
+        :return: serialized rfc
+        """
+        try:
+            if not attr.tzinfo:
+                _LOGGER.warning("Datetime with no tzinfo will be considered UTC.")
+            utc = attr.utctimetuple()
+        except AttributeError as exc:
+            raise TypeError("RFC1123 object must be valid Datetime object.") from exc
+
+        return "{}, {:02} {} {:04} {:02}:{:02}:{:02} GMT".format(
+            Serializer.days[utc.tm_wday],
+            utc.tm_mday,
+            Serializer.months[utc.tm_mon],
+            utc.tm_year,
+            utc.tm_hour,
+            utc.tm_min,
+            utc.tm_sec,
+        )
+
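+    # Usage sketch (illustrative):
+    #     >>> import datetime
+    #     >>> dt = datetime.datetime(2024, 1, 1, 12, 0, tzinfo=datetime.timezone.utc)
+    #     >>> Serializer.serialize_rfc(dt)
+    #     'Mon, 01 Jan 2024 12:00:00 GMT'
+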
+    @staticmethod
+    def serialize_iso(attr, **kwargs):  # pylint: disable=unused-argument
+        """Serialize Datetime object into ISO-8601 formatted string.
+
+        :param Datetime attr: Object to be serialized.
+        :rtype: str
+        :raises SerializationError: if format invalid.
+        :return: serialized iso
+        """
+        if isinstance(attr, str):
+            attr = isodate.parse_datetime(attr)
+        try:
+            if not attr.tzinfo:
+                _LOGGER.warning("Datetime with no tzinfo will be considered UTC.")
+            utc = attr.utctimetuple()
+            if utc.tm_year > 9999 or utc.tm_year < 1:
+                raise OverflowError("Hit max or min date")
+
+            microseconds = str(attr.microsecond).rjust(6, "0").rstrip("0").ljust(3, "0")
+            if microseconds:
+                microseconds = "." + microseconds
+            date = "{:04}-{:02}-{:02}T{:02}:{:02}:{:02}".format(
+                utc.tm_year, utc.tm_mon, utc.tm_mday, utc.tm_hour, utc.tm_min, utc.tm_sec
+            )
+            return date + microseconds + "Z"
+        except (ValueError, OverflowError) as err:
+            msg = "Unable to serialize datetime object."
+            raise SerializationError(msg) from err
+        except AttributeError as err:
+            msg = "ISO-8601 object must be valid Datetime object."
+            raise TypeError(msg) from err
+
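+    # Usage sketch (illustrative): the output always carries at least millisecond
+    # precision and a "Z" suffix, e.g.
+    #     >>> import datetime
+    #     >>> dt = datetime.datetime(2024, 1, 1, 12, 0, tzinfo=datetime.timezone.utc)
+    #     >>> Serializer.serialize_iso(dt)
+    #     '2024-01-01T12:00:00.000Z'
+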
+    @staticmethod
+    def serialize_unix(attr, **kwargs):  # pylint: disable=unused-argument
+        """Serialize Datetime object into IntTime format.
+        This is represented as seconds.
+
+        :param Datetime attr: Object to be serialized.
+        :rtype: int
+        :raises SerializationError: if format invalid
+        :return: serialized unix time
+        """
+        if isinstance(attr, int):
+            return attr
+        try:
+            if not attr.tzinfo:
+                _LOGGER.warning("Datetime with no tzinfo will be considered UTC.")
+            return int(calendar.timegm(attr.utctimetuple()))
+        except AttributeError as exc:
+            raise TypeError("Unix time object must be valid Datetime object.") from exc
+
+
+def rest_key_extractor(attr, attr_desc, data):  # pylint: disable=unused-argument
+    key = attr_desc["key"]
+    working_data = data
+
+    while "." in key:
+        # Need the cast, as for some reason "split" is typed as list[str | Any]
+        dict_keys = cast(List[str], _FLATTEN.split(key))
+        if len(dict_keys) == 1:
+            key = _decode_attribute_map_key(dict_keys[0])
+            break
+        working_key = _decode_attribute_map_key(dict_keys[0])
+        working_data = working_data.get(working_key, data)
+        if working_data is None:
+            # If we hit None at any point while following the flattened JSON
+            # path, all properties underneath it are None as well
+            return None
+        key = ".".join(dict_keys[1:])
+
+    return working_data.get(key)
+
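+# Usage sketch (illustrative): a dotted attribute key walks nested dicts, e.g.
+#     >>> rest_key_extractor(None, {"key": "properties.name"}, {"properties": {"name": "foo"}})
+#     'foo'
+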
+
+def rest_key_case_insensitive_extractor(  # pylint: disable=unused-argument, inconsistent-return-statements
+    attr, attr_desc, data
+):
+    key = attr_desc["key"]
+    working_data = data
+
+    while "." in key:
+        dict_keys = _FLATTEN.split(key)
+        if len(dict_keys) == 1:
+            key = _decode_attribute_map_key(dict_keys[0])
+            break
+        working_key = _decode_attribute_map_key(dict_keys[0])
+        working_data = attribute_key_case_insensitive_extractor(working_key, None, working_data)
+        if working_data is None:
+            # If we hit None at any point while following the flattened JSON
+            # path, all properties underneath it are None as well
+            return None
+        key = ".".join(dict_keys[1:])
+
+    if working_data:
+        return attribute_key_case_insensitive_extractor(key, None, working_data)
+
+
+def last_rest_key_extractor(attr, attr_desc, data):  # pylint: disable=unused-argument
+    """Extract the attribute in "data" based on the last part of the JSON path key.
+
+    :param str attr: The attribute to extract
+    :param dict attr_desc: The attribute description
+    :param dict data: The data to extract from
+    :rtype: object
+    :returns: The extracted attribute
+    """
+    key = attr_desc["key"]
+    dict_keys = _FLATTEN.split(key)
+    return attribute_key_extractor(dict_keys[-1], None, data)
+
+
+def last_rest_key_case_insensitive_extractor(attr, attr_desc, data):  # pylint: disable=unused-argument
+    """Extract the attribute in "data" based on the last part of the JSON path key.
+
+    This is the case insensitive version of "last_rest_key_extractor"
+    :param str attr: The attribute to extract
+    :param dict attr_desc: The attribute description
+    :param dict data: The data to extract from
+    :rtype: object
+    :returns: The extracted attribute
+    """
+    key = attr_desc["key"]
+    dict_keys = _FLATTEN.split(key)
+    return attribute_key_case_insensitive_extractor(dict_keys[-1], None, data)
+
+
+def attribute_key_extractor(attr, _, data):
+    return data.get(attr)
+
+
+def attribute_key_case_insensitive_extractor(attr, _, data):
+    found_key = None
+    lower_attr = attr.lower()
+    for key in data:
+        if lower_attr == key.lower():
+            found_key = key
+            break
+
+    return data.get(found_key)
+
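+# Usage sketch (illustrative):
+#     >>> attribute_key_case_insensitive_extractor("name", None, {"Name": "foo"})
+#     'foo'
+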
+
+def _extract_name_from_internal_type(internal_type):
+    """Given an internal type XML description, extract correct XML name with namespace.
+
+    :param type internal_type: A model type
+    :rtype: str
+    :returns: The XML name, namespace-qualified if the type declares one
+    """
+    internal_type_xml_map = getattr(internal_type, "_xml_map", {})
+    xml_name = internal_type_xml_map.get("name", internal_type.__name__)
+    xml_ns = internal_type_xml_map.get("ns", None)
+    if xml_ns:
+        xml_name = "{{{}}}{}".format(xml_ns, xml_name)
+    return xml_name
+
+
+def xml_key_extractor(attr, attr_desc, data):  # pylint: disable=unused-argument,too-many-return-statements
+    if isinstance(data, dict):
+        return None
+
+    # Test if this model is XML ready first
+    if not isinstance(data, ET.Element):
+        return None
+
+    xml_desc = attr_desc.get("xml", {})
+    xml_name = xml_desc.get("name", attr_desc["key"])
+
+    # Look for children
+    is_iter_type = attr_desc["type"].startswith("[")
+    is_wrapped = xml_desc.get("wrapped", False)
+    internal_type = attr_desc.get("internalType", None)
+    internal_type_xml_map = getattr(internal_type, "_xml_map", {})
+
+    # Integrate namespace if necessary
+    xml_ns = xml_desc.get("ns", internal_type_xml_map.get("ns", None))
+    if xml_ns:
+        xml_name = "{{{}}}{}".format(xml_ns, xml_name)
+
+    # If it's an attribute, that's simple
+    if xml_desc.get("attr", False):
+        return data.get(xml_name)
+
+    # If it's x-ms-text, that's simple too
+    if xml_desc.get("text", False):
+        return data.text
+
+    # Scenario where I take the local name:
+    # - Wrapped node
+    # - Internal type is an enum (considered basic types)
+    # - Internal type has no XML/Name node
+    if is_wrapped or (internal_type and (issubclass(internal_type, Enum) or "name" not in internal_type_xml_map)):
+        children = data.findall(xml_name)
+    # If internal type has a local name and it's not a list, I use that name
+    elif not is_iter_type and internal_type and "name" in internal_type_xml_map:
+        xml_name = _extract_name_from_internal_type(internal_type)
+        children = data.findall(xml_name)
+    # That's an array
+    else:
+        if internal_type:  # Complex type, ignore itemsName and use the complex type name
+            items_name = _extract_name_from_internal_type(internal_type)
+        else:
+            items_name = xml_desc.get("itemsName", xml_name)
+        children = data.findall(items_name)
+
+    if len(children) == 0:
+        if is_iter_type:
+            if is_wrapped:
+                return None  # is_wrapped no node, we want None
+            return []  # not wrapped, assume empty list
+        return None  # Assume it's not there, maybe an optional node.
+
+    # If is_iter_type and not wrapped, return all found children
+    if is_iter_type:
+        if not is_wrapped:
+            return children
+        # Iter and wrapped, should have found one node only (the wrap one)
+        if len(children) != 1:
+            raise DeserializationError(
+                "Tried to deserialize an array not wrapped, and found several nodes '{}'. Maybe you should declare this array as wrapped?".format(  # pylint: disable=line-too-long
+                    xml_name
+                )
+            )
+        return list(children[0])  # Might be empty list and that's ok.
+
+    # Not an iter type here; we should have found at most one element
+    if len(children) > 1:
+        raise DeserializationError("Find several XML '{}' where it was not expected".format(xml_name))
+    return children[0]
+
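+# Usage sketch (illustrative): for a plain string attribute mapped to an XML
+# element, the matching child element is returned (its .text holds the value).
+#     >>> root = ET.fromstring("<Blob><Name>foo</Name></Blob>")
+#     >>> xml_key_extractor("name", {"key": "Name", "type": "str"}, root).text
+#     'foo'
+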
+
+class Deserializer:
+    """Response object model deserializer.
+
+    :param dict classes: Class type dictionary for deserializing complex types.
+    :ivar list key_extractors: Ordered list of extractors to be used by this deserializer.
+    """
+
+    basic_types = {str: "str", int: "int", bool: "bool", float: "float"}
+
+    valid_date = re.compile(r"\d{4}[-]\d{2}[-]\d{2}T\d{2}:\d{2}:\d{2}\.?\d*Z?[-+]?[\d{2}]?:?[\d{2}]?")
+
+    def __init__(self, classes: Optional[Mapping[str, type]] = None) -> None:
+        self.deserialize_type = {
+            "iso-8601": Deserializer.deserialize_iso,
+            "rfc-1123": Deserializer.deserialize_rfc,
+            "unix-time": Deserializer.deserialize_unix,
+            "duration": Deserializer.deserialize_duration,
+            "date": Deserializer.deserialize_date,
+            "time": Deserializer.deserialize_time,
+            "decimal": Deserializer.deserialize_decimal,
+            "long": Deserializer.deserialize_long,
+            "bytearray": Deserializer.deserialize_bytearray,
+            "base64": Deserializer.deserialize_base64,
+            "object": self.deserialize_object,
+            "[]": self.deserialize_iter,
+            "{}": self.deserialize_dict,
+        }
+        self.deserialize_expected_types = {
+            "duration": (isodate.Duration, datetime.timedelta),
+            "iso-8601": (datetime.datetime),
+        }
+        self.dependencies: Dict[str, type] = dict(classes) if classes else {}
+        self.key_extractors = [rest_key_extractor, xml_key_extractor]
+        # Additional-properties detection only works if "rest_key_extractor" is
+        # used to extract the keys. Making it work with arbitrary key extractors
+        # would be too complicated, with no real scenario for now, so this flag
+        # allows disabling it. Set it to False if you expect the input NOT to
+        # come from JSON REST syntax; otherwise, results are unexpected.
+        self.additional_properties_detection = True
+
+    def __call__(self, target_obj, response_data, content_type=None):
+        """Call the deserializer to process a REST response.
+
+        :param str target_obj: Target data type to deserialize to.
+        :param requests.Response response_data: REST response object.
+        :param str content_type: Swagger "produces" if available.
+        :raises DeserializationError: if deserialization fails.
+        :return: Deserialized object.
+        :rtype: object
+        """
+        data = self._unpack_content(response_data, content_type)
+        return self._deserialize(target_obj, data)
+
+    def _deserialize(self, target_obj, data):  # pylint: disable=inconsistent-return-statements
+        """Call the deserializer on a model.
+
+        Data needs to be already deserialized as JSON or XML ElementTree
+
+        :param str target_obj: Target data type to deserialize to.
+        :param object data: Object to deserialize.
+        :raises DeserializationError: if deserialization fails.
+        :return: Deserialized object.
+        :rtype: object
+        """
+        # This is already a model, go recursive just in case
+        if hasattr(data, "_attribute_map"):
+            constants = [name for name, config in getattr(data, "_validation", {}).items() if config.get("constant")]
+            try:
+                for attr, mapconfig in data._attribute_map.items():  # pylint: disable=protected-access
+                    if attr in constants:
+                        continue
+                    value = getattr(data, attr)
+                    if value is None:
+                        continue
+                    local_type = mapconfig["type"]
+                    internal_data_type = local_type.strip("[]{}")
+                    if internal_data_type not in self.dependencies or isinstance(internal_data_type, Enum):
+                        continue
+                    setattr(data, attr, self._deserialize(local_type, value))
+                return data
+            except AttributeError:
+                return
+
+        response, class_name = self._classify_target(target_obj, data)
+
+        if isinstance(response, str):
+            return self.deserialize_data(data, response)
+        if isinstance(response, type) and issubclass(response, Enum):
+            return self.deserialize_enum(data, response)
+
+        if data is None or data is CoreNull:
+            return data
+        try:
+            attributes = response._attribute_map  # type: ignore # pylint: disable=protected-access
+            d_attrs = {}
+            for attr, attr_desc in attributes.items():
+                # Check empty string. If it's not empty, someone has a real "additionalProperties"...
+                if attr == "additional_properties" and attr_desc["key"] == "":
+                    continue
+                raw_value = None
+                # Enhance attr_desc with some dynamic data
+                attr_desc = attr_desc.copy()  # Do a copy, do not change the real one
+                internal_data_type = attr_desc["type"].strip("[]{}")
+                if internal_data_type in self.dependencies:
+                    attr_desc["internalType"] = self.dependencies[internal_data_type]
+
+                for key_extractor in self.key_extractors:
+                    found_value = key_extractor(attr, attr_desc, data)
+                    if found_value is not None:
+                        if raw_value is not None and raw_value != found_value:
+                            msg = (
+                                "Ignoring extracted value '%s' from %s for key '%s'"
+                                " (duplicate extraction, follow extractors order)"
+                            )
+                            _LOGGER.warning(msg, found_value, key_extractor, attr)
+                            continue
+                        raw_value = found_value
+
+                value = self.deserialize_data(raw_value, attr_desc["type"])
+                d_attrs[attr] = value
+        except (AttributeError, TypeError, KeyError) as err:
+            msg = "Unable to deserialize to object: " + class_name  # type: ignore
+            raise DeserializationError(msg) from err
+        additional_properties = self._build_additional_properties(attributes, data)
+        return self._instantiate_model(response, d_attrs, additional_properties)
+
+    def _build_additional_properties(self, attribute_map, data):
+        if not self.additional_properties_detection:
+            return None
+        if "additional_properties" in attribute_map and attribute_map.get("additional_properties", {}).get("key") != "":
+            # Check empty string. If it's not empty, someone has a real "additionalProperties"
+            return None
+        if isinstance(data, ET.Element):
+            data = {el.tag: el.text for el in data}
+
+        known_keys = {
+            _decode_attribute_map_key(_FLATTEN.split(desc["key"])[0])
+            for desc in attribute_map.values()
+            if desc["key"] != ""
+        }
+        present_keys = set(data.keys())
+        missing_keys = present_keys - known_keys
+        return {key: data[key] for key in missing_keys}
+
+    def _classify_target(self, target, data):
+        """Check to see whether the deserialization target object can
+        be classified into a subclass.
+        Once classification has been determined, initialize object.
+
+        :param str target: The target object type to deserialize to.
+        :param str/dict data: The response data to deserialize.
+        :return: The classified target object and its class name.
+        :rtype: tuple
+        """
+        if target is None:
+            return None, None
+
+        if isinstance(target, str):
+            try:
+                target = self.dependencies[target]
+            except KeyError:
+                return target, target
+
+        try:
+            target = target._classify(data, self.dependencies)  # type: ignore # pylint: disable=protected-access
+        except AttributeError:
+            pass  # Target is not a Model, no classify
+        return target, target.__class__.__name__  # type: ignore
+
+    def failsafe_deserialize(self, target_obj, data, content_type=None):
+        """Ignores any errors encountered in deserialization,
+        and falls back to not deserializing the object. Recommended
+        for use in error deserialization, as we want to return the
+        HttpResponseError to users, and not have them deal with
+        a deserialization error.
+
+        :param str target_obj: The target object type to deserialize to.
+        :param str/dict data: The response data to deserialize.
+        :param str content_type: Swagger "produces" if available.
+        :return: Deserialized object.
+        :rtype: object
+        """
+        try:
+            return self(target_obj, data, content_type=content_type)
+        except:  # pylint: disable=bare-except
+            _LOGGER.debug(
+                "Ran into a deserialization error. Ignoring since this is failsafe deserialization", exc_info=True
+            )
+            return None
+
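+    # Usage sketch (illustrative): malformed payloads produce None instead of
+    # raising, e.g.
+    #     >>> Deserializer().failsafe_deserialize("int", "not-a-number") is None
+    #     True
+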
+    @staticmethod
+    def _unpack_content(raw_data, content_type=None):
+        """Extract the correct structure for deserialization.
+
+        If raw_data is a PipelineResponse, try to extract the result of RawDeserializer.
+        If we can't, raise. Your Pipeline should have a RawDeserializer.
+
+        If not a pipeline response and raw_data is bytes or string, use content-type
+        to decode it. If no content-type, try JSON.
+
+        If raw_data is something else, bypass all logic and return it directly.
+
+        :param obj raw_data: Data to be processed.
+        :param str content_type: How to parse if raw_data is a string/bytes.
+        :raises JSONDecodeError: If JSON is requested and parsing is impossible.
+        :raises UnicodeDecodeError: If bytes are not valid UTF-8
+        :rtype: object
+        :return: Unpacked content.
+        """
+        # Assume this is enough to detect a Pipeline Response without importing it
+        context = getattr(raw_data, "context", {})
+        if context:
+            if RawDeserializer.CONTEXT_NAME in context:
+                return context[RawDeserializer.CONTEXT_NAME]
+            raise ValueError("This pipeline didn't have the RawDeserializer policy; can't deserialize")
+
+        # Assume this is enough to recognize universal_http.ClientResponse without importing it
+        if hasattr(raw_data, "body"):
+            return RawDeserializer.deserialize_from_http_generics(raw_data.text(), raw_data.headers)
+
+        # Assume this enough to recognize requests.Response without importing it.
+        if hasattr(raw_data, "_content_consumed"):
+            return RawDeserializer.deserialize_from_http_generics(raw_data.text, raw_data.headers)
+
+        if isinstance(raw_data, (str, bytes)) or hasattr(raw_data, "read"):
+            return RawDeserializer.deserialize_from_text(raw_data, content_type)  # type: ignore
+        return raw_data
+
+    def _instantiate_model(self, response, attrs, additional_properties=None):
+        """Instantiate a response model passing in deserialized args.
+
+        :param Response response: The response model class.
+        :param dict attrs: The deserialized response attributes.
+        :param dict additional_properties: Additional properties to be set.
+        :rtype: Response
+        :return: The instantiated response model.
+        """
+        if callable(response):
+            subtype = getattr(response, "_subtype_map", {})
+            try:
+                readonly = [
+                    k
+                    for k, v in response._validation.items()  # pylint: disable=protected-access  # type: ignore
+                    if v.get("readonly")
+                ]
+                const = [
+                    k
+                    for k, v in response._validation.items()  # pylint: disable=protected-access  # type: ignore
+                    if v.get("constant")
+                ]
+                kwargs = {k: v for k, v in attrs.items() if k not in subtype and k not in readonly + const}
+                response_obj = response(**kwargs)
+                for attr in readonly:
+                    setattr(response_obj, attr, attrs.get(attr))
+                if additional_properties:
+                    response_obj.additional_properties = additional_properties  # type: ignore
+                return response_obj
+            except TypeError as err:
+                msg = "Unable to deserialize {} into model {}. ".format(kwargs, response)  # type: ignore
+                raise DeserializationError(msg + str(err)) from err
+        else:
+            try:
+                for attr, value in attrs.items():
+                    setattr(response, attr, value)
+                return response
+            except Exception as exp:
+                msg = "Unable to populate response model. "
+                msg += "Type: {}, Error: {}".format(type(response), exp)
+                raise DeserializationError(msg) from exp
+
+    def deserialize_data(self, data, data_type):  # pylint: disable=too-many-return-statements
+        """Process data for deserialization according to data type.
+
+        :param str data: The response string to be deserialized.
+        :param str data_type: The type to deserialize to.
+        :raises DeserializationError: if deserialization fails.
+        :return: Deserialized object.
+        :rtype: object
+        """
+        if data is None:
+            return data
+
+        try:
+            if not data_type:
+                return data
+            if data_type in self.basic_types.values():
+                return self.deserialize_basic(data, data_type)
+            if data_type in self.deserialize_type:
+                if isinstance(data, self.deserialize_expected_types.get(data_type, tuple())):
+                    return data
+
+                is_a_text_parsing_type = lambda x: x not in [  # pylint: disable=unnecessary-lambda-assignment
+                    "object",
+                    "[]",
+                    r"{}",
+                ]
+                if isinstance(data, ET.Element) and is_a_text_parsing_type(data_type) and not data.text:
+                    return None
+                data_val = self.deserialize_type[data_type](data)
+                return data_val
+
+            iter_type = data_type[0] + data_type[-1]
+            if iter_type in self.deserialize_type:
+                return self.deserialize_type[iter_type](data, data_type[1:-1])
+
+            obj_type = self.dependencies[data_type]
+            if issubclass(obj_type, Enum):
+                if isinstance(data, ET.Element):
+                    data = data.text
+                return self.deserialize_enum(data, obj_type)
+
+        except (ValueError, TypeError, AttributeError) as err:
+            msg = "Unable to deserialize response data."
+            msg += " Data: {}, {}".format(data, data_type)
+            raise DeserializationError(msg) from err
+        return self._deserialize(obj_type, data)
+
+    def deserialize_iter(self, attr, iter_type):
+        """Deserialize an iterable.
+
+        :param list attr: Iterable to be deserialized.
+        :param str iter_type: The type of object in the iterable.
+        :return: Deserialized iterable.
+        :rtype: list
+        """
+        if attr is None:
+            return None
+        if isinstance(attr, ET.Element):  # If I receive an element here, get the children
+            attr = list(attr)
+        if not isinstance(attr, (list, set)):
+            raise DeserializationError("Cannot deserialize as [{}] an object of type {}".format(iter_type, type(attr)))
+        return [self.deserialize_data(a, iter_type) for a in attr]
+
+    def deserialize_dict(self, attr, dict_type):
+        """Deserialize a dictionary.
+
+        :param dict/list attr: Dictionary to be deserialized. Also accepts
+         a list of key, value pairs.
+        :param str dict_type: The object type of the items in the dictionary.
+        :return: Deserialized dictionary.
+        :rtype: dict
+        """
+        if isinstance(attr, list):
+            return {x["key"]: self.deserialize_data(x["value"], dict_type) for x in attr}
+
+        if isinstance(attr, ET.Element):
+            # Transform <Key>value</Key> into {"Key": "value"}
+            attr = {el.tag: el.text for el in attr}
+        return {k: self.deserialize_data(v, dict_type) for k, v in attr.items()}
+
+    def deserialize_object(self, attr, **kwargs):  # pylint: disable=too-many-return-statements
+        """Deserialize a generic object.
+        This will be handled as a dictionary.
+
+        :param dict attr: Dictionary to be deserialized.
+        :return: Deserialized object.
+        :rtype: dict
+        :raises TypeError: if non-builtin datatype encountered.
+        """
+        if attr is None:
+            return None
+        if isinstance(attr, ET.Element):
+            # Do not recurse on XML; just return the tree as-is
+            return attr
+        if isinstance(attr, str):
+            return self.deserialize_basic(attr, "str")
+        obj_type = type(attr)
+        if obj_type in self.basic_types:
+            return self.deserialize_basic(attr, self.basic_types[obj_type])
+        if obj_type is _long_type:
+            return self.deserialize_long(attr)
+
+        if obj_type == dict:
+            deserialized = {}
+            for key, value in attr.items():
+                try:
+                    deserialized[key] = self.deserialize_object(value, **kwargs)
+                except ValueError:
+                    deserialized[key] = None
+            return deserialized
+
+        if obj_type == list:
+            deserialized = []
+            for obj in attr:
+                try:
+                    deserialized.append(self.deserialize_object(obj, **kwargs))
+                except ValueError:
+                    pass
+            return deserialized
+
+        error = "Cannot deserialize generic object with type: "
+        raise TypeError(error + str(obj_type))
+
+    def deserialize_basic(self, attr, data_type):  # pylint: disable=too-many-return-statements
+        """Deserialize basic builtin data type from string.
+        Will attempt to convert to str, int, float and bool.
+        This function will also accept '1', '0', 'true' and 'false' as
+        valid bool values.
+
+        :param str attr: response string to be deserialized.
+        :param str data_type: deserialization data type.
+        :return: Deserialized basic type.
+        :rtype: str, int, float or bool
+        :raises TypeError: if string format is not valid.
+        """
+        # If we're here, data is supposed to be a basic type.
+        # If it's still an XML node, take the text
+        if isinstance(attr, ET.Element):
+            attr = attr.text
+            if not attr:
+                if data_type == "str":
+                    # None or '', node <a/> is empty string.
+                    return ""
+                # None or '', node <a/> with a strong type is None.
+                # Don't try to model "empty bool" or "empty int"
+                return None
+
+        if data_type == "bool":
+            if attr in [True, False, 1, 0]:
+                return bool(attr)
+            if isinstance(attr, str):
+                if attr.lower() in ["true", "1"]:
+                    return True
+                if attr.lower() in ["false", "0"]:
+                    return False
+            raise TypeError("Invalid boolean value: {}".format(attr))
+
+        if data_type == "str":
+            return self.deserialize_unicode(attr)
+        return eval(data_type)(attr)  # nosec # pylint: disable=eval-used
+
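+    # Usage sketch (illustrative): bools accept '1'/'0'/'true'/'false', e.g.
+    #     >>> d = Deserializer()
+    #     >>> d.deserialize_basic("true", "bool"), d.deserialize_basic("0", "bool")
+    #     (True, False)
+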
+    @staticmethod
+    def deserialize_unicode(data):
+        """Preserve unicode objects in Python 2, otherwise return data
+        as a string.
+
+        :param str data: response string to be deserialized.
+        :return: Deserialized string.
+        :rtype: str or unicode
+        """
+        # We might be here because we have an enum modeled as string,
+        # and we try to deserialize a partial dict with enum inside
+        if isinstance(data, Enum):
+            return data
+
+        # Consider this is real string
+        try:
+            if isinstance(data, unicode):  # type: ignore
+                return data
+        except NameError:
+            return str(data)
+        return str(data)
+
+    @staticmethod
+    def deserialize_enum(data, enum_obj):
+        """Deserialize string into enum object.
+
+        If the string is not a valid enum value it will be returned as-is
+        and a warning will be logged.
+
+        :param str data: Response string to be deserialized. If this value is
+         None or invalid it will be returned as-is.
+        :param Enum enum_obj: Enum object to deserialize to.
+        :return: Deserialized enum object.
+        :rtype: Enum
+        """
+        if isinstance(data, enum_obj) or data is None:
+            return data
+        if isinstance(data, Enum):
+            data = data.value
+        if isinstance(data, int):
+            # Workaround. We might consider removing it in the future.
+            try:
+                return list(enum_obj.__members__.values())[data]
+            except IndexError as exc:
+                error = "{!r} is not a valid index for enum {!r}"
+                raise DeserializationError(error.format(data, enum_obj)) from exc
+        try:
+            return enum_obj(str(data))
+        except ValueError:
+            for enum_value in enum_obj:
+                if enum_value.value.lower() == str(data).lower():
+                    return enum_value
+            # We don't fail anymore for unknown value, we deserialize as a string
+            _LOGGER.warning("Deserializer is not able to find %s as valid enum in %s", data, enum_obj)
+            return Deserializer.deserialize_unicode(data)
+
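+    # Usage sketch (illustrative; ``Color`` is a hypothetical enum with member
+    # RED = "Red"): unknown values fall back to plain strings instead of raising.
+    #     >>> Deserializer.deserialize_enum("red", Color)
+    #     <Color.RED: 'Red'>
+    #     >>> Deserializer.deserialize_enum("Purple", Color)  # logs a warning
+    #     'Purple'
+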
+    @staticmethod
+    def deserialize_bytearray(attr):
+        """Deserialize string into bytearray.
+
+        :param str attr: response string to be deserialized.
+        :return: Deserialized bytearray
+        :rtype: bytearray
+        :raises TypeError: if string format invalid.
+        """
+        if isinstance(attr, ET.Element):
+            attr = attr.text
+        return bytearray(b64decode(attr))  # type: ignore
+
+    @staticmethod
+    def deserialize_base64(attr):
+        """Deserialize base64 encoded string into string.
+
+        :param str attr: response string to be deserialized.
+        :return: Decoded bytes
+        :rtype: bytes
+        :raises TypeError: if string format invalid.
+        """
+        if isinstance(attr, ET.Element):
+            attr = attr.text
+        padding = "=" * (3 - (len(attr) + 3) % 4)  # type: ignore
+        attr = attr + padding  # type: ignore
+        encoded = attr.replace("-", "+").replace("_", "/")
+        return b64decode(encoded)
+
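+    # Usage sketch (illustrative): the inverse of serialize_base64; padding is
+    # restored before decoding, e.g.
+    #     >>> Deserializer.deserialize_base64("aGk")
+    #     b'hi'
+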
+    @staticmethod
+    def deserialize_decimal(attr):
+        """Deserialize string into Decimal object.
+
+        :param str attr: response string to be deserialized.
+        :return: Deserialized decimal
+        :raises DeserializationError: if string format invalid.
+        :rtype: decimal
+        """
+        if isinstance(attr, ET.Element):
+            attr = attr.text
+        try:
+            return decimal.Decimal(str(attr))  # type: ignore
+        except decimal.DecimalException as err:
+            msg = "Invalid decimal {}".format(attr)
+            raise DeserializationError(msg) from err
+
+    @staticmethod
+    def deserialize_long(attr):
+        """Deserialize string into long (Py2) or int (Py3).
+
+        :param str attr: response string to be deserialized.
+        :return: Deserialized int
+        :rtype: long or int
+        :raises ValueError: if string format invalid.
+        """
+        if isinstance(attr, ET.Element):
+            attr = attr.text
+        return _long_type(attr)  # type: ignore
+
+    @staticmethod
+    def deserialize_duration(attr):
+        """Deserialize ISO-8601 formatted string into TimeDelta object.
+
+        :param str attr: response string to be deserialized.
+        :return: Deserialized duration
+        :rtype: TimeDelta
+        :raises DeserializationError: if string format invalid.
+        """
+        if isinstance(attr, ET.Element):
+            attr = attr.text
+        try:
+            duration = isodate.parse_duration(attr)
+        except (ValueError, OverflowError, AttributeError) as err:
+            msg = "Cannot deserialize duration object."
+            raise DeserializationError(msg) from err
+        return duration
+
+    @staticmethod
+    def deserialize_date(attr):
+        """Deserialize ISO-8601 formatted string into Date object.
+
+        :param str attr: response string to be deserialized.
+        :return: Deserialized date
+        :rtype: Date
+        :raises DeserializationError: if string format invalid.
+        """
+        if isinstance(attr, ET.Element):
+            attr = attr.text
+        if re.search(r"[^\W\d_]", attr, re.I + re.U):  # type: ignore
+            raise DeserializationError("Date must have only digits and -. Received: %s" % attr)
+        # This must NOT use real defaultmonth/defaultday values; using 0 ensures an incomplete date raises an exception.
+        return isodate.parse_date(attr, defaultmonth=0, defaultday=0)
+
+    @staticmethod
+    def deserialize_time(attr):
+        """Deserialize ISO-8601 formatted string into time object.
+
+        :param str attr: response string to be deserialized.
+        :return: Deserialized time
+        :rtype: datetime.time
+        :raises DeserializationError: if string format invalid.
+        """
+        if isinstance(attr, ET.Element):
+            attr = attr.text
+        if re.search(r"[^\W\d_]", attr, re.I + re.U):  # type: ignore
+            raise DeserializationError("Date must have only digits and -. Received: %s" % attr)
+        return isodate.parse_time(attr)
+
+    @staticmethod
+    def deserialize_rfc(attr):
+        """Deserialize RFC-1123 formatted string into Datetime object.
+
+        :param str attr: response string to be deserialized.
+        :return: Deserialized RFC datetime
+        :rtype: Datetime
+        :raises DeserializationError: if string format invalid.
+        """
+        if isinstance(attr, ET.Element):
+            attr = attr.text
+        try:
+            parsed_date = email.utils.parsedate_tz(attr)  # type: ignore
+            date_obj = datetime.datetime(
+                *parsed_date[:6], tzinfo=datetime.timezone(datetime.timedelta(minutes=(parsed_date[9] or 0) / 60))
+            )
+            if not date_obj.tzinfo:
+                date_obj = date_obj.astimezone(tz=TZ_UTC)
+        except ValueError as err:
+            msg = "Cannot deserialize to rfc datetime object."
+            raise DeserializationError(msg) from err
+        return date_obj
+
+    @staticmethod
+    def deserialize_iso(attr):
+        """Deserialize ISO-8601 formatted string into Datetime object.
+
+        :param str attr: response string to be deserialized.
+        :return: Deserialized ISO datetime
+        :rtype: Datetime
+        :raises DeserializationError: if string format invalid.
+        """
+        if isinstance(attr, ET.Element):
+            attr = attr.text
+        try:
+            attr = attr.upper()  # type: ignore
+            match = Deserializer.valid_date.match(attr)
+            if not match:
+                raise ValueError("Invalid datetime string: " + attr)
+
+            check_decimal = attr.split(".")
+            if len(check_decimal) > 1:
+                decimal_str = ""
+                for digit in check_decimal[1]:
+                    if digit.isdigit():
+                        decimal_str += digit
+                    else:
+                        break
+                if len(decimal_str) > 6:
+                    attr = attr.replace(decimal_str, decimal_str[0:6])
+
+            date_obj = isodate.parse_datetime(attr)
+            test_utc = date_obj.utctimetuple()
+            if test_utc.tm_year > 9999 or test_utc.tm_year < 1:
+                raise OverflowError("Hit max or min date")
+        except (ValueError, OverflowError, AttributeError) as err:
+            msg = "Cannot deserialize datetime object."
+            raise DeserializationError(msg) from err
+        return date_obj
+
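+    # Usage sketch (illustrative): fractional seconds beyond microsecond
+    # precision are truncated before parsing, e.g.
+    #     >>> Deserializer.deserialize_iso("2024-01-01T12:00:00.1234567890Z").microsecond
+    #     123456
+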
+    @staticmethod
+    def deserialize_unix(attr):
+        """Serialize Datetime object into IntTime format.
+        This is represented as seconds.
+
+        :param int attr: Object to be serialized.
+        :return: Deserialized datetime
+        :rtype: Datetime
+        :raises DeserializationError: if format invalid
+        """
+        if isinstance(attr, ET.Element):
+            attr = int(attr.text)  # type: ignore
+        try:
+            attr = int(attr)
+            date_obj = datetime.datetime.fromtimestamp(attr, TZ_UTC)
+        except ValueError as err:
+            msg = "Cannot deserialize to unix datetime object."
+            raise DeserializationError(msg) from err
+        return date_obj
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/blob/_generated/aio/__init__.py b/.venv/lib/python3.12/site-packages/azure/storage/blob/_generated/aio/__init__.py
new file mode 100644
index 00000000..c57ce36e
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/blob/_generated/aio/__init__.py
@@ -0,0 +1,29 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+# pylint: disable=wrong-import-position
+
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from ._patch import *  # pylint: disable=unused-wildcard-import
+
+from ._azure_blob_storage import AzureBlobStorage  # type: ignore
+
+try:
+    from ._patch import __all__ as _patch_all
+    from ._patch import *
+except ImportError:
+    _patch_all = []
+from ._patch import patch_sdk as _patch_sdk
+
+__all__ = [
+    "AzureBlobStorage",
+]
+__all__.extend([p for p in _patch_all if p not in __all__])  # pyright: ignore
+
+_patch_sdk()
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/blob/_generated/aio/_azure_blob_storage.py b/.venv/lib/python3.12/site-packages/azure/storage/blob/_generated/aio/_azure_blob_storage.py
new file mode 100644
index 00000000..9a06e367
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/blob/_generated/aio/_azure_blob_storage.py
@@ -0,0 +1,121 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+from copy import deepcopy
+from typing import Any, Awaitable
+from typing_extensions import Self
+
+from azure.core import AsyncPipelineClient
+from azure.core.pipeline import policies
+from azure.core.rest import AsyncHttpResponse, HttpRequest
+
+from .. import models as _models
+from .._serialization import Deserializer, Serializer
+from ._configuration import AzureBlobStorageConfiguration
+from .operations import (
+    AppendBlobOperations,
+    BlobOperations,
+    BlockBlobOperations,
+    ContainerOperations,
+    PageBlobOperations,
+    ServiceOperations,
+)
+
+
+class AzureBlobStorage:  # pylint: disable=client-accepts-api-version-keyword
+    """AzureBlobStorage.
+
+    :ivar service: ServiceOperations operations
+    :vartype service: azure.storage.blob.aio.operations.ServiceOperations
+    :ivar container: ContainerOperations operations
+    :vartype container: azure.storage.blob.aio.operations.ContainerOperations
+    :ivar blob: BlobOperations operations
+    :vartype blob: azure.storage.blob.aio.operations.BlobOperations
+    :ivar page_blob: PageBlobOperations operations
+    :vartype page_blob: azure.storage.blob.aio.operations.PageBlobOperations
+    :ivar append_blob: AppendBlobOperations operations
+    :vartype append_blob: azure.storage.blob.aio.operations.AppendBlobOperations
+    :ivar block_blob: BlockBlobOperations operations
+    :vartype block_blob: azure.storage.blob.aio.operations.BlockBlobOperations
+    :param url: The URL of the service account, container, or blob that is the target of the
+     desired operation. Required.
+    :type url: str
+    :param base_url: Service URL. Required. Default value is "".
+    :type base_url: str
+    :keyword version: Specifies the version of the operation to use for this request. Default value
+     is "2025-01-05". Note that overriding this default value may result in unsupported behavior.
+    :paramtype version: str
+    """
+
+    def __init__(  # pylint: disable=missing-client-constructor-parameter-credential
+        self, url: str, base_url: str = "", **kwargs: Any
+    ) -> None:
+        self._config = AzureBlobStorageConfiguration(url=url, **kwargs)
+        _policies = kwargs.pop("policies", None)
+        if _policies is None:
+            _policies = [
+                policies.RequestIdPolicy(**kwargs),
+                self._config.headers_policy,
+                self._config.user_agent_policy,
+                self._config.proxy_policy,
+                policies.ContentDecodePolicy(**kwargs),
+                self._config.redirect_policy,
+                self._config.retry_policy,
+                self._config.authentication_policy,
+                self._config.custom_hook_policy,
+                self._config.logging_policy,
+                policies.DistributedTracingPolicy(**kwargs),
+                policies.SensitiveHeaderCleanupPolicy(**kwargs) if self._config.redirect_policy else None,
+                self._config.http_logging_policy,
+            ]
+        self._client: AsyncPipelineClient = AsyncPipelineClient(base_url=base_url, policies=_policies, **kwargs)
+
+        client_models = {k: v for k, v in _models.__dict__.items() if isinstance(v, type)}
+        self._serialize = Serializer(client_models)
+        self._deserialize = Deserializer(client_models)
+        self._serialize.client_side_validation = False
+        self.service = ServiceOperations(self._client, self._config, self._serialize, self._deserialize)
+        self.container = ContainerOperations(self._client, self._config, self._serialize, self._deserialize)
+        self.blob = BlobOperations(self._client, self._config, self._serialize, self._deserialize)
+        self.page_blob = PageBlobOperations(self._client, self._config, self._serialize, self._deserialize)
+        self.append_blob = AppendBlobOperations(self._client, self._config, self._serialize, self._deserialize)
+        self.block_blob = BlockBlobOperations(self._client, self._config, self._serialize, self._deserialize)
+
+    def _send_request(
+        self, request: HttpRequest, *, stream: bool = False, **kwargs: Any
+    ) -> Awaitable[AsyncHttpResponse]:
+        """Runs the network request through the client's chained policies.
+
+        >>> from azure.core.rest import HttpRequest
+        >>> request = HttpRequest("GET", "https://www.example.org/")
+        >>> response = await client._send_request(request)
+        <AsyncHttpResponse: 200 OK>
+
+        For more information on this code flow, see https://aka.ms/azsdk/dpcodegen/python/send_request
+
+        :param request: The network request you want to make. Required.
+        :type request: ~azure.core.rest.HttpRequest
+        :keyword bool stream: Whether the response payload will be streamed. Defaults to False.
+        :return: The response of your network call. Does not do error handling on your response.
+        :rtype: ~azure.core.rest.AsyncHttpResponse
+        """
+
+        request_copy = deepcopy(request)
+        request_copy.url = self._client.format_url(request_copy.url)
+        return self._client.send_request(request_copy, stream=stream, **kwargs)  # type: ignore
+
+    async def close(self) -> None:
+        await self._client.close()
+
+    async def __aenter__(self) -> Self:
+        await self._client.__aenter__()
+        return self
+
+    async def __aexit__(self, *exc_details: Any) -> None:
+        await self._client.__aexit__(*exc_details)
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/blob/_generated/aio/_configuration.py b/.venv/lib/python3.12/site-packages/azure/storage/blob/_generated/aio/_configuration.py
new file mode 100644
index 00000000..7448ca36
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/blob/_generated/aio/_configuration.py
@@ -0,0 +1,51 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+from typing import Any, Literal
+
+from azure.core.pipeline import policies
+
+VERSION = "unknown"
+
+
+class AzureBlobStorageConfiguration:  # pylint: disable=too-many-instance-attributes
+    """Configuration for AzureBlobStorage.
+
+    Note that all parameters used to create this instance are saved as instance
+    attributes.
+
+    :param url: The URL of the service account, container, or blob that is the target of the
+     desired operation. Required.
+    :type url: str
+    :keyword version: Specifies the version of the operation to use for this request. Default value
+     is "2025-01-05". Note that overriding this default value may result in unsupported behavior.
+    :paramtype version: str
+    """
+
+    def __init__(self, url: str, **kwargs: Any) -> None:
+        version: Literal["2025-01-05"] = kwargs.pop("version", "2025-01-05")
+
+        if url is None:
+            raise ValueError("Parameter 'url' must not be None.")
+
+        self.url = url
+        self.version = version
+        kwargs.setdefault("sdk_moniker", "azureblobstorage/{}".format(VERSION))
+        self.polling_interval = kwargs.get("polling_interval", 30)
+        self._configure(**kwargs)
+
+    def _configure(self, **kwargs: Any) -> None:
+        self.user_agent_policy = kwargs.get("user_agent_policy") or policies.UserAgentPolicy(**kwargs)
+        self.headers_policy = kwargs.get("headers_policy") or policies.HeadersPolicy(**kwargs)
+        self.proxy_policy = kwargs.get("proxy_policy") or policies.ProxyPolicy(**kwargs)
+        self.logging_policy = kwargs.get("logging_policy") or policies.NetworkTraceLoggingPolicy(**kwargs)
+        self.http_logging_policy = kwargs.get("http_logging_policy") or policies.HttpLoggingPolicy(**kwargs)
+        self.custom_hook_policy = kwargs.get("custom_hook_policy") or policies.CustomHookPolicy(**kwargs)
+        self.redirect_policy = kwargs.get("redirect_policy") or policies.AsyncRedirectPolicy(**kwargs)
+        self.retry_policy = kwargs.get("retry_policy") or policies.AsyncRetryPolicy(**kwargs)
+        self.authentication_policy = kwargs.get("authentication_policy")
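
Each policy in _configure above is taken from a keyword argument when supplied and only then built from the azure-core defaults, so any single pipeline policy can be swapped at construction time. A small sketch using kwarg names that appear verbatim in _configure; the URL is a placeholder:

    from azure.core.pipeline.policies import AsyncRetryPolicy
    from azure.storage.blob._generated.aio._configuration import AzureBlobStorageConfiguration

    config = AzureBlobStorageConfiguration(
        "https://myaccount.blob.core.windows.net",
        retry_policy=AsyncRetryPolicy(retry_total=3),  # wins over the default AsyncRetryPolicy(**kwargs)
    )
    print(config.version)           # "2025-01-05", the pinned service version
    print(config.polling_interval)  # 30 unless overridden via kwargs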
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/blob/_generated/aio/_patch.py b/.venv/lib/python3.12/site-packages/azure/storage/blob/_generated/aio/_patch.py
new file mode 100644
index 00000000..f7dd3251
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/blob/_generated/aio/_patch.py
@@ -0,0 +1,20 @@
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------
+"""Customize generated code here.
+
+Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize
+"""
+from typing import List
+
+__all__: List[str] = []  # Add all objects you want publicly available to users at this package level
+
+
+def patch_sdk():
+    """Do not remove from this file.
+
+    `patch_sdk` is a last resort escape hatch that allows you to do customizations
+    you can't accomplish using the techniques described in
+    https://aka.ms/azsdk/python/dpcodegen/python/customize
+    """
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/blob/_generated/aio/operations/__init__.py b/.venv/lib/python3.12/site-packages/azure/storage/blob/_generated/aio/operations/__init__.py
new file mode 100644
index 00000000..4a5bb832
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/blob/_generated/aio/operations/__init__.py
@@ -0,0 +1,35 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+# pylint: disable=wrong-import-position
+
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from ._patch import *  # pylint: disable=unused-wildcard-import
+
+from ._service_operations import ServiceOperations  # type: ignore
+from ._container_operations import ContainerOperations  # type: ignore
+from ._blob_operations import BlobOperations  # type: ignore
+from ._page_blob_operations import PageBlobOperations  # type: ignore
+from ._append_blob_operations import AppendBlobOperations  # type: ignore
+from ._block_blob_operations import BlockBlobOperations  # type: ignore
+
+from ._patch import __all__ as _patch_all
+from ._patch import *
+from ._patch import patch_sdk as _patch_sdk
+
+__all__ = [
+    "ServiceOperations",
+    "ContainerOperations",
+    "BlobOperations",
+    "PageBlobOperations",
+    "AppendBlobOperations",
+    "BlockBlobOperations",
+]
+__all__.extend([p for p in _patch_all if p not in __all__])  # pyright: ignore
+_patch_sdk()
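
These operation groups are not meant to be constructed directly; they are reached through the attributes wired up in AzureBlobStorage.__init__ (client.service, client.container, client.blob, client.page_blob, client.append_blob, client.block_blob). One observable consequence of the generated layout, using only names exported here: every group pins `models = _models`, so they all share a single models module.

    from azure.storage.blob._generated.aio.operations import AppendBlobOperations, BlobOperations

    assert AppendBlobOperations.models is BlobOperations.models  # one shared models module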
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/blob/_generated/aio/operations/_append_blob_operations.py b/.venv/lib/python3.12/site-packages/azure/storage/blob/_generated/aio/operations/_append_blob_operations.py
new file mode 100644
index 00000000..4eb79b31
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/blob/_generated/aio/operations/_append_blob_operations.py
@@ -0,0 +1,740 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+import datetime
+import sys
+from typing import Any, Callable, Dict, IO, Literal, Optional, TypeVar, Union
+
+from azure.core import AsyncPipelineClient
+from azure.core.exceptions import (
+    ClientAuthenticationError,
+    HttpResponseError,
+    ResourceExistsError,
+    ResourceNotFoundError,
+    ResourceNotModifiedError,
+    map_error,
+)
+from azure.core.pipeline import PipelineResponse
+from azure.core.rest import AsyncHttpResponse, HttpRequest
+from azure.core.tracing.decorator_async import distributed_trace_async
+from azure.core.utils import case_insensitive_dict
+
+from ... import models as _models
+from ..._serialization import Deserializer, Serializer
+from ...operations._append_blob_operations import (
+    build_append_block_from_url_request,
+    build_append_block_request,
+    build_create_request,
+    build_seal_request,
+)
+from .._configuration import AzureBlobStorageConfiguration
+
+if sys.version_info >= (3, 9):
+    from collections.abc import MutableMapping
+else:
+    from typing import MutableMapping  # type: ignore
+T = TypeVar("T")
+ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
+
+
+class AppendBlobOperations:
+    """
+    .. warning::
+        **DO NOT** instantiate this class directly.
+
+        Instead, you should access the following operations through
+        :class:`~azure.storage.blob.aio.AzureBlobStorage`'s
+        :attr:`append_blob` attribute.
+    """
+
+    models = _models
+
+    def __init__(self, *args, **kwargs) -> None:
+        input_args = list(args)
+        self._client: AsyncPipelineClient = input_args.pop(0) if input_args else kwargs.pop("client")
+        self._config: AzureBlobStorageConfiguration = input_args.pop(0) if input_args else kwargs.pop("config")
+        self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer")
+        self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer")
+
+    @distributed_trace_async
+    async def create(
+        self,
+        content_length: int,
+        timeout: Optional[int] = None,
+        metadata: Optional[Dict[str, str]] = None,
+        request_id_parameter: Optional[str] = None,
+        blob_tags_string: Optional[str] = None,
+        immutability_policy_expiry: Optional[datetime.datetime] = None,
+        immutability_policy_mode: Optional[Union[str, _models.BlobImmutabilityPolicyMode]] = None,
+        legal_hold: Optional[bool] = None,
+        blob_http_headers: Optional[_models.BlobHTTPHeaders] = None,
+        lease_access_conditions: Optional[_models.LeaseAccessConditions] = None,
+        cpk_info: Optional[_models.CpkInfo] = None,
+        cpk_scope_info: Optional[_models.CpkScopeInfo] = None,
+        modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """The Create Append Blob operation creates a new append blob.
+
+        :param content_length: The length of the request. Required.
+        :type content_length: int
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param metadata: Optional. Specifies a user-defined name-value pair associated with the blob.
+         If no name-value pairs are specified, the operation will copy the metadata from the source blob
+         or file to the destination blob. If one or more name-value pairs are specified, the destination
+         blob is created with the specified metadata, and metadata is not copied from the source blob or
+         file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming
+         rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more
+         information. Default value is None.
+        :type metadata: dict[str, str]
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param blob_tags_string: Optional. Used to set blob tags in various blob operations. Default
+         value is None.
+        :type blob_tags_string: str
+        :param immutability_policy_expiry: Specifies the date and time when the blob's immutability policy
+         is set to expire. Default value is None.
+        :type immutability_policy_expiry: ~datetime.datetime
+        :param immutability_policy_mode: Specifies the immutability policy mode to set on the blob.
+         Known values are: "Mutable", "Unlocked", and "Locked". Default value is None.
+        :type immutability_policy_mode: str or ~azure.storage.blob.models.BlobImmutabilityPolicyMode
+        :param legal_hold: Specified if a legal hold should be set on the blob. Default value is None.
+        :type legal_hold: bool
+        :param blob_http_headers: Parameter group. Default value is None.
+        :type blob_http_headers: ~azure.storage.blob.models.BlobHTTPHeaders
+        :param lease_access_conditions: Parameter group. Default value is None.
+        :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions
+        :param cpk_info: Parameter group. Default value is None.
+        :type cpk_info: ~azure.storage.blob.models.CpkInfo
+        :param cpk_scope_info: Parameter group. Default value is None.
+        :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo
+        :param modified_access_conditions: Parameter group. Default value is None.
+        :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+        _params = kwargs.pop("params", {}) or {}
+
+        blob_type: Literal["AppendBlob"] = kwargs.pop("blob_type", _headers.pop("x-ms-blob-type", "AppendBlob"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _blob_content_type = None
+        _blob_content_encoding = None
+        _blob_content_language = None
+        _blob_content_md5 = None
+        _blob_cache_control = None
+        _lease_id = None
+        _blob_content_disposition = None
+        _encryption_key = None
+        _encryption_key_sha256 = None
+        _encryption_algorithm = None
+        _encryption_scope = None
+        _if_modified_since = None
+        _if_unmodified_since = None
+        _if_match = None
+        _if_none_match = None
+        _if_tags = None
+        if blob_http_headers is not None:
+            _blob_cache_control = blob_http_headers.blob_cache_control
+            _blob_content_disposition = blob_http_headers.blob_content_disposition
+            _blob_content_encoding = blob_http_headers.blob_content_encoding
+            _blob_content_language = blob_http_headers.blob_content_language
+            _blob_content_md5 = blob_http_headers.blob_content_md5
+            _blob_content_type = blob_http_headers.blob_content_type
+        if lease_access_conditions is not None:
+            _lease_id = lease_access_conditions.lease_id
+        if cpk_info is not None:
+            _encryption_algorithm = cpk_info.encryption_algorithm
+            _encryption_key = cpk_info.encryption_key
+            _encryption_key_sha256 = cpk_info.encryption_key_sha256
+        if cpk_scope_info is not None:
+            _encryption_scope = cpk_scope_info.encryption_scope
+        if modified_access_conditions is not None:
+            _if_match = modified_access_conditions.if_match
+            _if_modified_since = modified_access_conditions.if_modified_since
+            _if_none_match = modified_access_conditions.if_none_match
+            _if_tags = modified_access_conditions.if_tags
+            _if_unmodified_since = modified_access_conditions.if_unmodified_since
+
+        _request = build_create_request(
+            url=self._config.url,
+            content_length=content_length,
+            timeout=timeout,
+            blob_content_type=_blob_content_type,
+            blob_content_encoding=_blob_content_encoding,
+            blob_content_language=_blob_content_language,
+            blob_content_md5=_blob_content_md5,
+            blob_cache_control=_blob_cache_control,
+            metadata=metadata,
+            lease_id=_lease_id,
+            blob_content_disposition=_blob_content_disposition,
+            encryption_key=_encryption_key,
+            encryption_key_sha256=_encryption_key_sha256,
+            encryption_algorithm=_encryption_algorithm,
+            encryption_scope=_encryption_scope,
+            if_modified_since=_if_modified_since,
+            if_unmodified_since=_if_unmodified_since,
+            if_match=_if_match,
+            if_none_match=_if_none_match,
+            if_tags=_if_tags,
+            request_id_parameter=request_id_parameter,
+            blob_tags_string=blob_tags_string,
+            immutability_policy_expiry=immutability_policy_expiry,
+            immutability_policy_mode=immutability_policy_mode,
+            legal_hold=legal_hold,
+            blob_type=blob_type,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [201]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["Content-MD5"] = self._deserialize("bytearray", response.headers.get("Content-MD5"))
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["x-ms-version-id"] = self._deserialize("str", response.headers.get("x-ms-version-id"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+        response_headers["x-ms-request-server-encrypted"] = self._deserialize(
+            "bool", response.headers.get("x-ms-request-server-encrypted")
+        )
+        response_headers["x-ms-encryption-key-sha256"] = self._deserialize(
+            "str", response.headers.get("x-ms-encryption-key-sha256")
+        )
+        response_headers["x-ms-encryption-scope"] = self._deserialize(
+            "str", response.headers.get("x-ms-encryption-scope")
+        )
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
+    @distributed_trace_async
+    async def append_block(
+        self,
+        content_length: int,
+        body: IO[bytes],
+        timeout: Optional[int] = None,
+        transactional_content_md5: Optional[bytes] = None,
+        transactional_content_crc64: Optional[bytes] = None,
+        request_id_parameter: Optional[str] = None,
+        structured_body_type: Optional[str] = None,
+        structured_content_length: Optional[int] = None,
+        lease_access_conditions: Optional[_models.LeaseAccessConditions] = None,
+        append_position_access_conditions: Optional[_models.AppendPositionAccessConditions] = None,
+        cpk_info: Optional[_models.CpkInfo] = None,
+        cpk_scope_info: Optional[_models.CpkScopeInfo] = None,
+        modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """The Append Block operation commits a new block of data to the end of an existing append blob.
+        The Append Block operation is permitted only if the blob was created with x-ms-blob-type set to
+        AppendBlob. Append Block is supported only in version 2015-02-21 and later.
+
+        :param content_length: The length of the request. Required.
+        :type content_length: int
+        :param body: Initial data. Required.
+        :type body: IO[bytes]
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param transactional_content_md5: Specify the transactional md5 for the body, to be validated
+         by the service. Default value is None.
+        :type transactional_content_md5: bytes
+        :param transactional_content_crc64: Specify the transactional crc64 for the body, to be
+         validated by the service. Default value is None.
+        :type transactional_content_crc64: bytes
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param structured_body_type: Required if the request body is a structured message. Specifies
+         the message schema version and properties. Default value is None.
+        :type structured_body_type: str
+        :param structured_content_length: Required if the request body is a structured message.
+         Specifies the length of the blob/file content inside the message body. Will always be smaller
+         than Content-Length. Default value is None.
+        :type structured_content_length: int
+        :param lease_access_conditions: Parameter group. Default value is None.
+        :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions
+        :param append_position_access_conditions: Parameter group. Default value is None.
+        :type append_position_access_conditions:
+         ~azure.storage.blob.models.AppendPositionAccessConditions
+        :param cpk_info: Parameter group. Default value is None.
+        :type cpk_info: ~azure.storage.blob.models.CpkInfo
+        :param cpk_scope_info: Parameter group. Default value is None.
+        :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo
+        :param modified_access_conditions: Parameter group. Default value is None.
+        :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        comp: Literal["appendblock"] = kwargs.pop("comp", _params.pop("comp", "appendblock"))
+        content_type: str = kwargs.pop("content_type", _headers.pop("Content-Type", "application/octet-stream"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _lease_id = None
+        _max_size = None
+        _append_position = None
+        _encryption_key = None
+        _encryption_key_sha256 = None
+        _encryption_algorithm = None
+        _encryption_scope = None
+        _if_modified_since = None
+        _if_unmodified_since = None
+        _if_match = None
+        _if_none_match = None
+        _if_tags = None
+        if lease_access_conditions is not None:
+            _lease_id = lease_access_conditions.lease_id
+        if append_position_access_conditions is not None:
+            _append_position = append_position_access_conditions.append_position
+            _max_size = append_position_access_conditions.max_size
+        if cpk_info is not None:
+            _encryption_algorithm = cpk_info.encryption_algorithm
+            _encryption_key = cpk_info.encryption_key
+            _encryption_key_sha256 = cpk_info.encryption_key_sha256
+        if cpk_scope_info is not None:
+            _encryption_scope = cpk_scope_info.encryption_scope
+        if modified_access_conditions is not None:
+            _if_match = modified_access_conditions.if_match
+            _if_modified_since = modified_access_conditions.if_modified_since
+            _if_none_match = modified_access_conditions.if_none_match
+            _if_tags = modified_access_conditions.if_tags
+            _if_unmodified_since = modified_access_conditions.if_unmodified_since
+        _content = body
+
+        _request = build_append_block_request(
+            url=self._config.url,
+            content_length=content_length,
+            timeout=timeout,
+            transactional_content_md5=transactional_content_md5,
+            transactional_content_crc64=transactional_content_crc64,
+            lease_id=_lease_id,
+            max_size=_max_size,
+            append_position=_append_position,
+            encryption_key=_encryption_key,
+            encryption_key_sha256=_encryption_key_sha256,
+            encryption_algorithm=_encryption_algorithm,
+            encryption_scope=_encryption_scope,
+            if_modified_since=_if_modified_since,
+            if_unmodified_since=_if_unmodified_since,
+            if_match=_if_match,
+            if_none_match=_if_none_match,
+            if_tags=_if_tags,
+            request_id_parameter=request_id_parameter,
+            structured_body_type=structured_body_type,
+            structured_content_length=structured_content_length,
+            comp=comp,
+            content_type=content_type,
+            version=self._config.version,
+            content=_content,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [201]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["Content-MD5"] = self._deserialize("bytearray", response.headers.get("Content-MD5"))
+        response_headers["x-ms-content-crc64"] = self._deserialize(
+            "bytearray", response.headers.get("x-ms-content-crc64")
+        )
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+        response_headers["x-ms-blob-append-offset"] = self._deserialize(
+            "str", response.headers.get("x-ms-blob-append-offset")
+        )
+        response_headers["x-ms-blob-committed-block-count"] = self._deserialize(
+            "int", response.headers.get("x-ms-blob-committed-block-count")
+        )
+        response_headers["x-ms-request-server-encrypted"] = self._deserialize(
+            "bool", response.headers.get("x-ms-request-server-encrypted")
+        )
+        response_headers["x-ms-encryption-key-sha256"] = self._deserialize(
+            "str", response.headers.get("x-ms-encryption-key-sha256")
+        )
+        response_headers["x-ms-encryption-scope"] = self._deserialize(
+            "str", response.headers.get("x-ms-encryption-scope")
+        )
+        response_headers["x-ms-structured-body"] = self._deserialize(
+            "str", response.headers.get("x-ms-structured-body")
+        )
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
+    @distributed_trace_async
+    async def append_block_from_url(
+        self,
+        source_url: str,
+        content_length: int,
+        source_range: Optional[str] = None,
+        source_content_md5: Optional[bytes] = None,
+        source_contentcrc64: Optional[bytes] = None,
+        timeout: Optional[int] = None,
+        transactional_content_md5: Optional[bytes] = None,
+        request_id_parameter: Optional[str] = None,
+        copy_source_authorization: Optional[str] = None,
+        cpk_info: Optional[_models.CpkInfo] = None,
+        cpk_scope_info: Optional[_models.CpkScopeInfo] = None,
+        lease_access_conditions: Optional[_models.LeaseAccessConditions] = None,
+        append_position_access_conditions: Optional[_models.AppendPositionAccessConditions] = None,
+        modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None,
+        source_modified_access_conditions: Optional[_models.SourceModifiedAccessConditions] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """The Append Block operation commits a new block of data to the end of an existing append blob
+        where the contents are read from a source url. The Append Block operation is permitted only if
+        the blob was created with x-ms-blob-type set to AppendBlob. Append Block is supported only in
+        version 2015-02-21 and later.
+
+        :param source_url: Specify a URL to the copy source. Required.
+        :type source_url: str
+        :param content_length: The length of the request. Required.
+        :type content_length: int
+        :param source_range: Bytes of source data in the specified range. Default value is None.
+        :type source_range: str
+        :param source_content_md5: Specify the md5 calculated for the range of bytes that must be read
+         from the copy source. Default value is None.
+        :type source_content_md5: bytes
+        :param source_contentcrc64: Specify the crc64 calculated for the range of bytes that must be
+         read from the copy source. Default value is None.
+        :type source_contentcrc64: bytes
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param transactional_content_md5: Specify the transactional md5 for the body, to be validated
+         by the service. Default value is None.
+        :type transactional_content_md5: bytes
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param copy_source_authorization: Only Bearer type is supported. Credentials should be a valid
+         OAuth access token to copy source. Default value is None.
+        :type copy_source_authorization: str
+        :param cpk_info: Parameter group. Default value is None.
+        :type cpk_info: ~azure.storage.blob.models.CpkInfo
+        :param cpk_scope_info: Parameter group. Default value is None.
+        :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo
+        :param lease_access_conditions: Parameter group. Default value is None.
+        :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions
+        :param append_position_access_conditions: Parameter group. Default value is None.
+        :type append_position_access_conditions:
+         ~azure.storage.blob.models.AppendPositionAccessConditions
+        :param modified_access_conditions: Parameter group. Default value is None.
+        :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions
+        :param source_modified_access_conditions: Parameter group. Default value is None.
+        :type source_modified_access_conditions:
+         ~azure.storage.blob.models.SourceModifiedAccessConditions
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        comp: Literal["appendblock"] = kwargs.pop("comp", _params.pop("comp", "appendblock"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _encryption_key = None
+        _encryption_key_sha256 = None
+        _encryption_algorithm = None
+        _encryption_scope = None
+        _lease_id = None
+        _max_size = None
+        _append_position = None
+        _if_modified_since = None
+        _if_unmodified_since = None
+        _if_match = None
+        _if_none_match = None
+        _if_tags = None
+        _source_if_modified_since = None
+        _source_if_unmodified_since = None
+        _source_if_match = None
+        _source_if_none_match = None
+        if cpk_info is not None:
+            _encryption_algorithm = cpk_info.encryption_algorithm
+            _encryption_key = cpk_info.encryption_key
+            _encryption_key_sha256 = cpk_info.encryption_key_sha256
+        if cpk_scope_info is not None:
+            _encryption_scope = cpk_scope_info.encryption_scope
+        if lease_access_conditions is not None:
+            _lease_id = lease_access_conditions.lease_id
+        if append_position_access_conditions is not None:
+            _append_position = append_position_access_conditions.append_position
+            _max_size = append_position_access_conditions.max_size
+        if modified_access_conditions is not None:
+            _if_match = modified_access_conditions.if_match
+            _if_modified_since = modified_access_conditions.if_modified_since
+            _if_none_match = modified_access_conditions.if_none_match
+            _if_tags = modified_access_conditions.if_tags
+            _if_unmodified_since = modified_access_conditions.if_unmodified_since
+        if source_modified_access_conditions is not None:
+            _source_if_match = source_modified_access_conditions.source_if_match
+            _source_if_modified_since = source_modified_access_conditions.source_if_modified_since
+            _source_if_none_match = source_modified_access_conditions.source_if_none_match
+            _source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since
+
+        _request = build_append_block_from_url_request(
+            url=self._config.url,
+            source_url=source_url,
+            content_length=content_length,
+            source_range=source_range,
+            source_content_md5=source_content_md5,
+            source_contentcrc64=source_contentcrc64,
+            timeout=timeout,
+            transactional_content_md5=transactional_content_md5,
+            encryption_key=_encryption_key,
+            encryption_key_sha256=_encryption_key_sha256,
+            encryption_algorithm=_encryption_algorithm,
+            encryption_scope=_encryption_scope,
+            lease_id=_lease_id,
+            max_size=_max_size,
+            append_position=_append_position,
+            if_modified_since=_if_modified_since,
+            if_unmodified_since=_if_unmodified_since,
+            if_match=_if_match,
+            if_none_match=_if_none_match,
+            if_tags=_if_tags,
+            source_if_modified_since=_source_if_modified_since,
+            source_if_unmodified_since=_source_if_unmodified_since,
+            source_if_match=_source_if_match,
+            source_if_none_match=_source_if_none_match,
+            request_id_parameter=request_id_parameter,
+            copy_source_authorization=copy_source_authorization,
+            comp=comp,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [201]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["Content-MD5"] = self._deserialize("bytearray", response.headers.get("Content-MD5"))
+        response_headers["x-ms-content-crc64"] = self._deserialize(
+            "bytearray", response.headers.get("x-ms-content-crc64")
+        )
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+        response_headers["x-ms-blob-append-offset"] = self._deserialize(
+            "str", response.headers.get("x-ms-blob-append-offset")
+        )
+        response_headers["x-ms-blob-committed-block-count"] = self._deserialize(
+            "int", response.headers.get("x-ms-blob-committed-block-count")
+        )
+        response_headers["x-ms-encryption-key-sha256"] = self._deserialize(
+            "str", response.headers.get("x-ms-encryption-key-sha256")
+        )
+        response_headers["x-ms-encryption-scope"] = self._deserialize(
+            "str", response.headers.get("x-ms-encryption-scope")
+        )
+        response_headers["x-ms-request-server-encrypted"] = self._deserialize(
+            "bool", response.headers.get("x-ms-request-server-encrypted")
+        )
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
+    @distributed_trace_async
+    async def seal(
+        self,
+        timeout: Optional[int] = None,
+        request_id_parameter: Optional[str] = None,
+        lease_access_conditions: Optional[_models.LeaseAccessConditions] = None,
+        modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None,
+        append_position_access_conditions: Optional[_models.AppendPositionAccessConditions] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """The Seal operation seals the Append Blob to make it read-only. Seal is supported only on
+        version 2019-12-12 version or later.
+
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param lease_access_conditions: Parameter group. Default value is None.
+        :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions
+        :param modified_access_conditions: Parameter group. Default value is None.
+        :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions
+        :param append_position_access_conditions: Parameter group. Default value is None.
+        :type append_position_access_conditions:
+         ~azure.storage.blob.models.AppendPositionAccessConditions
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        comp: Literal["seal"] = kwargs.pop("comp", _params.pop("comp", "seal"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _lease_id = None
+        _if_modified_since = None
+        _if_unmodified_since = None
+        _if_match = None
+        _if_none_match = None
+        _append_position = None
+        if lease_access_conditions is not None:
+            _lease_id = lease_access_conditions.lease_id
+        if modified_access_conditions is not None:
+            _if_match = modified_access_conditions.if_match
+            _if_modified_since = modified_access_conditions.if_modified_since
+            _if_none_match = modified_access_conditions.if_none_match
+            _if_unmodified_since = modified_access_conditions.if_unmodified_since
+        if append_position_access_conditions is not None:
+            _append_position = append_position_access_conditions.append_position
+
+        _request = build_seal_request(
+            url=self._config.url,
+            timeout=timeout,
+            request_id_parameter=request_id_parameter,
+            lease_id=_lease_id,
+            if_modified_since=_if_modified_since,
+            if_unmodified_since=_if_unmodified_since,
+            if_match=_if_match,
+            if_none_match=_if_none_match,
+            append_position=_append_position,
+            comp=comp,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+        response_headers["x-ms-blob-sealed"] = self._deserialize("bool", response.headers.get("x-ms-blob-sealed"))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
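
Taken together, the four operations above cover the append-blob lifecycle: create an empty blob, commit blocks at its tail (optionally from a source URL), and finally seal it read-only. A hedged sketch of that sequence through the generated client; the URL is a placeholder, no credential is wired in, and errors surface as HttpResponseError exactly as the methods above raise them:

    import asyncio
    import io

    from azure.storage.blob._generated.aio import AzureBlobStorage

    async def main() -> None:
        url = "https://myaccount.blob.core.windows.net/logs/app.log"  # placeholder
        async with AzureBlobStorage(url=url) as client:
            payload = b"one line of log data\n"
            await client.append_blob.create(content_length=0)  # new empty append blob
            await client.append_blob.append_block(             # commits at the current tail
                content_length=len(payload),
                body=io.BytesIO(payload),
            )
            await client.append_blob.seal()                    # read-only from here on

    asyncio.run(main())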
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/blob/_generated/aio/operations/_blob_operations.py b/.venv/lib/python3.12/site-packages/azure/storage/blob/_generated/aio/operations/_blob_operations.py
new file mode 100644
index 00000000..ee46d9ef
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/blob/_generated/aio/operations/_blob_operations.py
@@ -0,0 +1,3211 @@
+# pylint: disable=too-many-lines
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+import datetime
+import sys
+from typing import Any, AsyncIterator, Callable, Dict, Literal, Optional, TypeVar, Union
+
+from azure.core import AsyncPipelineClient
+from azure.core.exceptions import (
+    ClientAuthenticationError,
+    HttpResponseError,
+    ResourceExistsError,
+    ResourceNotFoundError,
+    ResourceNotModifiedError,
+    StreamClosedError,
+    StreamConsumedError,
+    map_error,
+)
+from azure.core.pipeline import PipelineResponse
+from azure.core.rest import AsyncHttpResponse, HttpRequest
+from azure.core.tracing.decorator_async import distributed_trace_async
+from azure.core.utils import case_insensitive_dict
+
+from ... import models as _models
+from ..._serialization import Deserializer, Serializer
+from ...operations._blob_operations import (
+    build_abort_copy_from_url_request,
+    build_acquire_lease_request,
+    build_break_lease_request,
+    build_change_lease_request,
+    build_copy_from_url_request,
+    build_create_snapshot_request,
+    build_delete_immutability_policy_request,
+    build_delete_request,
+    build_download_request,
+    build_get_account_info_request,
+    build_get_properties_request,
+    build_get_tags_request,
+    build_query_request,
+    build_release_lease_request,
+    build_renew_lease_request,
+    build_set_expiry_request,
+    build_set_http_headers_request,
+    build_set_immutability_policy_request,
+    build_set_legal_hold_request,
+    build_set_metadata_request,
+    build_set_tags_request,
+    build_set_tier_request,
+    build_start_copy_from_url_request,
+    build_undelete_request,
+)
+from .._configuration import AzureBlobStorageConfiguration
+
+if sys.version_info >= (3, 9):
+    from collections.abc import MutableMapping
+else:
+    from typing import MutableMapping  # type: ignore
+T = TypeVar("T")
+ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
+
+
+class BlobOperations:  # pylint: disable=too-many-public-methods
+    """
+    .. warning::
+        **DO NOT** instantiate this class directly.
+
+        Instead, you should access the following operations through
+        :class:`~azure.storage.blob.aio.AzureBlobStorage`'s
+        :attr:`blob` attribute.
+    """
+
+    models = _models
+
+    def __init__(self, *args, **kwargs) -> None:
+        input_args = list(args)
+        self._client: AsyncPipelineClient = input_args.pop(0) if input_args else kwargs.pop("client")
+        self._config: AzureBlobStorageConfiguration = input_args.pop(0) if input_args else kwargs.pop("config")
+        self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer")
+        self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer")
+
+    @distributed_trace_async
+    async def download(
+        self,
+        snapshot: Optional[str] = None,
+        version_id: Optional[str] = None,
+        timeout: Optional[int] = None,
+        range: Optional[str] = None,
+        range_get_content_md5: Optional[bool] = None,
+        range_get_content_crc64: Optional[bool] = None,
+        structured_body_type: Optional[str] = None,
+        request_id_parameter: Optional[str] = None,
+        lease_access_conditions: Optional[_models.LeaseAccessConditions] = None,
+        cpk_info: Optional[_models.CpkInfo] = None,
+        modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None,
+        **kwargs: Any
+    ) -> AsyncIterator[bytes]:
+        # pylint: disable=line-too-long
+        """The Download operation reads or downloads a blob from the system, including its metadata and
+        properties. You can also call Download to read a snapshot.
+
+        :param snapshot: The snapshot parameter is an opaque DateTime value that, when present,
+         specifies the blob snapshot to retrieve. For more information on working with blob snapshots,
+         see :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob">Creating
+         a Snapshot of a Blob.</a>`. Default value is None.
+        :type snapshot: str
+        :param version_id: The version id parameter is an opaque DateTime value that, when present,
+         specifies the version of the blob to operate on. It's for service version 2019-10-10 and newer.
+         Default value is None.
+        :type version_id: str
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param range: Return only the bytes of the blob in the specified range. Default value is None.
+        :type range: str
+        :param range_get_content_md5: When set to true and specified together with the Range, the
+         service returns the MD5 hash for the range, as long as the range is less than or equal to 4 MB
+         in size. Default value is None.
+        :type range_get_content_md5: bool
+        :param range_get_content_crc64: When set to true and specified together with the Range, the
+         service returns the CRC64 hash for the range, as long as the range is less than or equal to 4
+         MB in size. Default value is None.
+        :type range_get_content_crc64: bool
+        :param structured_body_type: Specifies the response content should be returned as a structured
+         message and specifies the message schema version and properties. Default value is None.
+        :type structured_body_type: str
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param lease_access_conditions: Parameter group. Default value is None.
+        :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions
+        :param cpk_info: Parameter group. Default value is None.
+        :type cpk_info: ~azure.storage.blob.models.CpkInfo
+        :param modified_access_conditions: Parameter group. Default value is None.
+        :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions
+        :return: AsyncIterator[bytes] or the result of cls(response)
+        :rtype: AsyncIterator[bytes]
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = kwargs.pop("params", {}) or {}
+
+        cls: ClsType[AsyncIterator[bytes]] = kwargs.pop("cls", None)
+
+        _lease_id = None
+        _encryption_key = None
+        _encryption_key_sha256 = None
+        _encryption_algorithm = None
+        _if_modified_since = None
+        _if_unmodified_since = None
+        _if_match = None
+        _if_none_match = None
+        _if_tags = None
+        if lease_access_conditions is not None:
+            _lease_id = lease_access_conditions.lease_id
+        if cpk_info is not None:
+            _encryption_algorithm = cpk_info.encryption_algorithm
+            _encryption_key = cpk_info.encryption_key
+            _encryption_key_sha256 = cpk_info.encryption_key_sha256
+        if modified_access_conditions is not None:
+            _if_match = modified_access_conditions.if_match
+            _if_modified_since = modified_access_conditions.if_modified_since
+            _if_none_match = modified_access_conditions.if_none_match
+            _if_tags = modified_access_conditions.if_tags
+            _if_unmodified_since = modified_access_conditions.if_unmodified_since
+
+        _request = build_download_request(
+            url=self._config.url,
+            snapshot=snapshot,
+            version_id=version_id,
+            timeout=timeout,
+            range=range,
+            lease_id=_lease_id,
+            range_get_content_md5=range_get_content_md5,
+            range_get_content_crc64=range_get_content_crc64,
+            structured_body_type=structured_body_type,
+            encryption_key=_encryption_key,
+            encryption_key_sha256=_encryption_key_sha256,
+            encryption_algorithm=_encryption_algorithm,
+            if_modified_since=_if_modified_since,
+            if_unmodified_since=_if_unmodified_since,
+            if_match=_if_match,
+            if_none_match=_if_none_match,
+            if_tags=_if_tags,
+            request_id_parameter=request_id_parameter,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _decompress = kwargs.pop("decompress", True)
+        _stream = True
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200, 206]:
+            try:
+                await response.read()  # Load the body in memory and close the socket
+            except (StreamConsumedError, StreamClosedError):
+                pass
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        if response.status_code == 200:
+            response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+            response_headers["x-ms-creation-time"] = self._deserialize(
+                "rfc-1123", response.headers.get("x-ms-creation-time")
+            )
+            response_headers["x-ms-meta"] = self._deserialize("{str}", response.headers.get("x-ms-meta"))
+            response_headers["x-ms-or-policy-id"] = self._deserialize("str", response.headers.get("x-ms-or-policy-id"))
+            response_headers["x-ms-or"] = self._deserialize("{str}", response.headers.get("x-ms-or"))
+            response_headers["Content-Length"] = self._deserialize("int", response.headers.get("Content-Length"))
+            response_headers["Content-Type"] = self._deserialize("str", response.headers.get("Content-Type"))
+            response_headers["Content-Range"] = self._deserialize("str", response.headers.get("Content-Range"))
+            response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+            response_headers["Content-MD5"] = self._deserialize("bytearray", response.headers.get("Content-MD5"))
+            response_headers["Content-Encoding"] = self._deserialize("str", response.headers.get("Content-Encoding"))
+            response_headers["Cache-Control"] = self._deserialize("str", response.headers.get("Cache-Control"))
+            response_headers["Content-Disposition"] = self._deserialize(
+                "str", response.headers.get("Content-Disposition")
+            )
+            response_headers["Content-Language"] = self._deserialize("str", response.headers.get("Content-Language"))
+            response_headers["x-ms-blob-sequence-number"] = self._deserialize(
+                "int", response.headers.get("x-ms-blob-sequence-number")
+            )
+            response_headers["x-ms-blob-type"] = self._deserialize("str", response.headers.get("x-ms-blob-type"))
+            response_headers["x-ms-copy-completion-time"] = self._deserialize(
+                "rfc-1123", response.headers.get("x-ms-copy-completion-time")
+            )
+            response_headers["x-ms-copy-status-description"] = self._deserialize(
+                "str", response.headers.get("x-ms-copy-status-description")
+            )
+            response_headers["x-ms-copy-id"] = self._deserialize("str", response.headers.get("x-ms-copy-id"))
+            response_headers["x-ms-copy-progress"] = self._deserialize(
+                "str", response.headers.get("x-ms-copy-progress")
+            )
+            response_headers["x-ms-copy-source"] = self._deserialize("str", response.headers.get("x-ms-copy-source"))
+            response_headers["x-ms-copy-status"] = self._deserialize("str", response.headers.get("x-ms-copy-status"))
+            response_headers["x-ms-lease-duration"] = self._deserialize(
+                "str", response.headers.get("x-ms-lease-duration")
+            )
+            response_headers["x-ms-lease-state"] = self._deserialize("str", response.headers.get("x-ms-lease-state"))
+            response_headers["x-ms-lease-status"] = self._deserialize("str", response.headers.get("x-ms-lease-status"))
+            response_headers["x-ms-client-request-id"] = self._deserialize(
+                "str", response.headers.get("x-ms-client-request-id")
+            )
+            response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+            response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+            response_headers["x-ms-version-id"] = self._deserialize("str", response.headers.get("x-ms-version-id"))
+            response_headers["x-ms-is-current-version"] = self._deserialize(
+                "bool", response.headers.get("x-ms-is-current-version")
+            )
+            response_headers["Accept-Ranges"] = self._deserialize("str", response.headers.get("Accept-Ranges"))
+            response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+            response_headers["x-ms-blob-committed-block-count"] = self._deserialize(
+                "int", response.headers.get("x-ms-blob-committed-block-count")
+            )
+            response_headers["x-ms-server-encrypted"] = self._deserialize(
+                "bool", response.headers.get("x-ms-server-encrypted")
+            )
+            response_headers["x-ms-encryption-key-sha256"] = self._deserialize(
+                "str", response.headers.get("x-ms-encryption-key-sha256")
+            )
+            response_headers["x-ms-encryption-scope"] = self._deserialize(
+                "str", response.headers.get("x-ms-encryption-scope")
+            )
+            response_headers["x-ms-blob-content-md5"] = self._deserialize(
+                "bytearray", response.headers.get("x-ms-blob-content-md5")
+            )
+            response_headers["x-ms-tag-count"] = self._deserialize("int", response.headers.get("x-ms-tag-count"))
+            response_headers["x-ms-blob-sealed"] = self._deserialize("bool", response.headers.get("x-ms-blob-sealed"))
+            response_headers["x-ms-last-access-time"] = self._deserialize(
+                "rfc-1123", response.headers.get("x-ms-last-access-time")
+            )
+            response_headers["x-ms-immutability-policy-until-date"] = self._deserialize(
+                "rfc-1123", response.headers.get("x-ms-immutability-policy-until-date")
+            )
+            response_headers["x-ms-immutability-policy-mode"] = self._deserialize(
+                "str", response.headers.get("x-ms-immutability-policy-mode")
+            )
+            response_headers["x-ms-legal-hold"] = self._deserialize("bool", response.headers.get("x-ms-legal-hold"))
+            response_headers["x-ms-structured-body"] = self._deserialize(
+                "str", response.headers.get("x-ms-structured-body")
+            )
+            response_headers["x-ms-structured-content-length"] = self._deserialize(
+                "int", response.headers.get("x-ms-structured-content-length")
+            )
+
+        if response.status_code == 206:
+            response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+            response_headers["x-ms-creation-time"] = self._deserialize(
+                "rfc-1123", response.headers.get("x-ms-creation-time")
+            )
+            response_headers["x-ms-meta"] = self._deserialize("{str}", response.headers.get("x-ms-meta"))
+            response_headers["x-ms-or-policy-id"] = self._deserialize("str", response.headers.get("x-ms-or-policy-id"))
+            response_headers["x-ms-or"] = self._deserialize("{str}", response.headers.get("x-ms-or"))
+            response_headers["Content-Length"] = self._deserialize("int", response.headers.get("Content-Length"))
+            response_headers["Content-Type"] = self._deserialize("str", response.headers.get("Content-Type"))
+            response_headers["Content-Range"] = self._deserialize("str", response.headers.get("Content-Range"))
+            response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+            response_headers["Content-MD5"] = self._deserialize("bytearray", response.headers.get("Content-MD5"))
+            response_headers["Content-Encoding"] = self._deserialize("str", response.headers.get("Content-Encoding"))
+            response_headers["Cache-Control"] = self._deserialize("str", response.headers.get("Cache-Control"))
+            response_headers["Content-Disposition"] = self._deserialize(
+                "str", response.headers.get("Content-Disposition")
+            )
+            response_headers["Content-Language"] = self._deserialize("str", response.headers.get("Content-Language"))
+            response_headers["x-ms-blob-sequence-number"] = self._deserialize(
+                "int", response.headers.get("x-ms-blob-sequence-number")
+            )
+            response_headers["x-ms-blob-type"] = self._deserialize("str", response.headers.get("x-ms-blob-type"))
+            response_headers["x-ms-content-crc64"] = self._deserialize(
+                "bytearray", response.headers.get("x-ms-content-crc64")
+            )
+            response_headers["x-ms-copy-completion-time"] = self._deserialize(
+                "rfc-1123", response.headers.get("x-ms-copy-completion-time")
+            )
+            response_headers["x-ms-copy-status-description"] = self._deserialize(
+                "str", response.headers.get("x-ms-copy-status-description")
+            )
+            response_headers["x-ms-copy-id"] = self._deserialize("str", response.headers.get("x-ms-copy-id"))
+            response_headers["x-ms-copy-progress"] = self._deserialize(
+                "str", response.headers.get("x-ms-copy-progress")
+            )
+            response_headers["x-ms-copy-source"] = self._deserialize("str", response.headers.get("x-ms-copy-source"))
+            response_headers["x-ms-copy-status"] = self._deserialize("str", response.headers.get("x-ms-copy-status"))
+            response_headers["x-ms-lease-duration"] = self._deserialize(
+                "str", response.headers.get("x-ms-lease-duration")
+            )
+            response_headers["x-ms-lease-state"] = self._deserialize("str", response.headers.get("x-ms-lease-state"))
+            response_headers["x-ms-lease-status"] = self._deserialize("str", response.headers.get("x-ms-lease-status"))
+            response_headers["x-ms-client-request-id"] = self._deserialize(
+                "str", response.headers.get("x-ms-client-request-id")
+            )
+            response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+            response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+            response_headers["x-ms-version-id"] = self._deserialize("str", response.headers.get("x-ms-version-id"))
+            response_headers["x-ms-is-current-version"] = self._deserialize(
+                "bool", response.headers.get("x-ms-is-current-version")
+            )
+            response_headers["Accept-Ranges"] = self._deserialize("str", response.headers.get("Accept-Ranges"))
+            response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+            response_headers["x-ms-blob-committed-block-count"] = self._deserialize(
+                "int", response.headers.get("x-ms-blob-committed-block-count")
+            )
+            response_headers["x-ms-server-encrypted"] = self._deserialize(
+                "bool", response.headers.get("x-ms-server-encrypted")
+            )
+            response_headers["x-ms-encryption-key-sha256"] = self._deserialize(
+                "str", response.headers.get("x-ms-encryption-key-sha256")
+            )
+            response_headers["x-ms-encryption-scope"] = self._deserialize(
+                "str", response.headers.get("x-ms-encryption-scope")
+            )
+            response_headers["x-ms-blob-content-md5"] = self._deserialize(
+                "bytearray", response.headers.get("x-ms-blob-content-md5")
+            )
+            response_headers["x-ms-tag-count"] = self._deserialize("int", response.headers.get("x-ms-tag-count"))
+            response_headers["x-ms-blob-sealed"] = self._deserialize("bool", response.headers.get("x-ms-blob-sealed"))
+            response_headers["x-ms-last-access-time"] = self._deserialize(
+                "rfc-1123", response.headers.get("x-ms-last-access-time")
+            )
+            response_headers["x-ms-immutability-policy-until-date"] = self._deserialize(
+                "rfc-1123", response.headers.get("x-ms-immutability-policy-until-date")
+            )
+            response_headers["x-ms-immutability-policy-mode"] = self._deserialize(
+                "str", response.headers.get("x-ms-immutability-policy-mode")
+            )
+            response_headers["x-ms-legal-hold"] = self._deserialize("bool", response.headers.get("x-ms-legal-hold"))
+            response_headers["x-ms-structured-body"] = self._deserialize(
+                "str", response.headers.get("x-ms-structured-body")
+            )
+            response_headers["x-ms-structured-content-length"] = self._deserialize(
+                "int", response.headers.get("x-ms-structured-content-length")
+            )
+
+        deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
+
+        if cls:
+            return cls(pipeline_response, deserialized, response_headers)  # type: ignore
+
+        return deserialized  # type: ignore
+
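+    # Usage sketch (illustrative only, not part of the generated surface): the
+    # coroutine above returns the async byte iterator produced by
+    # `response.stream_download(...)`. Assuming `ops` is an authenticated
+    # instance of this operations class (the name is an assumption), it could
+    # be consumed as:
+    #
+    #     stream = await ops.download()   # AsyncIterator[bytes]
+    #     data = b""
+    #     async for chunk in stream:
+    #         data += chunk
+    #
+    # Application code would normally go through the public
+    # azure.storage.blob.aio.BlobClient.download_blob() wrapper instead.
+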
+    @distributed_trace_async
+    async def get_properties(
+        self,
+        snapshot: Optional[str] = None,
+        version_id: Optional[str] = None,
+        timeout: Optional[int] = None,
+        request_id_parameter: Optional[str] = None,
+        lease_access_conditions: Optional[_models.LeaseAccessConditions] = None,
+        cpk_info: Optional[_models.CpkInfo] = None,
+        modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """The Get Properties operation returns all user-defined metadata, standard HTTP properties, and
+        system properties for the blob. It does not return the content of the blob.
+
+        :param snapshot: The snapshot parameter is an opaque DateTime value that, when present,
+         specifies the blob snapshot to retrieve. For more information on working with blob snapshots,
+         see :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob">Creating
+         a Snapshot of a Blob.</a>`. Default value is None.
+        :type snapshot: str
+        :param version_id: The version id parameter is an opaque DateTime value that, when present,
+         specifies the version of the blob to operate on. It's for service version 2019-10-10 and newer.
+         Default value is None.
+        :type version_id: str
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param lease_access_conditions: Parameter group. Default value is None.
+        :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions
+        :param cpk_info: Parameter group. Default value is None.
+        :type cpk_info: ~azure.storage.blob.models.CpkInfo
+        :param modified_access_conditions: Parameter group. Default value is None.
+        :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = kwargs.pop("params", {}) or {}
+
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _lease_id = None
+        _encryption_key = None
+        _encryption_key_sha256 = None
+        _encryption_algorithm = None
+        _if_modified_since = None
+        _if_unmodified_since = None
+        _if_match = None
+        _if_none_match = None
+        _if_tags = None
+        if lease_access_conditions is not None:
+            _lease_id = lease_access_conditions.lease_id
+        if cpk_info is not None:
+            _encryption_algorithm = cpk_info.encryption_algorithm
+            _encryption_key = cpk_info.encryption_key
+            _encryption_key_sha256 = cpk_info.encryption_key_sha256
+        if modified_access_conditions is not None:
+            _if_match = modified_access_conditions.if_match
+            _if_modified_since = modified_access_conditions.if_modified_since
+            _if_none_match = modified_access_conditions.if_none_match
+            _if_tags = modified_access_conditions.if_tags
+            _if_unmodified_since = modified_access_conditions.if_unmodified_since
+
+        _request = build_get_properties_request(
+            url=self._config.url,
+            snapshot=snapshot,
+            version_id=version_id,
+            timeout=timeout,
+            lease_id=_lease_id,
+            encryption_key=_encryption_key,
+            encryption_key_sha256=_encryption_key_sha256,
+            encryption_algorithm=_encryption_algorithm,
+            if_modified_since=_if_modified_since,
+            if_unmodified_since=_if_unmodified_since,
+            if_match=_if_match,
+            if_none_match=_if_none_match,
+            if_tags=_if_tags,
+            request_id_parameter=request_id_parameter,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["x-ms-creation-time"] = self._deserialize(
+            "rfc-1123", response.headers.get("x-ms-creation-time")
+        )
+        response_headers["x-ms-meta"] = self._deserialize("{str}", response.headers.get("x-ms-meta"))
+        response_headers["x-ms-or-policy-id"] = self._deserialize("str", response.headers.get("x-ms-or-policy-id"))
+        response_headers["x-ms-or"] = self._deserialize("{str}", response.headers.get("x-ms-or"))
+        response_headers["x-ms-blob-type"] = self._deserialize("str", response.headers.get("x-ms-blob-type"))
+        response_headers["x-ms-copy-completion-time"] = self._deserialize(
+            "rfc-1123", response.headers.get("x-ms-copy-completion-time")
+        )
+        response_headers["x-ms-copy-status-description"] = self._deserialize(
+            "str", response.headers.get("x-ms-copy-status-description")
+        )
+        response_headers["x-ms-copy-id"] = self._deserialize("str", response.headers.get("x-ms-copy-id"))
+        response_headers["x-ms-copy-progress"] = self._deserialize("str", response.headers.get("x-ms-copy-progress"))
+        response_headers["x-ms-copy-source"] = self._deserialize("str", response.headers.get("x-ms-copy-source"))
+        response_headers["x-ms-copy-status"] = self._deserialize("str", response.headers.get("x-ms-copy-status"))
+        response_headers["x-ms-incremental-copy"] = self._deserialize(
+            "bool", response.headers.get("x-ms-incremental-copy")
+        )
+        response_headers["x-ms-copy-destination-snapshot"] = self._deserialize(
+            "str", response.headers.get("x-ms-copy-destination-snapshot")
+        )
+        response_headers["x-ms-lease-duration"] = self._deserialize("str", response.headers.get("x-ms-lease-duration"))
+        response_headers["x-ms-lease-state"] = self._deserialize("str", response.headers.get("x-ms-lease-state"))
+        response_headers["x-ms-lease-status"] = self._deserialize("str", response.headers.get("x-ms-lease-status"))
+        response_headers["Content-Length"] = self._deserialize("int", response.headers.get("Content-Length"))
+        response_headers["Content-Type"] = self._deserialize("str", response.headers.get("Content-Type"))
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Content-MD5"] = self._deserialize("bytearray", response.headers.get("Content-MD5"))
+        response_headers["Content-Encoding"] = self._deserialize("str", response.headers.get("Content-Encoding"))
+        response_headers["Content-Disposition"] = self._deserialize("str", response.headers.get("Content-Disposition"))
+        response_headers["Content-Language"] = self._deserialize("str", response.headers.get("Content-Language"))
+        response_headers["Cache-Control"] = self._deserialize("str", response.headers.get("Cache-Control"))
+        response_headers["x-ms-blob-sequence-number"] = self._deserialize(
+            "int", response.headers.get("x-ms-blob-sequence-number")
+        )
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+        response_headers["Accept-Ranges"] = self._deserialize("str", response.headers.get("Accept-Ranges"))
+        response_headers["x-ms-blob-committed-block-count"] = self._deserialize(
+            "int", response.headers.get("x-ms-blob-committed-block-count")
+        )
+        response_headers["x-ms-server-encrypted"] = self._deserialize(
+            "bool", response.headers.get("x-ms-server-encrypted")
+        )
+        response_headers["x-ms-encryption-key-sha256"] = self._deserialize(
+            "str", response.headers.get("x-ms-encryption-key-sha256")
+        )
+        response_headers["x-ms-encryption-scope"] = self._deserialize(
+            "str", response.headers.get("x-ms-encryption-scope")
+        )
+        response_headers["x-ms-access-tier"] = self._deserialize("str", response.headers.get("x-ms-access-tier"))
+        response_headers["x-ms-access-tier-inferred"] = self._deserialize(
+            "bool", response.headers.get("x-ms-access-tier-inferred")
+        )
+        response_headers["x-ms-archive-status"] = self._deserialize("str", response.headers.get("x-ms-archive-status"))
+        response_headers["x-ms-access-tier-change-time"] = self._deserialize(
+            "rfc-1123", response.headers.get("x-ms-access-tier-change-time")
+        )
+        response_headers["x-ms-version-id"] = self._deserialize("str", response.headers.get("x-ms-version-id"))
+        response_headers["x-ms-is-current-version"] = self._deserialize(
+            "bool", response.headers.get("x-ms-is-current-version")
+        )
+        response_headers["x-ms-tag-count"] = self._deserialize("int", response.headers.get("x-ms-tag-count"))
+        response_headers["x-ms-expiry-time"] = self._deserialize("rfc-1123", response.headers.get("x-ms-expiry-time"))
+        response_headers["x-ms-blob-sealed"] = self._deserialize("bool", response.headers.get("x-ms-blob-sealed"))
+        response_headers["x-ms-rehydrate-priority"] = self._deserialize(
+            "str", response.headers.get("x-ms-rehydrate-priority")
+        )
+        response_headers["x-ms-last-access-time"] = self._deserialize(
+            "rfc-1123", response.headers.get("x-ms-last-access-time")
+        )
+        response_headers["x-ms-immutability-policy-until-date"] = self._deserialize(
+            "rfc-1123", response.headers.get("x-ms-immutability-policy-until-date")
+        )
+        response_headers["x-ms-immutability-policy-mode"] = self._deserialize(
+            "str", response.headers.get("x-ms-immutability-policy-mode")
+        )
+        response_headers["x-ms-legal-hold"] = self._deserialize("bool", response.headers.get("x-ms-legal-hold"))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
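+    # Usage sketch (illustrative only): get_properties returns None unless a
+    # `cls` callback is supplied; the deserialized response headers are exposed
+    # only through that hook. Assuming `ops` as in the sketch above:
+    #
+    #     def keep_headers(pipeline_response, deserialized, headers):
+    #         return headers
+    #
+    #     headers = await ops.get_properties(cls=keep_headers)
+    #     etag = headers["ETag"]
+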
+    @distributed_trace_async
+    async def delete(
+        self,
+        snapshot: Optional[str] = None,
+        version_id: Optional[str] = None,
+        timeout: Optional[int] = None,
+        delete_snapshots: Optional[Union[str, _models.DeleteSnapshotsOptionType]] = None,
+        request_id_parameter: Optional[str] = None,
+        blob_delete_type: Literal["Permanent"] = "Permanent",
+        lease_access_conditions: Optional[_models.LeaseAccessConditions] = None,
+        modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """If the storage account's soft delete feature is disabled then, when a blob is deleted, it is
+        permanently removed from the storage account. If the storage account's soft delete feature is
+        enabled, then, when a blob is deleted, it is marked for deletion and becomes inaccessible
+        immediately. However, the blob service retains the blob or snapshot for the number of days
+        specified by the DeleteRetentionPolicy section of [Storage service
+        properties](Set-Blob-Service-Properties.md). After the specified number of days has passed, the blob's
+        data is permanently removed from the storage account. Note that you continue to be charged for
+        the soft-deleted blob's storage until it is permanently removed. Use the List Blobs API and
+        specify the "include=deleted" query parameter to discover which blobs and snapshots have been
+        soft deleted. You can then use the Undelete Blob API to restore a soft-deleted blob. All other
+        operations on a soft-deleted blob or snapshot cause the service to return an HTTP status code
+        of 404 (ResourceNotFound).
+
+        :param snapshot: The snapshot parameter is an opaque DateTime value that, when present,
+         specifies the blob snapshot to retrieve. For more information on working with blob snapshots,
+         see :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob">Creating
+         a Snapshot of a Blob.</a>`. Default value is None.
+        :type snapshot: str
+        :param version_id: The version id parameter is an opaque DateTime value that, when present,
+         specifies the version of the blob to operate on. It's for service version 2019-10-10 and newer.
+         Default value is None.
+        :type version_id: str
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param delete_snapshots: Required if the blob has associated snapshots. Specify one of the
+         following two options: "include": Delete the base blob and all of its snapshots; "only":
+         Delete only the blob's snapshots and not the blob itself. Known values are: "include" and "only".
+         Default value is None.
+        :type delete_snapshots: str or ~azure.storage.blob.models.DeleteSnapshotsOptionType
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param blob_delete_type: Optional. Only possible value is 'Permanent', which specifies to
+         permanently delete a blob if blob soft delete is enabled. Known values are "Permanent" and
+         None. Default value is "Permanent".
+        :type blob_delete_type: str
+        :param lease_access_conditions: Parameter group. Default value is None.
+        :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions
+        :param modified_access_conditions: Parameter group. Default value is None.
+        :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = kwargs.pop("params", {}) or {}
+
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _lease_id = None
+        _if_modified_since = None
+        _if_unmodified_since = None
+        _if_match = None
+        _if_none_match = None
+        _if_tags = None
+        if lease_access_conditions is not None:
+            _lease_id = lease_access_conditions.lease_id
+        if modified_access_conditions is not None:
+            _if_match = modified_access_conditions.if_match
+            _if_modified_since = modified_access_conditions.if_modified_since
+            _if_none_match = modified_access_conditions.if_none_match
+            _if_tags = modified_access_conditions.if_tags
+            _if_unmodified_since = modified_access_conditions.if_unmodified_since
+
+        _request = build_delete_request(
+            url=self._config.url,
+            snapshot=snapshot,
+            version_id=version_id,
+            timeout=timeout,
+            lease_id=_lease_id,
+            delete_snapshots=delete_snapshots,
+            if_modified_since=_if_modified_since,
+            if_unmodified_since=_if_unmodified_since,
+            if_match=_if_match,
+            if_none_match=_if_none_match,
+            if_tags=_if_tags,
+            request_id_parameter=request_id_parameter,
+            blob_delete_type=blob_delete_type,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [202]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
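+    # Usage sketch (illustrative only): a delete that also removes snapshots,
+    # and a conditional delete guarded by an ETag via the
+    # ModifiedAccessConditions parameter group (assuming `ops` as above and the
+    # `_models` alias imported by this module):
+    #
+    #     await ops.delete(delete_snapshots="include")
+    #
+    #     etag = '"0x8DDEADBEEF"'  # hypothetical ETag captured earlier
+    #     await ops.delete(
+    #         modified_access_conditions=_models.ModifiedAccessConditions(if_match=etag)
+    #     )
+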
+    @distributed_trace_async
+    async def undelete(
+        self, timeout: Optional[int] = None, request_id_parameter: Optional[str] = None, **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """Undelete a blob that was previously soft deleted.
+
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        comp: Literal["undelete"] = kwargs.pop("comp", _params.pop("comp", "undelete"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _request = build_undelete_request(
+            url=self._config.url,
+            timeout=timeout,
+            request_id_parameter=request_id_parameter,
+            comp=comp,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
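+    # Usage sketch (illustrative only): undelete takes no resource-specific
+    # arguments; it restores the soft-deleted base blob together with any
+    # soft-deleted snapshots (assuming `ops` as above):
+    #
+    #     await ops.undelete(timeout=30)
+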
+    @distributed_trace_async
+    async def set_expiry(
+        self,
+        expiry_options: Union[str, _models.BlobExpiryOptions],
+        timeout: Optional[int] = None,
+        request_id_parameter: Optional[str] = None,
+        expires_on: Optional[str] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """Sets the time a blob will expire and be deleted.
+
+        :param expiry_options: Indicates mode of the expiry time. Known values are: "NeverExpire",
+         "RelativeToCreation", "RelativeToNow", and "Absolute". Required.
+        :type expiry_options: str or ~azure.storage.blob.models.BlobExpiryOptions
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param expires_on: The time at which the blob should expire. Default value is None.
+        :type expires_on: str
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        comp: Literal["expiry"] = kwargs.pop("comp", _params.pop("comp", "expiry"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _request = build_set_expiry_request(
+            url=self._config.url,
+            expiry_options=expiry_options,
+            timeout=timeout,
+            request_id_parameter=request_id_parameter,
+            expires_on=expires_on,
+            comp=comp,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
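+    # Usage sketch (illustrative only, semantics per the Set Blob Expiry REST
+    # docs): for the relative modes `expires_on` is a millisecond offset passed
+    # as a string; for "Absolute" it is an RFC-1123 timestamp (assuming `ops`
+    # as above):
+    #
+    #     await ops.set_expiry("RelativeToNow", expires_on="86400000")  # ~24h
+    #     await ops.set_expiry("NeverExpire")
+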
+    @distributed_trace_async
+    async def set_http_headers(
+        self,
+        timeout: Optional[int] = None,
+        request_id_parameter: Optional[str] = None,
+        blob_http_headers: Optional[_models.BlobHTTPHeaders] = None,
+        lease_access_conditions: Optional[_models.LeaseAccessConditions] = None,
+        modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """The Set HTTP Headers operation sets system properties on the blob.
+
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param blob_http_headers: Parameter group. Default value is None.
+        :type blob_http_headers: ~azure.storage.blob.models.BlobHTTPHeaders
+        :param lease_access_conditions: Parameter group. Default value is None.
+        :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions
+        :param modified_access_conditions: Parameter group. Default value is None.
+        :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        comp: Literal["properties"] = kwargs.pop("comp", _params.pop("comp", "properties"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _blob_cache_control = None
+        _blob_content_type = None
+        _blob_content_md5 = None
+        _blob_content_encoding = None
+        _blob_content_language = None
+        _lease_id = None
+        _if_modified_since = None
+        _if_unmodified_since = None
+        _if_match = None
+        _if_none_match = None
+        _if_tags = None
+        _blob_content_disposition = None
+        if blob_http_headers is not None:
+            _blob_cache_control = blob_http_headers.blob_cache_control
+            _blob_content_disposition = blob_http_headers.blob_content_disposition
+            _blob_content_encoding = blob_http_headers.blob_content_encoding
+            _blob_content_language = blob_http_headers.blob_content_language
+            _blob_content_md5 = blob_http_headers.blob_content_md5
+            _blob_content_type = blob_http_headers.blob_content_type
+        if lease_access_conditions is not None:
+            _lease_id = lease_access_conditions.lease_id
+        if modified_access_conditions is not None:
+            _if_match = modified_access_conditions.if_match
+            _if_modified_since = modified_access_conditions.if_modified_since
+            _if_none_match = modified_access_conditions.if_none_match
+            _if_tags = modified_access_conditions.if_tags
+            _if_unmodified_since = modified_access_conditions.if_unmodified_since
+
+        _request = build_set_http_headers_request(
+            url=self._config.url,
+            timeout=timeout,
+            blob_cache_control=_blob_cache_control,
+            blob_content_type=_blob_content_type,
+            blob_content_md5=_blob_content_md5,
+            blob_content_encoding=_blob_content_encoding,
+            blob_content_language=_blob_content_language,
+            lease_id=_lease_id,
+            if_modified_since=_if_modified_since,
+            if_unmodified_since=_if_unmodified_since,
+            if_match=_if_match,
+            if_none_match=_if_none_match,
+            if_tags=_if_tags,
+            blob_content_disposition=_blob_content_disposition,
+            request_id_parameter=request_id_parameter,
+            comp=comp,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["x-ms-blob-sequence-number"] = self._deserialize(
+            "int", response.headers.get("x-ms-blob-sequence-number")
+        )
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
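+    # Usage sketch (illustrative only): the standard HTTP properties travel in
+    # the BlobHTTPHeaders parameter group; per the service docs, properties
+    # omitted from the group are cleared on the blob (assuming `ops` and
+    # `_models` as above):
+    #
+    #     headers = _models.BlobHTTPHeaders(
+    #         blob_content_type="application/json",
+    #         blob_cache_control="max-age=3600",
+    #     )
+    #     await ops.set_http_headers(blob_http_headers=headers)
+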
+    @distributed_trace_async
+    async def set_immutability_policy(
+        self,
+        timeout: Optional[int] = None,
+        request_id_parameter: Optional[str] = None,
+        immutability_policy_expiry: Optional[datetime.datetime] = None,
+        immutability_policy_mode: Optional[Union[str, _models.BlobImmutabilityPolicyMode]] = None,
+        snapshot: Optional[str] = None,
+        version_id: Optional[str] = None,
+        modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """The Set Immutability Policy operation sets the immutability policy on the blob.
+
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param immutability_policy_expiry: Specifies the date time when the blob's immutability policy
+         is set to expire. Default value is None.
+        :type immutability_policy_expiry: ~datetime.datetime
+        :param immutability_policy_mode: Specifies the immutability policy mode to set on the blob.
+         Known values are: "Mutable", "Unlocked", and "Locked". Default value is None.
+        :type immutability_policy_mode: str or ~azure.storage.blob.models.BlobImmutabilityPolicyMode
+        :param snapshot: The snapshot parameter is an opaque DateTime value that, when present,
+         specifies the blob snapshot to retrieve. For more information on working with blob snapshots,
+         see :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob">Creating
+         a Snapshot of a Blob.</a>`. Default value is None.
+        :type snapshot: str
+        :param version_id: The version id parameter is an opaque DateTime value that, when present,
+         specifies the version of the blob to operate on. It's for service version 2019-10-10 and newer.
+         Default value is None.
+        :type version_id: str
+        :param modified_access_conditions: Parameter group. Default value is None.
+        :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        comp: Literal["immutabilityPolicies"] = kwargs.pop("comp", _params.pop("comp", "immutabilityPolicies"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _if_unmodified_since = None
+        if modified_access_conditions is not None:
+            _if_unmodified_since = modified_access_conditions.if_unmodified_since
+
+        _request = build_set_immutability_policy_request(
+            url=self._config.url,
+            timeout=timeout,
+            request_id_parameter=request_id_parameter,
+            if_unmodified_since=_if_unmodified_since,
+            immutability_policy_expiry=immutability_policy_expiry,
+            immutability_policy_mode=immutability_policy_mode,
+            snapshot=snapshot,
+            version_id=version_id,
+            comp=comp,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+        response_headers["x-ms-immutability-policy-until-date"] = self._deserialize(
+            "rfc-1123", response.headers.get("x-ms-immutability-policy-until-date")
+        )
+        response_headers["x-ms-immutability-policy-mode"] = self._deserialize(
+            "str", response.headers.get("x-ms-immutability-policy-mode")
+        )
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
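+    # Usage sketch (illustrative only): an unlocked time-based policy expiring
+    # in seven days (assuming `ops` as above; `datetime` is already imported by
+    # this module):
+    #
+    #     await ops.set_immutability_policy(
+    #         immutability_policy_expiry=datetime.datetime.now(datetime.timezone.utc)
+    #         + datetime.timedelta(days=7),
+    #         immutability_policy_mode="Unlocked",
+    #     )
+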
+    @distributed_trace_async
+    async def delete_immutability_policy(
+        self,
+        timeout: Optional[int] = None,
+        request_id_parameter: Optional[str] = None,
+        snapshot: Optional[str] = None,
+        version_id: Optional[str] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """The Delete Immutability Policy operation deletes the immutability policy on the blob.
+
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param snapshot: The snapshot parameter is an opaque DateTime value that, when present,
+         specifies the blob snapshot to retrieve. For more information on working with blob snapshots,
+         see :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob">Creating
+         a Snapshot of a Blob.</a>`. Default value is None.
+        :type snapshot: str
+        :param version_id: The version id parameter is an opaque DateTime value that, when present,
+         specifies the version of the blob to operate on. It's for service version 2019-10-10 and newer.
+         Default value is None.
+        :type version_id: str
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        comp: Literal["immutabilityPolicies"] = kwargs.pop("comp", _params.pop("comp", "immutabilityPolicies"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _request = build_delete_immutability_policy_request(
+            url=self._config.url,
+            timeout=timeout,
+            request_id_parameter=request_id_parameter,
+            snapshot=snapshot,
+            version_id=version_id,
+            comp=comp,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
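+    # Usage sketch (illustrative only): per the immutable-storage docs, only an
+    # unlocked policy can be deleted; a locked policy must expire on its own
+    # (assuming `ops` as above):
+    #
+    #     await ops.delete_immutability_policy()
+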
+    @distributed_trace_async
+    async def set_legal_hold(
+        self,
+        legal_hold: bool,
+        timeout: Optional[int] = None,
+        request_id_parameter: Optional[str] = None,
+        snapshot: Optional[str] = None,
+        version_id: Optional[str] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """The Set Legal Hold operation sets a legal hold on the blob.
+
+        :param legal_hold: Specifies whether a legal hold should be set on the blob. Required.
+        :type legal_hold: bool
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param snapshot: The snapshot parameter is an opaque DateTime value that, when present,
+         specifies the blob snapshot to retrieve. For more information on working with blob snapshots,
+         see :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob">Creating
+         a Snapshot of a Blob.</a>`. Default value is None.
+        :type snapshot: str
+        :param version_id: The version id parameter is an opaque DateTime value that, when present,
+         specifies the version of the blob to operate on. It's for service version 2019-10-10 and newer.
+         Default value is None.
+        :type version_id: str
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        comp: Literal["legalhold"] = kwargs.pop("comp", _params.pop("comp", "legalhold"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _request = build_set_legal_hold_request(
+            url=self._config.url,
+            legal_hold=legal_hold,
+            timeout=timeout,
+            request_id_parameter=request_id_parameter,
+            snapshot=snapshot,
+            version_id=version_id,
+            comp=comp,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+        response_headers["x-ms-legal-hold"] = self._deserialize("bool", response.headers.get("x-ms-legal-hold"))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
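+    # Editorial usage sketch, not generated code: assuming `ops` is an
+    # authenticated instance of this async operations class, the required
+    # boolean places or clears the hold, optionally pinned to one version:
+    #
+    #     await ops.set_legal_hold(legal_hold=True)
+    #     await ops.set_legal_hold(legal_hold=False, version_id=vid)
+    #
+    # Here `vid` is a hypothetical version id string from an earlier write;
+    # the resulting hold state comes back in the x-ms-legal-hold header.
+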
+    @distributed_trace_async
+    async def set_metadata(
+        self,
+        timeout: Optional[int] = None,
+        metadata: Optional[Dict[str, str]] = None,
+        request_id_parameter: Optional[str] = None,
+        lease_access_conditions: Optional[_models.LeaseAccessConditions] = None,
+        cpk_info: Optional[_models.CpkInfo] = None,
+        cpk_scope_info: Optional[_models.CpkScopeInfo] = None,
+        modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """The Set Blob Metadata operation sets user-defined metadata for the specified blob as one or
+        more name-value pairs.
+
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param metadata: Optional. Specifies a user-defined name-value pair associated with the blob.
+         If no name-value pairs are specified, the operation removes all metadata from the blob. If one
+         or more name-value pairs are specified, they replace the blob's existing metadata in full. Note
+         that beginning with version 2009-09-19, metadata names must adhere to the naming rules for C#
+         identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more information.
+         Default value is None.
+        :type metadata: dict[str, str]
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param lease_access_conditions: Parameter group. Default value is None.
+        :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions
+        :param cpk_info: Parameter group. Default value is None.
+        :type cpk_info: ~azure.storage.blob.models.CpkInfo
+        :param cpk_scope_info: Parameter group. Default value is None.
+        :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo
+        :param modified_access_conditions: Parameter group. Default value is None.
+        :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        comp: Literal["metadata"] = kwargs.pop("comp", _params.pop("comp", "metadata"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _lease_id = None
+        _encryption_key = None
+        _encryption_key_sha256 = None
+        _encryption_algorithm = None
+        _encryption_scope = None
+        _if_modified_since = None
+        _if_unmodified_since = None
+        _if_match = None
+        _if_none_match = None
+        _if_tags = None
+        if lease_access_conditions is not None:
+            _lease_id = lease_access_conditions.lease_id
+        if cpk_info is not None:
+            _encryption_algorithm = cpk_info.encryption_algorithm
+            _encryption_key = cpk_info.encryption_key
+            _encryption_key_sha256 = cpk_info.encryption_key_sha256
+        if cpk_scope_info is not None:
+            _encryption_scope = cpk_scope_info.encryption_scope
+        if modified_access_conditions is not None:
+            _if_match = modified_access_conditions.if_match
+            _if_modified_since = modified_access_conditions.if_modified_since
+            _if_none_match = modified_access_conditions.if_none_match
+            _if_tags = modified_access_conditions.if_tags
+            _if_unmodified_since = modified_access_conditions.if_unmodified_since
+
+        _request = build_set_metadata_request(
+            url=self._config.url,
+            timeout=timeout,
+            metadata=metadata,
+            lease_id=_lease_id,
+            encryption_key=_encryption_key,
+            encryption_key_sha256=_encryption_key_sha256,
+            encryption_algorithm=_encryption_algorithm,
+            encryption_scope=_encryption_scope,
+            if_modified_since=_if_modified_since,
+            if_unmodified_since=_if_unmodified_since,
+            if_match=_if_match,
+            if_none_match=_if_none_match,
+            if_tags=_if_tags,
+            request_id_parameter=request_id_parameter,
+            comp=comp,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["x-ms-version-id"] = self._deserialize("str", response.headers.get("x-ms-version-id"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+        response_headers["x-ms-request-server-encrypted"] = self._deserialize(
+            "bool", response.headers.get("x-ms-request-server-encrypted")
+        )
+        response_headers["x-ms-encryption-key-sha256"] = self._deserialize(
+            "str", response.headers.get("x-ms-encryption-key-sha256")
+        )
+        response_headers["x-ms-encryption-scope"] = self._deserialize(
+            "str", response.headers.get("x-ms-encryption-scope")
+        )
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
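+    # Editorial usage sketch, not generated code: replacing the blob's
+    # user-defined metadata wholesale, guarded by an assumed active lease id
+    # `lease` on an authenticated operations instance `ops`:
+    #
+    #     conditions = _models.LeaseAccessConditions(lease_id=lease)
+    #     await ops.set_metadata(
+    #         metadata={"project": "gn-ai", "stage": "raw"},
+    #         lease_access_conditions=conditions,
+    #     )
+    #
+    # Passing metadata=None instead clears all existing metadata.
+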
+    @distributed_trace_async
+    async def acquire_lease(
+        self,
+        timeout: Optional[int] = None,
+        duration: Optional[int] = None,
+        proposed_lease_id: Optional[str] = None,
+        request_id_parameter: Optional[str] = None,
+        modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """[Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete
+        operations.
+
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param duration: Specifies the duration of the lease, in seconds, or negative one (-1) for a
+         lease that never expires. A non-infinite lease can be between 15 and 60 seconds. A lease
+         duration cannot be changed using renew or change. Default value is None.
+        :type duration: int
+        :param proposed_lease_id: Proposed lease ID, in a GUID string format. The Blob service returns
+         400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid
+         Constructor (String) for a list of valid GUID string formats. Default value is None.
+        :type proposed_lease_id: str
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param modified_access_conditions: Parameter group. Default value is None.
+        :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        comp: Literal["lease"] = kwargs.pop("comp", _params.pop("comp", "lease"))
+        action: Literal["acquire"] = kwargs.pop("action", _headers.pop("x-ms-lease-action", "acquire"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _if_modified_since = None
+        _if_unmodified_since = None
+        _if_match = None
+        _if_none_match = None
+        _if_tags = None
+        if modified_access_conditions is not None:
+            _if_match = modified_access_conditions.if_match
+            _if_modified_since = modified_access_conditions.if_modified_since
+            _if_none_match = modified_access_conditions.if_none_match
+            _if_tags = modified_access_conditions.if_tags
+            _if_unmodified_since = modified_access_conditions.if_unmodified_since
+
+        _request = build_acquire_lease_request(
+            url=self._config.url,
+            timeout=timeout,
+            duration=duration,
+            proposed_lease_id=proposed_lease_id,
+            if_modified_since=_if_modified_since,
+            if_unmodified_since=_if_unmodified_since,
+            if_match=_if_match,
+            if_none_match=_if_none_match,
+            if_tags=_if_tags,
+            request_id_parameter=request_id_parameter,
+            comp=comp,
+            action=action,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [201]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["x-ms-lease-id"] = self._deserialize("str", response.headers.get("x-ms-lease-id"))
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
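+    # Editorial usage sketch, not generated code: acquiring a 30-second lease
+    # on `ops` with a caller-chosen GUID so the lease id is known up front:
+    #
+    #     import uuid
+    #     lease = str(uuid.uuid4())
+    #     await ops.acquire_lease(duration=30, proposed_lease_id=lease)
+    #
+    # duration must be 15-60 seconds, or -1 for an infinite lease; the granted
+    # id is echoed in the x-ms-lease-id response header.
+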
+    @distributed_trace_async
+    async def release_lease(
+        self,
+        lease_id: str,
+        timeout: Optional[int] = None,
+        request_id_parameter: Optional[str] = None,
+        modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """[Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete
+        operations.
+
+        :param lease_id: Specifies the current lease ID on the resource. Required.
+        :type lease_id: str
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param modified_access_conditions: Parameter group. Default value is None.
+        :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        comp: Literal["lease"] = kwargs.pop("comp", _params.pop("comp", "lease"))
+        action: Literal["release"] = kwargs.pop("action", _headers.pop("x-ms-lease-action", "release"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _if_modified_since = None
+        _if_unmodified_since = None
+        _if_match = None
+        _if_none_match = None
+        _if_tags = None
+        if modified_access_conditions is not None:
+            _if_match = modified_access_conditions.if_match
+            _if_modified_since = modified_access_conditions.if_modified_since
+            _if_none_match = modified_access_conditions.if_none_match
+            _if_tags = modified_access_conditions.if_tags
+            _if_unmodified_since = modified_access_conditions.if_unmodified_since
+
+        _request = build_release_lease_request(
+            url=self._config.url,
+            lease_id=lease_id,
+            timeout=timeout,
+            if_modified_since=_if_modified_since,
+            if_unmodified_since=_if_unmodified_since,
+            if_match=_if_match,
+            if_none_match=_if_none_match,
+            if_tags=_if_tags,
+            request_id_parameter=request_id_parameter,
+            comp=comp,
+            action=action,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
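+    # Editorial usage sketch, not generated code: releasing a held lease,
+    # where `lease` is the id of the currently active lease on this blob,
+    # immediately frees the blob for other writers:
+    #
+    #     await ops.release_lease(lease_id=lease)
+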
+    @distributed_trace_async
+    async def renew_lease(
+        self,
+        lease_id: str,
+        timeout: Optional[int] = None,
+        request_id_parameter: Optional[str] = None,
+        modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """[Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete
+        operations.
+
+        :param lease_id: Specifies the current lease ID on the resource. Required.
+        :type lease_id: str
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param modified_access_conditions: Parameter group. Default value is None.
+        :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        comp: Literal["lease"] = kwargs.pop("comp", _params.pop("comp", "lease"))
+        action: Literal["renew"] = kwargs.pop("action", _headers.pop("x-ms-lease-action", "renew"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _if_modified_since = None
+        _if_unmodified_since = None
+        _if_match = None
+        _if_none_match = None
+        _if_tags = None
+        if modified_access_conditions is not None:
+            _if_match = modified_access_conditions.if_match
+            _if_modified_since = modified_access_conditions.if_modified_since
+            _if_none_match = modified_access_conditions.if_none_match
+            _if_tags = modified_access_conditions.if_tags
+            _if_unmodified_since = modified_access_conditions.if_unmodified_since
+
+        _request = build_renew_lease_request(
+            url=self._config.url,
+            lease_id=lease_id,
+            timeout=timeout,
+            if_modified_since=_if_modified_since,
+            if_unmodified_since=_if_unmodified_since,
+            if_match=_if_match,
+            if_none_match=_if_none_match,
+            if_tags=_if_tags,
+            request_id_parameter=request_id_parameter,
+            comp=comp,
+            action=action,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["x-ms-lease-id"] = self._deserialize("str", response.headers.get("x-ms-lease-id"))
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
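+    # Editorial usage sketch, not generated code: a fixed-duration lease is
+    # kept alive by renewing with the same id before it lapses; renewal
+    # restarts the original duration and cannot change it:
+    #
+    #     await ops.renew_lease(lease_id=lease)
+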
+    @distributed_trace_async
+    async def change_lease(
+        self,
+        lease_id: str,
+        proposed_lease_id: str,
+        timeout: Optional[int] = None,
+        request_id_parameter: Optional[str] = None,
+        modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """[Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete
+        operations.
+
+        :param lease_id: Specifies the current lease ID on the resource. Required.
+        :type lease_id: str
+        :param proposed_lease_id: Proposed lease ID, in a GUID string format. The Blob service returns
+         400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid
+         Constructor (String) for a list of valid GUID string formats. Required.
+        :type proposed_lease_id: str
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param modified_access_conditions: Parameter group. Default value is None.
+        :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        comp: Literal["lease"] = kwargs.pop("comp", _params.pop("comp", "lease"))
+        action: Literal["change"] = kwargs.pop("action", _headers.pop("x-ms-lease-action", "change"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _if_modified_since = None
+        _if_unmodified_since = None
+        _if_match = None
+        _if_none_match = None
+        _if_tags = None
+        if modified_access_conditions is not None:
+            _if_match = modified_access_conditions.if_match
+            _if_modified_since = modified_access_conditions.if_modified_since
+            _if_none_match = modified_access_conditions.if_none_match
+            _if_tags = modified_access_conditions.if_tags
+            _if_unmodified_since = modified_access_conditions.if_unmodified_since
+
+        _request = build_change_lease_request(
+            url=self._config.url,
+            lease_id=lease_id,
+            proposed_lease_id=proposed_lease_id,
+            timeout=timeout,
+            if_modified_since=_if_modified_since,
+            if_unmodified_since=_if_unmodified_since,
+            if_match=_if_match,
+            if_none_match=_if_none_match,
+            if_tags=_if_tags,
+            request_id_parameter=request_id_parameter,
+            comp=comp,
+            action=action,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-lease-id"] = self._deserialize("str", response.headers.get("x-ms-lease-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
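+    # Editorial usage sketch, not generated code: handing the lock to a new
+    # owner by swapping in a fresh GUID while the lease stays unbroken
+    # (`uuid` imported as in the acquire sketch above):
+    #
+    #     new_lease = str(uuid.uuid4())
+    #     await ops.change_lease(lease_id=lease, proposed_lease_id=new_lease)
+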
+    @distributed_trace_async
+    async def break_lease(
+        self,
+        timeout: Optional[int] = None,
+        break_period: Optional[int] = None,
+        request_id_parameter: Optional[str] = None,
+        modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """[Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete
+        operations.
+
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param break_period: For a break operation, proposed duration the lease should continue before
+         it is broken, in seconds, between 0 and 60. This break period is only used if it is shorter
+         than the time remaining on the lease. If longer, the time remaining on the lease is used. A new
+         lease will not be available before the break period has expired, but the lease may be held for
+         longer than the break period. If this header does not appear with a break operation, a
+         fixed-duration lease breaks after the remaining lease period elapses, and an infinite lease
+         breaks immediately. Default value is None.
+        :type break_period: int
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param modified_access_conditions: Parameter group. Default value is None.
+        :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        comp: Literal["lease"] = kwargs.pop("comp", _params.pop("comp", "lease"))
+        action: Literal["break"] = kwargs.pop("action", _headers.pop("x-ms-lease-action", "break"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _if_modified_since = None
+        _if_unmodified_since = None
+        _if_match = None
+        _if_none_match = None
+        _if_tags = None
+        if modified_access_conditions is not None:
+            _if_match = modified_access_conditions.if_match
+            _if_modified_since = modified_access_conditions.if_modified_since
+            _if_none_match = modified_access_conditions.if_none_match
+            _if_tags = modified_access_conditions.if_tags
+            _if_unmodified_since = modified_access_conditions.if_unmodified_since
+
+        _request = build_break_lease_request(
+            url=self._config.url,
+            timeout=timeout,
+            break_period=break_period,
+            if_modified_since=_if_modified_since,
+            if_unmodified_since=_if_unmodified_since,
+            if_match=_if_match,
+            if_none_match=_if_none_match,
+            if_tags=_if_tags,
+            request_id_parameter=request_id_parameter,
+            comp=comp,
+            action=action,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [202]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["x-ms-lease-time"] = self._deserialize("int", response.headers.get("x-ms-lease-time"))
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
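+    # Editorial usage sketch, not generated code: breaking a lease requires no
+    # lease id, which makes it the escape hatch when the holder is gone; the
+    # current holder gets up to the stated grace period:
+    #
+    #     await ops.break_lease(break_period=10)
+    #
+    # Seconds remaining before the break completes are returned in the
+    # x-ms-lease-time response header.
+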
+    @distributed_trace_async
+    async def create_snapshot(
+        self,
+        timeout: Optional[int] = None,
+        metadata: Optional[Dict[str, str]] = None,
+        request_id_parameter: Optional[str] = None,
+        cpk_info: Optional[_models.CpkInfo] = None,
+        cpk_scope_info: Optional[_models.CpkScopeInfo] = None,
+        modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None,
+        lease_access_conditions: Optional[_models.LeaseAccessConditions] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """The Create Snapshot operation creates a read-only snapshot of a blob.
+
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param metadata: Optional. Specifies a user-defined name-value pair associated with the blob.
+         If no name-value pairs are specified, the snapshot inherits the metadata of the base blob. If
+         one or more name-value pairs are specified, the snapshot is created with the specified metadata
+         only, and no metadata is copied from the base blob. Note that beginning with version
+         2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and
+         Referencing Containers, Blobs, and Metadata for more information. Default value is None.
+        :type metadata: dict[str, str]
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param cpk_info: Parameter group. Default value is None.
+        :type cpk_info: ~azure.storage.blob.models.CpkInfo
+        :param cpk_scope_info: Parameter group. Default value is None.
+        :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo
+        :param modified_access_conditions: Parameter group. Default value is None.
+        :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions
+        :param lease_access_conditions: Parameter group. Default value is None.
+        :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        comp: Literal["snapshot"] = kwargs.pop("comp", _params.pop("comp", "snapshot"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _encryption_key = None
+        _encryption_key_sha256 = None
+        _encryption_algorithm = None
+        _encryption_scope = None
+        _if_modified_since = None
+        _if_unmodified_since = None
+        _if_match = None
+        _if_none_match = None
+        _if_tags = None
+        _lease_id = None
+        if cpk_info is not None:
+            _encryption_algorithm = cpk_info.encryption_algorithm
+            _encryption_key = cpk_info.encryption_key
+            _encryption_key_sha256 = cpk_info.encryption_key_sha256
+        if cpk_scope_info is not None:
+            _encryption_scope = cpk_scope_info.encryption_scope
+        if modified_access_conditions is not None:
+            _if_match = modified_access_conditions.if_match
+            _if_modified_since = modified_access_conditions.if_modified_since
+            _if_none_match = modified_access_conditions.if_none_match
+            _if_tags = modified_access_conditions.if_tags
+            _if_unmodified_since = modified_access_conditions.if_unmodified_since
+        if lease_access_conditions is not None:
+            _lease_id = lease_access_conditions.lease_id
+
+        _request = build_create_snapshot_request(
+            url=self._config.url,
+            timeout=timeout,
+            metadata=metadata,
+            encryption_key=_encryption_key,
+            encryption_key_sha256=_encryption_key_sha256,
+            encryption_algorithm=_encryption_algorithm,
+            encryption_scope=_encryption_scope,
+            if_modified_since=_if_modified_since,
+            if_unmodified_since=_if_unmodified_since,
+            if_match=_if_match,
+            if_none_match=_if_none_match,
+            if_tags=_if_tags,
+            lease_id=_lease_id,
+            request_id_parameter=request_id_parameter,
+            comp=comp,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [201]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["x-ms-snapshot"] = self._deserialize("str", response.headers.get("x-ms-snapshot"))
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["x-ms-version-id"] = self._deserialize("str", response.headers.get("x-ms-version-id"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+        response_headers["x-ms-request-server-encrypted"] = self._deserialize(
+            "bool", response.headers.get("x-ms-request-server-encrypted")
+        )
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
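+    # Editorial usage sketch, not generated code: taking a snapshot that
+    # carries its own metadata rather than inheriting the base blob's:
+    #
+    #     await ops.create_snapshot(metadata={"reason": "pre-migration"})
+    #
+    # The opaque snapshot timestamp arrives in the x-ms-snapshot header and
+    # can later be passed as the `snapshot` argument of read operations.
+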
+    @distributed_trace_async
+    async def start_copy_from_url(
+        self,
+        copy_source: str,
+        timeout: Optional[int] = None,
+        metadata: Optional[Dict[str, str]] = None,
+        tier: Optional[Union[str, _models.AccessTierOptional]] = None,
+        rehydrate_priority: Optional[Union[str, _models.RehydratePriority]] = None,
+        request_id_parameter: Optional[str] = None,
+        blob_tags_string: Optional[str] = None,
+        seal_blob: Optional[bool] = None,
+        immutability_policy_expiry: Optional[datetime.datetime] = None,
+        immutability_policy_mode: Optional[Union[str, _models.BlobImmutabilityPolicyMode]] = None,
+        legal_hold: Optional[bool] = None,
+        source_modified_access_conditions: Optional[_models.SourceModifiedAccessConditions] = None,
+        modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None,
+        lease_access_conditions: Optional[_models.LeaseAccessConditions] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """The Start Copy From URL operation copies a blob or an internet resource to a new blob.
+
+        :param copy_source: Specifies the URL of the source blob or file. This value is a URL of up to
+         2 KB in length that specifies a blob or a blob snapshot. The value should be URL-encoded as it
+         would appear in a request URI. The source blob must either be public or must be authenticated
+         via a shared access signature. Required.
+        :type copy_source: str
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param metadata: Optional. Specifies a user-defined name-value pair associated with the blob.
+         If no name-value pairs are specified, the operation will copy the metadata from the source blob
+         or file to the destination blob. If one or more name-value pairs are specified, the destination
+         blob is created with the specified metadata, and metadata is not copied from the source blob or
+         file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming
+         rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more
+         information. Default value is None.
+        :type metadata: dict[str, str]
+        :param tier: Optional. Indicates the tier to be set on the blob. Known values are: "P4", "P6",
+         "P10", "P15", "P20", "P30", "P40", "P50", "P60", "P70", "P80", "Hot", "Cool", "Archive", and
+         "Cold". Default value is None.
+        :type tier: str or ~azure.storage.blob.models.AccessTierOptional
+        :param rehydrate_priority: Optional. Indicates the priority with which to rehydrate an archived
+         blob. Known values are: "High" and "Standard". Default value is None.
+        :type rehydrate_priority: str or ~azure.storage.blob.models.RehydratePriority
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param blob_tags_string: Optional. Used to set blob tags in various blob operations. Default
+         value is None.
+        :type blob_tags_string: str
+        :param seal_blob: Overrides the sealed state of the destination blob. Supported by service
+         version 2019-12-12 and newer. Default value is None.
+        :type seal_blob: bool
+        :param immutability_policy_expiry: Specifies the date time when the blob's immutability policy
+         is set to expire. Default value is None.
+        :type immutability_policy_expiry: ~datetime.datetime
+        :param immutability_policy_mode: Specifies the immutability policy mode to set on the blob.
+         Known values are: "Mutable", "Unlocked", and "Locked". Default value is None.
+        :type immutability_policy_mode: str or ~azure.storage.blob.models.BlobImmutabilityPolicyMode
+        :param legal_hold: Specifies whether a legal hold should be set on the blob. Default value is None.
+        :type legal_hold: bool
+        :param source_modified_access_conditions: Parameter group. Default value is None.
+        :type source_modified_access_conditions:
+         ~azure.storage.blob.models.SourceModifiedAccessConditions
+        :param modified_access_conditions: Parameter group. Default value is None.
+        :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions
+        :param lease_access_conditions: Parameter group. Default value is None.
+        :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = kwargs.pop("params", {}) or {}
+
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _source_if_modified_since = None
+        _source_if_unmodified_since = None
+        _source_if_match = None
+        _source_if_none_match = None
+        _source_if_tags = None
+        _if_modified_since = None
+        _if_unmodified_since = None
+        _if_match = None
+        _if_none_match = None
+        _if_tags = None
+        _lease_id = None
+        if source_modified_access_conditions is not None:
+            _source_if_match = source_modified_access_conditions.source_if_match
+            _source_if_modified_since = source_modified_access_conditions.source_if_modified_since
+            _source_if_none_match = source_modified_access_conditions.source_if_none_match
+            _source_if_tags = source_modified_access_conditions.source_if_tags
+            _source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since
+        if modified_access_conditions is not None:
+            _if_match = modified_access_conditions.if_match
+            _if_modified_since = modified_access_conditions.if_modified_since
+            _if_none_match = modified_access_conditions.if_none_match
+            _if_tags = modified_access_conditions.if_tags
+            _if_unmodified_since = modified_access_conditions.if_unmodified_since
+        if lease_access_conditions is not None:
+            _lease_id = lease_access_conditions.lease_id
+
+        _request = build_start_copy_from_url_request(
+            url=self._config.url,
+            copy_source=copy_source,
+            timeout=timeout,
+            metadata=metadata,
+            tier=tier,
+            rehydrate_priority=rehydrate_priority,
+            source_if_modified_since=_source_if_modified_since,
+            source_if_unmodified_since=_source_if_unmodified_since,
+            source_if_match=_source_if_match,
+            source_if_none_match=_source_if_none_match,
+            source_if_tags=_source_if_tags,
+            if_modified_since=_if_modified_since,
+            if_unmodified_since=_if_unmodified_since,
+            if_match=_if_match,
+            if_none_match=_if_none_match,
+            if_tags=_if_tags,
+            lease_id=_lease_id,
+            request_id_parameter=request_id_parameter,
+            blob_tags_string=blob_tags_string,
+            seal_blob=seal_blob,
+            immutability_policy_expiry=immutability_policy_expiry,
+            immutability_policy_mode=immutability_policy_mode,
+            legal_hold=legal_hold,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [202]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["x-ms-version-id"] = self._deserialize("str", response.headers.get("x-ms-version-id"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+        response_headers["x-ms-copy-id"] = self._deserialize("str", response.headers.get("x-ms-copy-id"))
+        response_headers["x-ms-copy-status"] = self._deserialize("str", response.headers.get("x-ms-copy-status"))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
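+    # Editorial usage sketch, not generated code: scheduling an asynchronous
+    # server-side copy from a readable source URL (the account, container and
+    # SAS token below are placeholders):
+    #
+    #     await ops.start_copy_from_url(
+    #         copy_source="https://account.blob.core.windows.net/src/blob?<sas>",
+    #         tier="Cool",
+    #     )
+    #
+    # The call returns once the copy is accepted (HTTP 202); progress is then
+    # tracked via the x-ms-copy-id and x-ms-copy-status response headers.
+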
+    @distributed_trace_async
+    async def copy_from_url(
+        self,
+        copy_source: str,
+        timeout: Optional[int] = None,
+        metadata: Optional[Dict[str, str]] = None,
+        tier: Optional[Union[str, _models.AccessTierOptional]] = None,
+        request_id_parameter: Optional[str] = None,
+        source_content_md5: Optional[bytes] = None,
+        blob_tags_string: Optional[str] = None,
+        immutability_policy_expiry: Optional[datetime.datetime] = None,
+        immutability_policy_mode: Optional[Union[str, _models.BlobImmutabilityPolicyMode]] = None,
+        legal_hold: Optional[bool] = None,
+        copy_source_authorization: Optional[str] = None,
+        copy_source_tags: Optional[Union[str, _models.BlobCopySourceTags]] = None,
+        source_modified_access_conditions: Optional[_models.SourceModifiedAccessConditions] = None,
+        modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None,
+        lease_access_conditions: Optional[_models.LeaseAccessConditions] = None,
+        cpk_scope_info: Optional[_models.CpkScopeInfo] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """The Copy From URL operation copies a blob or an internet resource to a new blob. It will not
+        return a response until the copy is complete.
+
+        :param copy_source: Specifies the URL of the source blob or file. This value is a URL of up to
+         2 KB in length that specifies a blob or a blob snapshot. The value should be URL-encoded as it
+         would appear in a request URI. The source blob must either be public or must be authenticated
+         via a shared access signature. Required.
+        :type copy_source: str
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param metadata: Optional. Specifies a user-defined name-value pair associated with the blob.
+         If no name-value pairs are specified, the operation will copy the metadata from the source blob
+         or file to the destination blob. If one or more name-value pairs are specified, the destination
+         blob is created with the specified metadata, and metadata is not copied from the source blob or
+         file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming
+         rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more
+         information. Default value is None.
+        :type metadata: dict[str, str]
+        :param tier: Optional. Indicates the tier to be set on the blob. Known values are: "P4", "P6",
+         "P10", "P15", "P20", "P30", "P40", "P50", "P60", "P70", "P80", "Hot", "Cool", "Archive", and
+         "Cold". Default value is None.
+        :type tier: str or ~azure.storage.blob.models.AccessTierOptional
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param source_content_md5: Specify the md5 calculated for the range of bytes that must be read
+         from the copy source. Default value is None.
+        :type source_content_md5: bytes
+        :param blob_tags_string: Optional. Used to set blob tags in various blob operations. Default
+         value is None.
+        :type blob_tags_string: str
+        :param immutability_policy_expiry: Specifies the date time when the blob's immutability policy
+         is set to expire. Default value is None.
+        :type immutability_policy_expiry: ~datetime.datetime
+        :param immutability_policy_mode: Specifies the immutability policy mode to set on the blob.
+         Known values are: "Mutable", "Unlocked", and "Locked". Default value is None.
+        :type immutability_policy_mode: str or ~azure.storage.blob.models.BlobImmutabilityPolicyMode
+        :param legal_hold: Specifies whether a legal hold should be set on the blob. Default value is None.
+        :type legal_hold: bool
+        :param copy_source_authorization: Only Bearer type is supported. Credentials should be a valid
+         OAuth access token for the copy source. Default value is None.
+        :type copy_source_authorization: str
+        :param copy_source_tags: Optional, default "REPLACE". Indicates whether source tags should be
+         copied or replaced with the tags specified by x-ms-tags. Known values are: "REPLACE" and
+         "COPY". Default value is None.
+        :type copy_source_tags: str or ~azure.storage.blob.models.BlobCopySourceTags
+        :param source_modified_access_conditions: Parameter group. Default value is None.
+        :type source_modified_access_conditions:
+         ~azure.storage.blob.models.SourceModifiedAccessConditions
+        :param modified_access_conditions: Parameter group. Default value is None.
+        :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions
+        :param lease_access_conditions: Parameter group. Default value is None.
+        :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions
+        :param cpk_scope_info: Parameter group. Default value is None.
+        :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+        _params = kwargs.pop("params", {}) or {}
+
+        x_ms_requires_sync: Literal["true"] = kwargs.pop(
+            "x_ms_requires_sync", _headers.pop("x-ms-requires-sync", "true")
+        )
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _source_if_modified_since = None
+        _source_if_unmodified_since = None
+        _source_if_match = None
+        _source_if_none_match = None
+        _if_modified_since = None
+        _if_unmodified_since = None
+        _if_match = None
+        _if_none_match = None
+        _if_tags = None
+        _lease_id = None
+        _encryption_scope = None
+        if source_modified_access_conditions is not None:
+            _source_if_match = source_modified_access_conditions.source_if_match
+            _source_if_modified_since = source_modified_access_conditions.source_if_modified_since
+            _source_if_none_match = source_modified_access_conditions.source_if_none_match
+            _source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since
+        if modified_access_conditions is not None:
+            _if_match = modified_access_conditions.if_match
+            _if_modified_since = modified_access_conditions.if_modified_since
+            _if_none_match = modified_access_conditions.if_none_match
+            _if_tags = modified_access_conditions.if_tags
+            _if_unmodified_since = modified_access_conditions.if_unmodified_since
+        if lease_access_conditions is not None:
+            _lease_id = lease_access_conditions.lease_id
+        if cpk_scope_info is not None:
+            _encryption_scope = cpk_scope_info.encryption_scope
+
+        _request = build_copy_from_url_request(
+            url=self._config.url,
+            copy_source=copy_source,
+            timeout=timeout,
+            metadata=metadata,
+            tier=tier,
+            source_if_modified_since=_source_if_modified_since,
+            source_if_unmodified_since=_source_if_unmodified_since,
+            source_if_match=_source_if_match,
+            source_if_none_match=_source_if_none_match,
+            if_modified_since=_if_modified_since,
+            if_unmodified_since=_if_unmodified_since,
+            if_match=_if_match,
+            if_none_match=_if_none_match,
+            if_tags=_if_tags,
+            lease_id=_lease_id,
+            request_id_parameter=request_id_parameter,
+            source_content_md5=source_content_md5,
+            blob_tags_string=blob_tags_string,
+            immutability_policy_expiry=immutability_policy_expiry,
+            immutability_policy_mode=immutability_policy_mode,
+            legal_hold=legal_hold,
+            copy_source_authorization=copy_source_authorization,
+            encryption_scope=_encryption_scope,
+            copy_source_tags=copy_source_tags,
+            x_ms_requires_sync=x_ms_requires_sync,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [202]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["x-ms-version-id"] = self._deserialize("str", response.headers.get("x-ms-version-id"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+        response_headers["x-ms-copy-id"] = self._deserialize("str", response.headers.get("x-ms-copy-id"))
+        response_headers["x-ms-copy-status"] = self._deserialize("str", response.headers.get("x-ms-copy-status"))
+        response_headers["Content-MD5"] = self._deserialize("bytearray", response.headers.get("Content-MD5"))
+        response_headers["x-ms-content-crc64"] = self._deserialize(
+            "bytearray", response.headers.get("x-ms-content-crc64")
+        )
+        response_headers["x-ms-encryption-scope"] = self._deserialize(
+            "str", response.headers.get("x-ms-encryption-scope")
+        )
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
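+    # Usage sketch (illustrative only; callers normally reach this operation through
+    # the public BlobClient rather than invoking the generated layer directly).
+    # Assuming an authenticated aio AzureBlobStorage client named `client` and a
+    # readable source URL, a synchronous server-side copy could look like:
+    #
+    #     await client.blob.copy_from_url(
+    #         copy_source="https://src.blob.core.windows.net/container/blob?<sas>",
+    #         metadata={"origin": "copy"},
+    #         tier="Cool",
+    #     )
+    #
+    # The copy completes synchronously because x-ms-requires-sync is pinned to
+    # "true" above, and the service acknowledges it with HTTP 202.
+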
+    @distributed_trace_async
+    async def abort_copy_from_url(
+        self,
+        copy_id: str,
+        timeout: Optional[int] = None,
+        request_id_parameter: Optional[str] = None,
+        lease_access_conditions: Optional[_models.LeaseAccessConditions] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """The Abort Copy From URL operation aborts a pending Copy From URL operation, and leaves a
+        destination blob with zero length and full metadata.
+
+        :param copy_id: The copy identifier provided in the x-ms-copy-id header of the original Copy
+         Blob operation. Required.
+        :type copy_id: str
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param lease_access_conditions: Parameter group. Default value is None.
+        :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        comp: Literal["copy"] = kwargs.pop("comp", _params.pop("comp", "copy"))
+        copy_action_abort_constant: Literal["abort"] = kwargs.pop(
+            "copy_action_abort_constant", _headers.pop("x-ms-copy-action", "abort")
+        )
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _lease_id = None
+        if lease_access_conditions is not None:
+            _lease_id = lease_access_conditions.lease_id
+
+        _request = build_abort_copy_from_url_request(
+            url=self._config.url,
+            copy_id=copy_id,
+            timeout=timeout,
+            lease_id=_lease_id,
+            request_id_parameter=request_id_parameter,
+            comp=comp,
+            copy_action_abort_constant=copy_action_abort_constant,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [204]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
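+    # Illustrative sketch (assumed usage, not part of the generated surface): an
+    # abort needs the copy id that the original Copy Blob call returned in its
+    # x-ms-copy-id header:
+    #
+    #     await client.blob.abort_copy_from_url(copy_id=copy_id)
+    #
+    # A successful abort answers HTTP 204 and leaves the destination blob at zero
+    # length with its metadata intact.
+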
+    @distributed_trace_async
+    async def set_tier(
+        self,
+        tier: Union[str, _models.AccessTierRequired],
+        snapshot: Optional[str] = None,
+        version_id: Optional[str] = None,
+        timeout: Optional[int] = None,
+        rehydrate_priority: Optional[Union[str, _models.RehydratePriority]] = None,
+        request_id_parameter: Optional[str] = None,
+        lease_access_conditions: Optional[_models.LeaseAccessConditions] = None,
+        modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """The Set Tier operation sets the tier on a blob. The operation is allowed on a page blob in a
+        premium storage account and on a block blob in a blob storage account (locally redundant
+        storage only). A premium page blob's tier determines the allowed size, IOPS, and bandwidth of
+        the blob. A block blob's tier determines Hot/Cool/Archive storage type. This operation does not
+        update the blob's ETag.
+
+        :param tier: Indicates the tier to be set on the blob. Known values are: "P4", "P6", "P10",
+         "P15", "P20", "P30", "P40", "P50", "P60", "P70", "P80", "Hot", "Cool", "Archive", and "Cold".
+         Required.
+        :type tier: str or ~azure.storage.blob.models.AccessTierRequired
+        :param snapshot: The snapshot parameter is an opaque DateTime value that, when present,
+         specifies the blob snapshot to retrieve. For more information on working with blob snapshots,
+         see :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob">Creating
+         a Snapshot of a Blob.</a>`. Default value is None.
+        :type snapshot: str
+        :param version_id: The version id parameter is an opaque DateTime value that, when present,
+         specifies the version of the blob to operate on. It's for service version 2019-10-10 and newer.
+         Default value is None.
+        :type version_id: str
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param rehydrate_priority: Optional. Indicates the priority with which to rehydrate an
+         archived blob. Known values are: "High" and "Standard". Default value is None.
+        :type rehydrate_priority: str or ~azure.storage.blob.models.RehydratePriority
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param lease_access_conditions: Parameter group. Default value is None.
+        :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions
+        :param modified_access_conditions: Parameter group. Default value is None.
+        :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        comp: Literal["tier"] = kwargs.pop("comp", _params.pop("comp", "tier"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _lease_id = None
+        _if_tags = None
+        if lease_access_conditions is not None:
+            _lease_id = lease_access_conditions.lease_id
+        if modified_access_conditions is not None:
+            _if_tags = modified_access_conditions.if_tags
+
+        _request = build_set_tier_request(
+            url=self._config.url,
+            tier=tier,
+            snapshot=snapshot,
+            version_id=version_id,
+            timeout=timeout,
+            rehydrate_priority=rehydrate_priority,
+            request_id_parameter=request_id_parameter,
+            lease_id=_lease_id,
+            if_tags=_if_tags,
+            comp=comp,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200, 202]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
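+    # Illustrative sketch: rehydrating an archived block blob with high priority,
+    # assuming an authenticated client named `client`:
+    #
+    #     await client.blob.set_tier(tier="Hot", rehydrate_priority="High")
+    #
+    # The service replies 200 when the tier change takes effect immediately and 202
+    # when the blob must first be rehydrated from Archive, which is why both status
+    # codes are accepted above.
+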
+    @distributed_trace_async
+    async def get_account_info(
+        self, timeout: Optional[int] = None, request_id_parameter: Optional[str] = None, **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """Returns the sku name and account kind.
+
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        restype: Literal["account"] = kwargs.pop("restype", _params.pop("restype", "account"))
+        comp: Literal["properties"] = kwargs.pop("comp", _params.pop("comp", "properties"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _request = build_get_account_info_request(
+            url=self._config.url,
+            timeout=timeout,
+            request_id_parameter=request_id_parameter,
+            restype=restype,
+            comp=comp,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+        response_headers["x-ms-sku-name"] = self._deserialize("str", response.headers.get("x-ms-sku-name"))
+        response_headers["x-ms-account-kind"] = self._deserialize("str", response.headers.get("x-ms-account-kind"))
+        response_headers["x-ms-is-hns-enabled"] = self._deserialize("bool", response.headers.get("x-ms-is-hns-enabled"))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
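+    # Illustrative sketch: the account information is returned purely in response
+    # headers (x-ms-sku-name, x-ms-account-kind, x-ms-is-hns-enabled), so a caller
+    # who wants the raw values can pass a `cls` callback to capture them:
+    #
+    #     headers = await client.blob.get_account_info(
+    #         cls=lambda response, body, hdrs: hdrs
+    #     )
+    #     print(headers["x-ms-sku-name"], headers["x-ms-account-kind"])
+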
+    @distributed_trace_async
+    async def query(
+        self,
+        snapshot: Optional[str] = None,
+        timeout: Optional[int] = None,
+        request_id_parameter: Optional[str] = None,
+        lease_access_conditions: Optional[_models.LeaseAccessConditions] = None,
+        cpk_info: Optional[_models.CpkInfo] = None,
+        modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None,
+        query_request: Optional[_models.QueryRequest] = None,
+        **kwargs: Any
+    ) -> AsyncIterator[bytes]:
+        # pylint: disable=line-too-long
+        """The Query operation enables users to select/project on blob data by providing simple query
+        expressions.
+
+        :param snapshot: The snapshot parameter is an opaque DateTime value that, when present,
+         specifies the blob snapshot to retrieve. For more information on working with blob snapshots,
+         see :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob">Creating
+         a Snapshot of a Blob.</a>`. Default value is None.
+        :type snapshot: str
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param lease_access_conditions: Parameter group. Default value is None.
+        :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions
+        :param cpk_info: Parameter group. Default value is None.
+        :type cpk_info: ~azure.storage.blob.models.CpkInfo
+        :param modified_access_conditions: Parameter group. Default value is None.
+        :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions
+        :param query_request: The query request. Default value is None.
+        :type query_request: ~azure.storage.blob.models.QueryRequest
+        :return: AsyncIterator[bytes] or the result of cls(response)
+        :rtype: AsyncIterator[bytes]
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        comp: Literal["query"] = kwargs.pop("comp", _params.pop("comp", "query"))
+        content_type: str = kwargs.pop("content_type", _headers.pop("Content-Type", "application/xml"))
+        cls: ClsType[AsyncIterator[bytes]] = kwargs.pop("cls", None)
+
+        _lease_id = None
+        _encryption_key = None
+        _encryption_key_sha256 = None
+        _encryption_algorithm = None
+        _if_modified_since = None
+        _if_unmodified_since = None
+        _if_match = None
+        _if_none_match = None
+        _if_tags = None
+        if lease_access_conditions is not None:
+            _lease_id = lease_access_conditions.lease_id
+        if cpk_info is not None:
+            _encryption_algorithm = cpk_info.encryption_algorithm
+            _encryption_key = cpk_info.encryption_key
+            _encryption_key_sha256 = cpk_info.encryption_key_sha256
+        if modified_access_conditions is not None:
+            _if_match = modified_access_conditions.if_match
+            _if_modified_since = modified_access_conditions.if_modified_since
+            _if_none_match = modified_access_conditions.if_none_match
+            _if_tags = modified_access_conditions.if_tags
+            _if_unmodified_since = modified_access_conditions.if_unmodified_since
+        if query_request is not None:
+            _content = self._serialize.body(query_request, "QueryRequest", is_xml=True)
+        else:
+            _content = None
+
+        _request = build_query_request(
+            url=self._config.url,
+            snapshot=snapshot,
+            timeout=timeout,
+            lease_id=_lease_id,
+            encryption_key=_encryption_key,
+            encryption_key_sha256=_encryption_key_sha256,
+            encryption_algorithm=_encryption_algorithm,
+            if_modified_since=_if_modified_since,
+            if_unmodified_since=_if_unmodified_since,
+            if_match=_if_match,
+            if_none_match=_if_none_match,
+            if_tags=_if_tags,
+            request_id_parameter=request_id_parameter,
+            comp=comp,
+            content_type=content_type,
+            version=self._config.version,
+            content=_content,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _decompress = kwargs.pop("decompress", True)
+        _stream = True
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200, 206]:
+            try:
+                await response.read()  # Load the body in memory and close the socket
+            except (StreamConsumedError, StreamClosedError):
+                pass
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        if response.status_code == 200:
+            response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+            response_headers["x-ms-meta"] = self._deserialize("{str}", response.headers.get("x-ms-meta"))
+            response_headers["Content-Length"] = self._deserialize("int", response.headers.get("Content-Length"))
+            response_headers["Content-Type"] = self._deserialize("str", response.headers.get("Content-Type"))
+            response_headers["Content-Range"] = self._deserialize("str", response.headers.get("Content-Range"))
+            response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+            response_headers["Content-MD5"] = self._deserialize("bytearray", response.headers.get("Content-MD5"))
+            response_headers["Content-Encoding"] = self._deserialize("str", response.headers.get("Content-Encoding"))
+            response_headers["Cache-Control"] = self._deserialize("str", response.headers.get("Cache-Control"))
+            response_headers["Content-Disposition"] = self._deserialize(
+                "str", response.headers.get("Content-Disposition")
+            )
+            response_headers["Content-Language"] = self._deserialize("str", response.headers.get("Content-Language"))
+            response_headers["x-ms-blob-sequence-number"] = self._deserialize(
+                "int", response.headers.get("x-ms-blob-sequence-number")
+            )
+            response_headers["x-ms-blob-type"] = self._deserialize("str", response.headers.get("x-ms-blob-type"))
+            response_headers["x-ms-copy-completion-time"] = self._deserialize(
+                "rfc-1123", response.headers.get("x-ms-copy-completion-time")
+            )
+            response_headers["x-ms-copy-status-description"] = self._deserialize(
+                "str", response.headers.get("x-ms-copy-status-description")
+            )
+            response_headers["x-ms-copy-id"] = self._deserialize("str", response.headers.get("x-ms-copy-id"))
+            response_headers["x-ms-copy-progress"] = self._deserialize(
+                "str", response.headers.get("x-ms-copy-progress")
+            )
+            response_headers["x-ms-copy-source"] = self._deserialize("str", response.headers.get("x-ms-copy-source"))
+            response_headers["x-ms-copy-status"] = self._deserialize("str", response.headers.get("x-ms-copy-status"))
+            response_headers["x-ms-lease-duration"] = self._deserialize(
+                "str", response.headers.get("x-ms-lease-duration")
+            )
+            response_headers["x-ms-lease-state"] = self._deserialize("str", response.headers.get("x-ms-lease-state"))
+            response_headers["x-ms-lease-status"] = self._deserialize("str", response.headers.get("x-ms-lease-status"))
+            response_headers["x-ms-client-request-id"] = self._deserialize(
+                "str", response.headers.get("x-ms-client-request-id")
+            )
+            response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+            response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+            response_headers["Accept-Ranges"] = self._deserialize("str", response.headers.get("Accept-Ranges"))
+            response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+            response_headers["x-ms-blob-committed-block-count"] = self._deserialize(
+                "int", response.headers.get("x-ms-blob-committed-block-count")
+            )
+            response_headers["x-ms-server-encrypted"] = self._deserialize(
+                "bool", response.headers.get("x-ms-server-encrypted")
+            )
+            response_headers["x-ms-encryption-key-sha256"] = self._deserialize(
+                "str", response.headers.get("x-ms-encryption-key-sha256")
+            )
+            response_headers["x-ms-encryption-scope"] = self._deserialize(
+                "str", response.headers.get("x-ms-encryption-scope")
+            )
+            response_headers["x-ms-blob-content-md5"] = self._deserialize(
+                "bytearray", response.headers.get("x-ms-blob-content-md5")
+            )
+
+        if response.status_code == 206:
+            response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+            response_headers["x-ms-meta"] = self._deserialize("{str}", response.headers.get("x-ms-meta"))
+            response_headers["Content-Length"] = self._deserialize("int", response.headers.get("Content-Length"))
+            response_headers["Content-Type"] = self._deserialize("str", response.headers.get("Content-Type"))
+            response_headers["Content-Range"] = self._deserialize("str", response.headers.get("Content-Range"))
+            response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+            response_headers["Content-MD5"] = self._deserialize("bytearray", response.headers.get("Content-MD5"))
+            response_headers["Content-Encoding"] = self._deserialize("str", response.headers.get("Content-Encoding"))
+            response_headers["Cache-Control"] = self._deserialize("str", response.headers.get("Cache-Control"))
+            response_headers["Content-Disposition"] = self._deserialize(
+                "str", response.headers.get("Content-Disposition")
+            )
+            response_headers["Content-Language"] = self._deserialize("str", response.headers.get("Content-Language"))
+            response_headers["x-ms-blob-sequence-number"] = self._deserialize(
+                "int", response.headers.get("x-ms-blob-sequence-number")
+            )
+            response_headers["x-ms-blob-type"] = self._deserialize("str", response.headers.get("x-ms-blob-type"))
+            response_headers["x-ms-content-crc64"] = self._deserialize(
+                "bytearray", response.headers.get("x-ms-content-crc64")
+            )
+            response_headers["x-ms-copy-completion-time"] = self._deserialize(
+                "rfc-1123", response.headers.get("x-ms-copy-completion-time")
+            )
+            response_headers["x-ms-copy-status-description"] = self._deserialize(
+                "str", response.headers.get("x-ms-copy-status-description")
+            )
+            response_headers["x-ms-copy-id"] = self._deserialize("str", response.headers.get("x-ms-copy-id"))
+            response_headers["x-ms-copy-progress"] = self._deserialize(
+                "str", response.headers.get("x-ms-copy-progress")
+            )
+            response_headers["x-ms-copy-source"] = self._deserialize("str", response.headers.get("x-ms-copy-source"))
+            response_headers["x-ms-copy-status"] = self._deserialize("str", response.headers.get("x-ms-copy-status"))
+            response_headers["x-ms-lease-duration"] = self._deserialize(
+                "str", response.headers.get("x-ms-lease-duration")
+            )
+            response_headers["x-ms-lease-state"] = self._deserialize("str", response.headers.get("x-ms-lease-state"))
+            response_headers["x-ms-lease-status"] = self._deserialize("str", response.headers.get("x-ms-lease-status"))
+            response_headers["x-ms-client-request-id"] = self._deserialize(
+                "str", response.headers.get("x-ms-client-request-id")
+            )
+            response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+            response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+            response_headers["Accept-Ranges"] = self._deserialize("str", response.headers.get("Accept-Ranges"))
+            response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+            response_headers["x-ms-blob-committed-block-count"] = self._deserialize(
+                "int", response.headers.get("x-ms-blob-committed-block-count")
+            )
+            response_headers["x-ms-server-encrypted"] = self._deserialize(
+                "bool", response.headers.get("x-ms-server-encrypted")
+            )
+            response_headers["x-ms-encryption-key-sha256"] = self._deserialize(
+                "str", response.headers.get("x-ms-encryption-key-sha256")
+            )
+            response_headers["x-ms-encryption-scope"] = self._deserialize(
+                "str", response.headers.get("x-ms-encryption-scope")
+            )
+            response_headers["x-ms-blob-content-md5"] = self._deserialize(
+                "bytearray", response.headers.get("x-ms-blob-content-md5")
+            )
+
+        deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
+
+        if cls:
+            return cls(pipeline_response, deserialized, response_headers)  # type: ignore
+
+        return deserialized  # type: ignore
+
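+    # Illustrative sketch: the result is streamed, so the AsyncIterator[bytes] that
+    # comes back should be consumed chunk by chunk. Field names on QueryRequest are
+    # assumed from the generated models:
+    #
+    #     request = _models.QueryRequest(expression="SELECT * FROM BlobStorage")
+    #     stream = await client.blob.query(query_request=request)
+    #     async for chunk in stream:
+    #         handle(chunk)  # hypothetical consumer
+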
+    @distributed_trace_async
+    async def get_tags(
+        self,
+        timeout: Optional[int] = None,
+        request_id_parameter: Optional[str] = None,
+        snapshot: Optional[str] = None,
+        version_id: Optional[str] = None,
+        modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None,
+        lease_access_conditions: Optional[_models.LeaseAccessConditions] = None,
+        **kwargs: Any
+    ) -> _models.BlobTags:
+        # pylint: disable=line-too-long
+        """The Get Tags operation enables users to get the tags associated with a blob.
+
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param snapshot: The snapshot parameter is an opaque DateTime value that, when present,
+         specifies the blob snapshot to retrieve. For more information on working with blob snapshots,
+         see :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob">Creating
+         a Snapshot of a Blob.</a>`. Default value is None.
+        :type snapshot: str
+        :param version_id: The version id parameter is an opaque DateTime value that, when present,
+         specifies the version of the blob to operate on. It's for service version 2019-10-10 and newer.
+         Default value is None.
+        :type version_id: str
+        :param modified_access_conditions: Parameter group. Default value is None.
+        :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions
+        :param lease_access_conditions: Parameter group. Default value is None.
+        :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions
+        :return: BlobTags or the result of cls(response)
+        :rtype: ~azure.storage.blob.models.BlobTags
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        comp: Literal["tags"] = kwargs.pop("comp", _params.pop("comp", "tags"))
+        cls: ClsType[_models.BlobTags] = kwargs.pop("cls", None)
+
+        _if_tags = None
+        _lease_id = None
+        if modified_access_conditions is not None:
+            _if_tags = modified_access_conditions.if_tags
+        if lease_access_conditions is not None:
+            _lease_id = lease_access_conditions.lease_id
+
+        _request = build_get_tags_request(
+            url=self._config.url,
+            timeout=timeout,
+            request_id_parameter=request_id_parameter,
+            snapshot=snapshot,
+            version_id=version_id,
+            if_tags=_if_tags,
+            lease_id=_lease_id,
+            comp=comp,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+
+        deserialized = self._deserialize("BlobTags", pipeline_response.http_response)
+
+        if cls:
+            return cls(pipeline_response, deserialized, response_headers)  # type: ignore
+
+        return deserialized  # type: ignore
+
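+    # Illustrative sketch: tags come back as a BlobTags model whose blob_tag_set is
+    # a list of BlobTag entries, so converting to a plain dict is straightforward:
+    #
+    #     blob_tags = await client.blob.get_tags()
+    #     as_dict = {t.key: t.value for t in blob_tags.blob_tag_set}
+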
+    @distributed_trace_async
+    async def set_tags(
+        self,
+        timeout: Optional[int] = None,
+        version_id: Optional[str] = None,
+        transactional_content_md5: Optional[bytes] = None,
+        transactional_content_crc64: Optional[bytes] = None,
+        request_id_parameter: Optional[str] = None,
+        modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None,
+        lease_access_conditions: Optional[_models.LeaseAccessConditions] = None,
+        tags: Optional[_models.BlobTags] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """The Set Tags operation enables users to set tags on a blob.
+
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param version_id: The version id parameter is an opaque DateTime value that, when present,
+         specifies the version of the blob to operate on. It's for service version 2019-10-10 and newer.
+         Default value is None.
+        :type version_id: str
+        :param transactional_content_md5: Specify the transactional MD5 hash for the body, to be
+         validated by the service. Default value is None.
+        :type transactional_content_md5: bytes
+        :param transactional_content_crc64: Specify the transactional CRC64 checksum for the body, to
+         be validated by the service. Default value is None.
+        :type transactional_content_crc64: bytes
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param modified_access_conditions: Parameter group. Default value is None.
+        :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions
+        :param lease_access_conditions: Parameter group. Default value is None.
+        :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions
+        :param tags: Blob tags. Default value is None.
+        :type tags: ~azure.storage.blob.models.BlobTags
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        comp: Literal["tags"] = kwargs.pop("comp", _params.pop("comp", "tags"))
+        content_type: str = kwargs.pop("content_type", _headers.pop("Content-Type", "application/xml"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _if_tags = None
+        _lease_id = None
+        if modified_access_conditions is not None:
+            _if_tags = modified_access_conditions.if_tags
+        if lease_access_conditions is not None:
+            _lease_id = lease_access_conditions.lease_id
+        if tags is not None:
+            _content = self._serialize.body(tags, "BlobTags", is_xml=True)
+        else:
+            _content = None
+
+        _request = build_set_tags_request(
+            url=self._config.url,
+            timeout=timeout,
+            version_id=version_id,
+            transactional_content_md5=transactional_content_md5,
+            transactional_content_crc64=transactional_content_crc64,
+            request_id_parameter=request_id_parameter,
+            if_tags=_if_tags,
+            lease_id=_lease_id,
+            comp=comp,
+            content_type=content_type,
+            version=self._config.version,
+            content=_content,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [204]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
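+
+    # Illustrative sketch: Set Tags sends a BlobTags XML body; the if_tags condition
+    # can be used to guard against concurrent writers:
+    #
+    #     new_tags = _models.BlobTags(
+    #         blob_tag_set=[_models.BlobTag(key="project", value="demo")]
+    #     )
+    #     await client.blob.set_tags(tags=new_tags)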
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/blob/_generated/aio/operations/_block_blob_operations.py b/.venv/lib/python3.12/site-packages/azure/storage/blob/_generated/aio/operations/_block_blob_operations.py
new file mode 100644
index 00000000..cdd31733
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/blob/_generated/aio/operations/_block_blob_operations.py
@@ -0,0 +1,1167 @@
+# pylint: disable=too-many-lines
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+import datetime
+import sys
+from typing import Any, Callable, Dict, IO, Literal, Optional, TypeVar, Union
+
+from azure.core import AsyncPipelineClient
+from azure.core.exceptions import (
+    ClientAuthenticationError,
+    HttpResponseError,
+    ResourceExistsError,
+    ResourceNotFoundError,
+    ResourceNotModifiedError,
+    map_error,
+)
+from azure.core.pipeline import PipelineResponse
+from azure.core.rest import AsyncHttpResponse, HttpRequest
+from azure.core.tracing.decorator_async import distributed_trace_async
+from azure.core.utils import case_insensitive_dict
+
+from ... import models as _models
+from ..._serialization import Deserializer, Serializer
+from ...operations._block_blob_operations import (
+    build_commit_block_list_request,
+    build_get_block_list_request,
+    build_put_blob_from_url_request,
+    build_stage_block_from_url_request,
+    build_stage_block_request,
+    build_upload_request,
+)
+from .._configuration import AzureBlobStorageConfiguration
+
+if sys.version_info >= (3, 9):
+    from collections.abc import MutableMapping
+else:
+    from typing import MutableMapping  # type: ignore
+T = TypeVar("T")
+ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
+
+
+class BlockBlobOperations:
+    """
+    .. warning::
+        **DO NOT** instantiate this class directly.
+
+        Instead, you should access the following operations through
+        :class:`~azure.storage.blob.aio.AzureBlobStorage`'s
+        :attr:`block_blob` attribute.
+    """
+
+    models = _models
+
+    def __init__(self, *args, **kwargs) -> None:
+        input_args = list(args)
+        self._client: AsyncPipelineClient = input_args.pop(0) if input_args else kwargs.pop("client")
+        self._config: AzureBlobStorageConfiguration = input_args.pop(0) if input_args else kwargs.pop("config")
+        self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer")
+        self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer")
+
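+    # Illustrative note: this operations class is wired up by the aio
+    # AzureBlobStorage client, which passes the shared pipeline client, config,
+    # serializer and deserializer, and exposes the instance as its `block_blob`
+    # attribute:
+    #
+    #     ops = AzureBlobStorage(url=account_url).block_blob  # assumed wiring
+    #     await ops.upload(content_length=len(data), body=data_stream)
+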
+    @distributed_trace_async
+    async def upload(
+        self,
+        content_length: int,
+        body: IO[bytes],
+        timeout: Optional[int] = None,
+        transactional_content_md5: Optional[bytes] = None,
+        metadata: Optional[Dict[str, str]] = None,
+        tier: Optional[Union[str, _models.AccessTierOptional]] = None,
+        request_id_parameter: Optional[str] = None,
+        blob_tags_string: Optional[str] = None,
+        immutability_policy_expiry: Optional[datetime.datetime] = None,
+        immutability_policy_mode: Optional[Union[str, _models.BlobImmutabilityPolicyMode]] = None,
+        legal_hold: Optional[bool] = None,
+        transactional_content_crc64: Optional[bytes] = None,
+        structured_body_type: Optional[str] = None,
+        structured_content_length: Optional[int] = None,
+        blob_http_headers: Optional[_models.BlobHTTPHeaders] = None,
+        lease_access_conditions: Optional[_models.LeaseAccessConditions] = None,
+        cpk_info: Optional[_models.CpkInfo] = None,
+        cpk_scope_info: Optional[_models.CpkScopeInfo] = None,
+        modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """The Upload Block Blob operation updates the content of an existing block blob. Updating an
+        existing block blob overwrites any existing metadata on the blob. Partial updates are not
+        supported with Put Blob; the content of the existing blob is overwritten with the content of
+        the new blob. To perform a partial update of the content of a block blob, use the Put Block
+        List operation.
+
+        :param content_length: The length of the request. Required.
+        :type content_length: int
+        :param body: Initial data. Required.
+        :type body: IO[bytes]
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param transactional_content_md5: Specify the transactional MD5 hash for the body, to be
+         validated by the service. Default value is None.
+        :type transactional_content_md5: bytes
+        :param metadata: Optional. Specifies a user-defined name-value pair associated with the blob.
+         If no name-value pairs are specified, the operation will copy the metadata from the source blob
+         or file to the destination blob. If one or more name-value pairs are specified, the destination
+         blob is created with the specified metadata, and metadata is not copied from the source blob or
+         file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming
+         rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more
+         information. Default value is None.
+        :type metadata: dict[str, str]
+        :param tier: Optional. Indicates the tier to be set on the blob. Known values are: "P4", "P6",
+         "P10", "P15", "P20", "P30", "P40", "P50", "P60", "P70", "P80", "Hot", "Cool", "Archive", and
+         "Cold". Default value is None.
+        :type tier: str or ~azure.storage.blob.models.AccessTierOptional
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param blob_tags_string: Optional. Used to set blob tags in various blob operations. Default
+         value is None.
+        :type blob_tags_string: str
+        :param immutability_policy_expiry: Specifies the date and time when the blob's immutability
+         policy is set to expire. Default value is None.
+        :type immutability_policy_expiry: ~datetime.datetime
+        :param immutability_policy_mode: Specifies the immutability policy mode to set on the blob.
+         Known values are: "Mutable", "Unlocked", and "Locked". Default value is None.
+        :type immutability_policy_mode: str or ~azure.storage.blob.models.BlobImmutabilityPolicyMode
+        :param legal_hold: Specifies whether a legal hold should be set on the blob. Default value is
+         None.
+        :type legal_hold: bool
+        :param transactional_content_crc64: Specify the transactional CRC64 checksum for the body, to
+         be validated by the service. Default value is None.
+        :type transactional_content_crc64: bytes
+        :param structured_body_type: Required if the request body is a structured message. Specifies
+         the message schema version and properties. Default value is None.
+        :type structured_body_type: str
+        :param structured_content_length: Required if the request body is a structured message.
+         Specifies the length of the blob/file content inside the message body. Will always be smaller
+         than Content-Length. Default value is None.
+        :type structured_content_length: int
+        :param blob_http_headers: Parameter group. Default value is None.
+        :type blob_http_headers: ~azure.storage.blob.models.BlobHTTPHeaders
+        :param lease_access_conditions: Parameter group. Default value is None.
+        :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions
+        :param cpk_info: Parameter group. Default value is None.
+        :type cpk_info: ~azure.storage.blob.models.CpkInfo
+        :param cpk_scope_info: Parameter group. Default value is None.
+        :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo
+        :param modified_access_conditions: Parameter group. Default value is None.
+        :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+        _params = kwargs.pop("params", {}) or {}
+
+        blob_type: Literal["BlockBlob"] = kwargs.pop("blob_type", _headers.pop("x-ms-blob-type", "BlockBlob"))
+        content_type: str = kwargs.pop("content_type", _headers.pop("Content-Type", "application/octet-stream"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _blob_content_type = None
+        _blob_content_encoding = None
+        _blob_content_language = None
+        _blob_content_md5 = None
+        _blob_cache_control = None
+        _lease_id = None
+        _blob_content_disposition = None
+        _encryption_key = None
+        _encryption_key_sha256 = None
+        _encryption_algorithm = None
+        _encryption_scope = None
+        _if_modified_since = None
+        _if_unmodified_since = None
+        _if_match = None
+        _if_none_match = None
+        _if_tags = None
+        if blob_http_headers is not None:
+            _blob_cache_control = blob_http_headers.blob_cache_control
+            _blob_content_disposition = blob_http_headers.blob_content_disposition
+            _blob_content_encoding = blob_http_headers.blob_content_encoding
+            _blob_content_language = blob_http_headers.blob_content_language
+            _blob_content_md5 = blob_http_headers.blob_content_md5
+            _blob_content_type = blob_http_headers.blob_content_type
+        if lease_access_conditions is not None:
+            _lease_id = lease_access_conditions.lease_id
+        if cpk_info is not None:
+            _encryption_algorithm = cpk_info.encryption_algorithm
+            _encryption_key = cpk_info.encryption_key
+            _encryption_key_sha256 = cpk_info.encryption_key_sha256
+        if cpk_scope_info is not None:
+            _encryption_scope = cpk_scope_info.encryption_scope
+        if modified_access_conditions is not None:
+            _if_match = modified_access_conditions.if_match
+            _if_modified_since = modified_access_conditions.if_modified_since
+            _if_none_match = modified_access_conditions.if_none_match
+            _if_tags = modified_access_conditions.if_tags
+            _if_unmodified_since = modified_access_conditions.if_unmodified_since
+        _content = body
+
+        _request = build_upload_request(
+            url=self._config.url,
+            content_length=content_length,
+            timeout=timeout,
+            transactional_content_md5=transactional_content_md5,
+            blob_content_type=_blob_content_type,
+            blob_content_encoding=_blob_content_encoding,
+            blob_content_language=_blob_content_language,
+            blob_content_md5=_blob_content_md5,
+            blob_cache_control=_blob_cache_control,
+            metadata=metadata,
+            lease_id=_lease_id,
+            blob_content_disposition=_blob_content_disposition,
+            encryption_key=_encryption_key,
+            encryption_key_sha256=_encryption_key_sha256,
+            encryption_algorithm=_encryption_algorithm,
+            encryption_scope=_encryption_scope,
+            tier=tier,
+            if_modified_since=_if_modified_since,
+            if_unmodified_since=_if_unmodified_since,
+            if_match=_if_match,
+            if_none_match=_if_none_match,
+            if_tags=_if_tags,
+            request_id_parameter=request_id_parameter,
+            blob_tags_string=blob_tags_string,
+            immutability_policy_expiry=immutability_policy_expiry,
+            immutability_policy_mode=immutability_policy_mode,
+            legal_hold=legal_hold,
+            transactional_content_crc64=transactional_content_crc64,
+            structured_body_type=structured_body_type,
+            structured_content_length=structured_content_length,
+            blob_type=blob_type,
+            content_type=content_type,
+            version=self._config.version,
+            content=_content,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [201]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["Content-MD5"] = self._deserialize("bytearray", response.headers.get("Content-MD5"))
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["x-ms-version-id"] = self._deserialize("str", response.headers.get("x-ms-version-id"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+        response_headers["x-ms-request-server-encrypted"] = self._deserialize(
+            "bool", response.headers.get("x-ms-request-server-encrypted")
+        )
+        response_headers["x-ms-encryption-key-sha256"] = self._deserialize(
+            "str", response.headers.get("x-ms-encryption-key-sha256")
+        )
+        response_headers["x-ms-encryption-scope"] = self._deserialize(
+            "str", response.headers.get("x-ms-encryption-scope")
+        )
+        response_headers["x-ms-structured-body"] = self._deserialize(
+            "str", response.headers.get("x-ms-structured-body")
+        )
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
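+    # --- Editorial usage sketch (not part of the generated code) ----------------
+    # A minimal call to the raw ``upload`` operation above, kept commented so the
+    # vendored module is unaffected. It assumes ``client`` is an already-constructed
+    # ``AzureBlobStorage`` async client whose URL points at the destination blob.
+    #
+    #     import io
+    #     data = b"hello, block blob"
+    #     await client.block_blob.upload(
+    #         content_length=len(data),
+    #         body=io.BytesIO(data),
+    #         timeout=30,
+    #     )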
+    @distributed_trace_async
+    async def put_blob_from_url(
+        self,
+        content_length: int,
+        copy_source: str,
+        timeout: Optional[int] = None,
+        transactional_content_md5: Optional[bytes] = None,
+        metadata: Optional[Dict[str, str]] = None,
+        tier: Optional[Union[str, _models.AccessTierOptional]] = None,
+        request_id_parameter: Optional[str] = None,
+        source_content_md5: Optional[bytes] = None,
+        blob_tags_string: Optional[str] = None,
+        copy_source_blob_properties: Optional[bool] = None,
+        copy_source_authorization: Optional[str] = None,
+        copy_source_tags: Optional[Union[str, _models.BlobCopySourceTags]] = None,
+        blob_http_headers: Optional[_models.BlobHTTPHeaders] = None,
+        lease_access_conditions: Optional[_models.LeaseAccessConditions] = None,
+        cpk_info: Optional[_models.CpkInfo] = None,
+        cpk_scope_info: Optional[_models.CpkScopeInfo] = None,
+        modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None,
+        source_modified_access_conditions: Optional[_models.SourceModifiedAccessConditions] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """The Put Blob from URL operation creates a new Block Blob where the contents of the blob are
+        read from a given URL.  This API is supported beginning with the 2020-04-08 version. Partial
+        updates are not supported with Put Blob from URL; the content of an existing blob is
+        overwritten with the content of the new blob.  To perform partial updates to a block blob’s
+        contents using a source URL, use the Put Block from URL API in conjunction with Put Block List.
+
+        :param content_length: The length of the request. Required.
+        :type content_length: int
+        :param copy_source: Specifies the URL of the source blob. This value is a URL of up to 2 KB
+         in length that specifies a blob. The value should be URL-encoded as it would appear in a
+         request URI. The source blob must either be public or must be authenticated via a shared
+         access signature. Required.
+        :type copy_source: str
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param transactional_content_md5: Specify the transactional md5 for the body, to be validated
+         by the service. Default value is None.
+        :type transactional_content_md5: bytes
+        :param metadata: Optional. Specifies a user-defined name-value pair associated with the blob.
+         If no name-value pairs are specified, the operation will copy the metadata from the source blob
+         or file to the destination blob. If one or more name-value pairs are specified, the destination
+         blob is created with the specified metadata, and metadata is not copied from the source blob or
+         file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming
+         rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more
+         information. Default value is None.
+        :type metadata: dict[str, str]
+        :param tier: Optional. Indicates the tier to be set on the blob. Known values are: "P4", "P6",
+         "P10", "P15", "P20", "P30", "P40", "P50", "P60", "P70", "P80", "Hot", "Cool", "Archive", and
+         "Cold". Default value is None.
+        :type tier: str or ~azure.storage.blob.models.AccessTierOptional
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param source_content_md5: Specify the md5 calculated for the range of bytes that must be read
+         from the copy source. Default value is None.
+        :type source_content_md5: bytes
+        :param blob_tags_string: Optional.  Used to set blob tags in various blob operations. Default
+         value is None.
+        :type blob_tags_string: str
+        :param copy_source_blob_properties: Optional, default is true.  Indicates if properties from
+         the source blob should be copied. Default value is None.
+        :type copy_source_blob_properties: bool
+        :param copy_source_authorization: Only Bearer type is supported. Credentials should be a valid
+         OAuth access token to copy source. Default value is None.
+        :type copy_source_authorization: str
+        :param copy_source_tags: Optional, default 'replace'.  Indicates if source tags should be
+         copied or replaced with the tags specified by x-ms-tags. Known values are: "REPLACE" and
+         "COPY". Default value is None.
+        :type copy_source_tags: str or ~azure.storage.blob.models.BlobCopySourceTags
+        :param blob_http_headers: Parameter group. Default value is None.
+        :type blob_http_headers: ~azure.storage.blob.models.BlobHTTPHeaders
+        :param lease_access_conditions: Parameter group. Default value is None.
+        :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions
+        :param cpk_info: Parameter group. Default value is None.
+        :type cpk_info: ~azure.storage.blob.models.CpkInfo
+        :param cpk_scope_info: Parameter group. Default value is None.
+        :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo
+        :param modified_access_conditions: Parameter group. Default value is None.
+        :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions
+        :param source_modified_access_conditions: Parameter group. Default value is None.
+        :type source_modified_access_conditions:
+         ~azure.storage.blob.models.SourceModifiedAccessConditions
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+        _params = kwargs.pop("params", {}) or {}
+
+        blob_type: Literal["BlockBlob"] = kwargs.pop("blob_type", _headers.pop("x-ms-blob-type", "BlockBlob"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _blob_content_type = None
+        _blob_content_encoding = None
+        _blob_content_language = None
+        _blob_content_md5 = None
+        _blob_cache_control = None
+        _lease_id = None
+        _blob_content_disposition = None
+        _encryption_key = None
+        _encryption_key_sha256 = None
+        _encryption_algorithm = None
+        _encryption_scope = None
+        _if_modified_since = None
+        _if_unmodified_since = None
+        _if_match = None
+        _if_none_match = None
+        _if_tags = None
+        _source_if_modified_since = None
+        _source_if_unmodified_since = None
+        _source_if_match = None
+        _source_if_none_match = None
+        _source_if_tags = None
+        if blob_http_headers is not None:
+            _blob_cache_control = blob_http_headers.blob_cache_control
+            _blob_content_disposition = blob_http_headers.blob_content_disposition
+            _blob_content_encoding = blob_http_headers.blob_content_encoding
+            _blob_content_language = blob_http_headers.blob_content_language
+            _blob_content_md5 = blob_http_headers.blob_content_md5
+            _blob_content_type = blob_http_headers.blob_content_type
+        if lease_access_conditions is not None:
+            _lease_id = lease_access_conditions.lease_id
+        if cpk_info is not None:
+            _encryption_algorithm = cpk_info.encryption_algorithm
+            _encryption_key = cpk_info.encryption_key
+            _encryption_key_sha256 = cpk_info.encryption_key_sha256
+        if cpk_scope_info is not None:
+            _encryption_scope = cpk_scope_info.encryption_scope
+        if modified_access_conditions is not None:
+            _if_match = modified_access_conditions.if_match
+            _if_modified_since = modified_access_conditions.if_modified_since
+            _if_none_match = modified_access_conditions.if_none_match
+            _if_tags = modified_access_conditions.if_tags
+            _if_unmodified_since = modified_access_conditions.if_unmodified_since
+        if source_modified_access_conditions is not None:
+            _source_if_match = source_modified_access_conditions.source_if_match
+            _source_if_modified_since = source_modified_access_conditions.source_if_modified_since
+            _source_if_none_match = source_modified_access_conditions.source_if_none_match
+            _source_if_tags = source_modified_access_conditions.source_if_tags
+            _source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since
+
+        _request = build_put_blob_from_url_request(
+            url=self._config.url,
+            content_length=content_length,
+            copy_source=copy_source,
+            timeout=timeout,
+            transactional_content_md5=transactional_content_md5,
+            blob_content_type=_blob_content_type,
+            blob_content_encoding=_blob_content_encoding,
+            blob_content_language=_blob_content_language,
+            blob_content_md5=_blob_content_md5,
+            blob_cache_control=_blob_cache_control,
+            metadata=metadata,
+            lease_id=_lease_id,
+            blob_content_disposition=_blob_content_disposition,
+            encryption_key=_encryption_key,
+            encryption_key_sha256=_encryption_key_sha256,
+            encryption_algorithm=_encryption_algorithm,
+            encryption_scope=_encryption_scope,
+            tier=tier,
+            if_modified_since=_if_modified_since,
+            if_unmodified_since=_if_unmodified_since,
+            if_match=_if_match,
+            if_none_match=_if_none_match,
+            if_tags=_if_tags,
+            source_if_modified_since=_source_if_modified_since,
+            source_if_unmodified_since=_source_if_unmodified_since,
+            source_if_match=_source_if_match,
+            source_if_none_match=_source_if_none_match,
+            source_if_tags=_source_if_tags,
+            request_id_parameter=request_id_parameter,
+            source_content_md5=source_content_md5,
+            blob_tags_string=blob_tags_string,
+            copy_source_blob_properties=copy_source_blob_properties,
+            copy_source_authorization=copy_source_authorization,
+            copy_source_tags=copy_source_tags,
+            blob_type=blob_type,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [201]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["Content-MD5"] = self._deserialize("bytearray", response.headers.get("Content-MD5"))
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["x-ms-version-id"] = self._deserialize("str", response.headers.get("x-ms-version-id"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+        response_headers["x-ms-request-server-encrypted"] = self._deserialize(
+            "bool", response.headers.get("x-ms-request-server-encrypted")
+        )
+        response_headers["x-ms-encryption-key-sha256"] = self._deserialize(
+            "str", response.headers.get("x-ms-encryption-key-sha256")
+        )
+        response_headers["x-ms-encryption-scope"] = self._deserialize(
+            "str", response.headers.get("x-ms-encryption-scope")
+        )
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
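+    # --- Editorial usage sketch (not part of the generated code) ----------------
+    # Creating a block blob whose content the service reads from a source URL.
+    # The request itself carries no body, so ``content_length`` is 0; the source
+    # must be public or authenticated (e.g. via SAS). ``client`` as in the sketch
+    # after ``upload``.
+    #
+    #     await client.block_blob.put_blob_from_url(
+    #         content_length=0,
+    #         copy_source="https://account.blob.core.windows.net/src/blob?<sas>",
+    #     )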
+    @distributed_trace_async
+    async def stage_block(
+        self,
+        block_id: str,
+        content_length: int,
+        body: IO[bytes],
+        transactional_content_md5: Optional[bytes] = None,
+        transactional_content_crc64: Optional[bytes] = None,
+        timeout: Optional[int] = None,
+        request_id_parameter: Optional[str] = None,
+        structured_body_type: Optional[str] = None,
+        structured_content_length: Optional[int] = None,
+        lease_access_conditions: Optional[_models.LeaseAccessConditions] = None,
+        cpk_info: Optional[_models.CpkInfo] = None,
+        cpk_scope_info: Optional[_models.CpkScopeInfo] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """The Stage Block operation creates a new block to be committed as part of a blob.
+
+        :param block_id: A valid Base64 string value that identifies the block. Prior to encoding, the
+         string must be less than or equal to 64 bytes in size. For a given blob, the length of the
+         value specified for the blockid parameter must be the same for each block. Required.
+        :type block_id: str
+        :param content_length: The length of the request. Required.
+        :type content_length: int
+        :param body: Initial data. Required.
+        :type body: IO[bytes]
+        :param transactional_content_md5: Specify the transactional md5 for the body, to be validated
+         by the service. Default value is None.
+        :type transactional_content_md5: bytes
+        :param transactional_content_crc64: Specify the transactional crc64 for the body, to be
+         validated by the service. Default value is None.
+        :type transactional_content_crc64: bytes
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param structured_body_type: Required if the request body is a structured message. Specifies
+         the message schema version and properties. Default value is None.
+        :type structured_body_type: str
+        :param structured_content_length: Required if the request body is a structured message.
+         Specifies the length of the blob/file content inside the message body. Will always be smaller
+         than Content-Length. Default value is None.
+        :type structured_content_length: int
+        :param lease_access_conditions: Parameter group. Default value is None.
+        :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions
+        :param cpk_info: Parameter group. Default value is None.
+        :type cpk_info: ~azure.storage.blob.models.CpkInfo
+        :param cpk_scope_info: Parameter group. Default value is None.
+        :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        comp: Literal["block"] = kwargs.pop("comp", _params.pop("comp", "block"))
+        content_type: str = kwargs.pop("content_type", _headers.pop("Content-Type", "application/octet-stream"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _lease_id = None
+        _encryption_key = None
+        _encryption_key_sha256 = None
+        _encryption_algorithm = None
+        _encryption_scope = None
+        if lease_access_conditions is not None:
+            _lease_id = lease_access_conditions.lease_id
+        if cpk_info is not None:
+            _encryption_algorithm = cpk_info.encryption_algorithm
+            _encryption_key = cpk_info.encryption_key
+            _encryption_key_sha256 = cpk_info.encryption_key_sha256
+        if cpk_scope_info is not None:
+            _encryption_scope = cpk_scope_info.encryption_scope
+        _content = body
+
+        _request = build_stage_block_request(
+            url=self._config.url,
+            block_id=block_id,
+            content_length=content_length,
+            transactional_content_md5=transactional_content_md5,
+            transactional_content_crc64=transactional_content_crc64,
+            timeout=timeout,
+            lease_id=_lease_id,
+            encryption_key=_encryption_key,
+            encryption_key_sha256=_encryption_key_sha256,
+            encryption_algorithm=_encryption_algorithm,
+            encryption_scope=_encryption_scope,
+            request_id_parameter=request_id_parameter,
+            structured_body_type=structured_body_type,
+            structured_content_length=structured_content_length,
+            comp=comp,
+            content_type=content_type,
+            version=self._config.version,
+            content=_content,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [201]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["Content-MD5"] = self._deserialize("bytearray", response.headers.get("Content-MD5"))
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+        response_headers["x-ms-content-crc64"] = self._deserialize(
+            "bytearray", response.headers.get("x-ms-content-crc64")
+        )
+        response_headers["x-ms-request-server-encrypted"] = self._deserialize(
+            "bool", response.headers.get("x-ms-request-server-encrypted")
+        )
+        response_headers["x-ms-encryption-key-sha256"] = self._deserialize(
+            "str", response.headers.get("x-ms-encryption-key-sha256")
+        )
+        response_headers["x-ms-encryption-scope"] = self._deserialize(
+            "str", response.headers.get("x-ms-encryption-scope")
+        )
+        response_headers["x-ms-structured-body"] = self._deserialize(
+            "str", response.headers.get("x-ms-structured-body")
+        )
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
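+    # --- Editorial usage sketch (not part of the generated code) ----------------
+    # Staging one uncommitted block. Block IDs must be Base64 strings whose
+    # pre-encoding length is identical within a blob; a zero-padded counter is a
+    # common convention. ``client`` as in the sketch after ``upload``.
+    #
+    #     import base64, io
+    #     block_id = base64.b64encode(b"block-000000").decode()
+    #     chunk = b"x" * 4096
+    #     await client.block_blob.stage_block(
+    #         block_id=block_id,
+    #         content_length=len(chunk),
+    #         body=io.BytesIO(chunk),
+    #     )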
+    @distributed_trace_async
+    async def stage_block_from_url(
+        self,
+        block_id: str,
+        content_length: int,
+        source_url: str,
+        source_range: Optional[str] = None,
+        source_content_md5: Optional[bytes] = None,
+        source_contentcrc64: Optional[bytes] = None,
+        timeout: Optional[int] = None,
+        request_id_parameter: Optional[str] = None,
+        copy_source_authorization: Optional[str] = None,
+        cpk_info: Optional[_models.CpkInfo] = None,
+        cpk_scope_info: Optional[_models.CpkScopeInfo] = None,
+        lease_access_conditions: Optional[_models.LeaseAccessConditions] = None,
+        source_modified_access_conditions: Optional[_models.SourceModifiedAccessConditions] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """The Stage Block operation creates a new block to be committed as part of a blob where the
+        contents are read from a URL.
+
+        :param block_id: A valid Base64 string value that identifies the block. Prior to encoding, the
+         string must be less than or equal to 64 bytes in size. For a given blob, the length of the
+         value specified for the blockid parameter must be the same for each block. Required.
+        :type block_id: str
+        :param content_length: The length of the request. Required.
+        :type content_length: int
+        :param source_url: Specify a URL to the copy source. Required.
+        :type source_url: str
+        :param source_range: Bytes of source data in the specified range. Default value is None.
+        :type source_range: str
+        :param source_content_md5: Specify the md5 calculated for the range of bytes that must be read
+         from the copy source. Default value is None.
+        :type source_content_md5: bytes
+        :param source_contentcrc64: Specify the crc64 calculated for the range of bytes that must be
+         read from the copy source. Default value is None.
+        :type source_contentcrc64: bytes
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param copy_source_authorization: Only Bearer type is supported. Credentials should be a valid
+         OAuth access token to copy source. Default value is None.
+        :type copy_source_authorization: str
+        :param cpk_info: Parameter group. Default value is None.
+        :type cpk_info: ~azure.storage.blob.models.CpkInfo
+        :param cpk_scope_info: Parameter group. Default value is None.
+        :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo
+        :param lease_access_conditions: Parameter group. Default value is None.
+        :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions
+        :param source_modified_access_conditions: Parameter group. Default value is None.
+        :type source_modified_access_conditions:
+         ~azure.storage.blob.models.SourceModifiedAccessConditions
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        comp: Literal["block"] = kwargs.pop("comp", _params.pop("comp", "block"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _encryption_key = None
+        _encryption_key_sha256 = None
+        _encryption_algorithm = None
+        _encryption_scope = None
+        _lease_id = None
+        _source_if_modified_since = None
+        _source_if_unmodified_since = None
+        _source_if_match = None
+        _source_if_none_match = None
+        if cpk_info is not None:
+            _encryption_algorithm = cpk_info.encryption_algorithm
+            _encryption_key = cpk_info.encryption_key
+            _encryption_key_sha256 = cpk_info.encryption_key_sha256
+        if cpk_scope_info is not None:
+            _encryption_scope = cpk_scope_info.encryption_scope
+        if lease_access_conditions is not None:
+            _lease_id = lease_access_conditions.lease_id
+        if source_modified_access_conditions is not None:
+            _source_if_match = source_modified_access_conditions.source_if_match
+            _source_if_modified_since = source_modified_access_conditions.source_if_modified_since
+            _source_if_none_match = source_modified_access_conditions.source_if_none_match
+            _source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since
+
+        _request = build_stage_block_from_url_request(
+            url=self._config.url,
+            block_id=block_id,
+            content_length=content_length,
+            source_url=source_url,
+            source_range=source_range,
+            source_content_md5=source_content_md5,
+            source_contentcrc64=source_contentcrc64,
+            timeout=timeout,
+            encryption_key=_encryption_key,
+            encryption_key_sha256=_encryption_key_sha256,
+            encryption_algorithm=_encryption_algorithm,
+            encryption_scope=_encryption_scope,
+            lease_id=_lease_id,
+            source_if_modified_since=_source_if_modified_since,
+            source_if_unmodified_since=_source_if_unmodified_since,
+            source_if_match=_source_if_match,
+            source_if_none_match=_source_if_none_match,
+            request_id_parameter=request_id_parameter,
+            copy_source_authorization=copy_source_authorization,
+            comp=comp,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [201]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["Content-MD5"] = self._deserialize("bytearray", response.headers.get("Content-MD5"))
+        response_headers["x-ms-content-crc64"] = self._deserialize(
+            "bytearray", response.headers.get("x-ms-content-crc64")
+        )
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+        response_headers["x-ms-request-server-encrypted"] = self._deserialize(
+            "bool", response.headers.get("x-ms-request-server-encrypted")
+        )
+        response_headers["x-ms-encryption-key-sha256"] = self._deserialize(
+            "str", response.headers.get("x-ms-encryption-key-sha256")
+        )
+        response_headers["x-ms-encryption-scope"] = self._deserialize(
+            "str", response.headers.get("x-ms-encryption-scope")
+        )
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
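+    # --- Editorial usage sketch (not part of the generated code) ----------------
+    # Staging a block whose bytes the service fetches from another blob. As with
+    # Put Blob from URL the request body is empty, and ``source_range`` bounds how
+    # much of the source is read. ``block_id`` as in the previous sketch.
+    #
+    #     await client.block_blob.stage_block_from_url(
+    #         block_id=block_id,
+    #         content_length=0,
+    #         source_url="https://account.blob.core.windows.net/src/blob?<sas>",
+    #         source_range="bytes=0-1048575",
+    #     )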
+    @distributed_trace_async
+    async def commit_block_list(
+        self,
+        blocks: _models.BlockLookupList,
+        timeout: Optional[int] = None,
+        transactional_content_md5: Optional[bytes] = None,
+        transactional_content_crc64: Optional[bytes] = None,
+        metadata: Optional[Dict[str, str]] = None,
+        tier: Optional[Union[str, _models.AccessTierOptional]] = None,
+        request_id_parameter: Optional[str] = None,
+        blob_tags_string: Optional[str] = None,
+        immutability_policy_expiry: Optional[datetime.datetime] = None,
+        immutability_policy_mode: Optional[Union[str, _models.BlobImmutabilityPolicyMode]] = None,
+        legal_hold: Optional[bool] = None,
+        blob_http_headers: Optional[_models.BlobHTTPHeaders] = None,
+        lease_access_conditions: Optional[_models.LeaseAccessConditions] = None,
+        cpk_info: Optional[_models.CpkInfo] = None,
+        cpk_scope_info: Optional[_models.CpkScopeInfo] = None,
+        modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """The Commit Block List operation writes a blob by specifying the list of block IDs that make up
+        the blob. In order to be written as part of a blob, a block must have been successfully written
+        to the server in a prior Put Block operation. You can call Put Block List to update a blob by
+        uploading only those blocks that have changed, then committing the new and existing blocks
+        together. You can do this by specifying whether to commit a block from the committed block list
+        or from the uncommitted block list, or to commit the most recently uploaded version of the
+        block, whichever list it may belong to.
+
+        :param blocks: Blob Blocks. Required.
+        :type blocks: ~azure.storage.blob.models.BlockLookupList
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param transactional_content_md5: Specify the transactional md5 for the body, to be validated
+         by the service. Default value is None.
+        :type transactional_content_md5: bytes
+        :param transactional_content_crc64: Specify the transactional crc64 for the body, to be
+         validated by the service. Default value is None.
+        :type transactional_content_crc64: bytes
+        :param metadata: Optional. Specifies a user-defined name-value pair associated with the
+         blob. Note that beginning with version 2009-09-19, metadata names must adhere to the naming
+         rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for
+         more information. Default value is None.
+        :type metadata: dict[str, str]
+        :param tier: Optional. Indicates the tier to be set on the blob. Known values are: "P4", "P6",
+         "P10", "P15", "P20", "P30", "P40", "P50", "P60", "P70", "P80", "Hot", "Cool", "Archive", and
+         "Cold". Default value is None.
+        :type tier: str or ~azure.storage.blob.models.AccessTierOptional
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param blob_tags_string: Optional.  Used to set blob tags in various blob operations. Default
+         value is None.
+        :type blob_tags_string: str
+        :param immutability_policy_expiry: Specifies the date time when the blobs immutability policy
+         is set to expire. Default value is None.
+        :type immutability_policy_expiry: ~datetime.datetime
+        :param immutability_policy_mode: Specifies the immutability policy mode to set on the blob.
+         Known values are: "Mutable", "Unlocked", and "Locked". Default value is None.
+        :type immutability_policy_mode: str or ~azure.storage.blob.models.BlobImmutabilityPolicyMode
+        :param legal_hold: Specifies whether a legal hold should be set on the blob. Default value is None.
+        :type legal_hold: bool
+        :param blob_http_headers: Parameter group. Default value is None.
+        :type blob_http_headers: ~azure.storage.blob.models.BlobHTTPHeaders
+        :param lease_access_conditions: Parameter group. Default value is None.
+        :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions
+        :param cpk_info: Parameter group. Default value is None.
+        :type cpk_info: ~azure.storage.blob.models.CpkInfo
+        :param cpk_scope_info: Parameter group. Default value is None.
+        :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo
+        :param modified_access_conditions: Parameter group. Default value is None.
+        :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        comp: Literal["blocklist"] = kwargs.pop("comp", _params.pop("comp", "blocklist"))
+        content_type: str = kwargs.pop("content_type", _headers.pop("Content-Type", "application/xml"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _blob_cache_control = None
+        _blob_content_type = None
+        _blob_content_encoding = None
+        _blob_content_language = None
+        _blob_content_md5 = None
+        _lease_id = None
+        _blob_content_disposition = None
+        _encryption_key = None
+        _encryption_key_sha256 = None
+        _encryption_algorithm = None
+        _encryption_scope = None
+        _if_modified_since = None
+        _if_unmodified_since = None
+        _if_match = None
+        _if_none_match = None
+        _if_tags = None
+        if blob_http_headers is not None:
+            _blob_cache_control = blob_http_headers.blob_cache_control
+            _blob_content_disposition = blob_http_headers.blob_content_disposition
+            _blob_content_encoding = blob_http_headers.blob_content_encoding
+            _blob_content_language = blob_http_headers.blob_content_language
+            _blob_content_md5 = blob_http_headers.blob_content_md5
+            _blob_content_type = blob_http_headers.blob_content_type
+        if lease_access_conditions is not None:
+            _lease_id = lease_access_conditions.lease_id
+        if cpk_info is not None:
+            _encryption_algorithm = cpk_info.encryption_algorithm
+            _encryption_key = cpk_info.encryption_key
+            _encryption_key_sha256 = cpk_info.encryption_key_sha256
+        if cpk_scope_info is not None:
+            _encryption_scope = cpk_scope_info.encryption_scope
+        if modified_access_conditions is not None:
+            _if_match = modified_access_conditions.if_match
+            _if_modified_since = modified_access_conditions.if_modified_since
+            _if_none_match = modified_access_conditions.if_none_match
+            _if_tags = modified_access_conditions.if_tags
+            _if_unmodified_since = modified_access_conditions.if_unmodified_since
+        _content = self._serialize.body(blocks, "BlockLookupList", is_xml=True)
+
+        _request = build_commit_block_list_request(
+            url=self._config.url,
+            timeout=timeout,
+            blob_cache_control=_blob_cache_control,
+            blob_content_type=_blob_content_type,
+            blob_content_encoding=_blob_content_encoding,
+            blob_content_language=_blob_content_language,
+            blob_content_md5=_blob_content_md5,
+            transactional_content_md5=transactional_content_md5,
+            transactional_content_crc64=transactional_content_crc64,
+            metadata=metadata,
+            lease_id=_lease_id,
+            blob_content_disposition=_blob_content_disposition,
+            encryption_key=_encryption_key,
+            encryption_key_sha256=_encryption_key_sha256,
+            encryption_algorithm=_encryption_algorithm,
+            encryption_scope=_encryption_scope,
+            tier=tier,
+            if_modified_since=_if_modified_since,
+            if_unmodified_since=_if_unmodified_since,
+            if_match=_if_match,
+            if_none_match=_if_none_match,
+            if_tags=_if_tags,
+            request_id_parameter=request_id_parameter,
+            blob_tags_string=blob_tags_string,
+            immutability_policy_expiry=immutability_policy_expiry,
+            immutability_policy_mode=immutability_policy_mode,
+            legal_hold=legal_hold,
+            comp=comp,
+            content_type=content_type,
+            version=self._config.version,
+            content=_content,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [201]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["Content-MD5"] = self._deserialize("bytearray", response.headers.get("Content-MD5"))
+        response_headers["x-ms-content-crc64"] = self._deserialize(
+            "bytearray", response.headers.get("x-ms-content-crc64")
+        )
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["x-ms-version-id"] = self._deserialize("str", response.headers.get("x-ms-version-id"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+        response_headers["x-ms-request-server-encrypted"] = self._deserialize(
+            "bool", response.headers.get("x-ms-request-server-encrypted")
+        )
+        response_headers["x-ms-encryption-key-sha256"] = self._deserialize(
+            "str", response.headers.get("x-ms-encryption-key-sha256")
+        )
+        response_headers["x-ms-encryption-scope"] = self._deserialize(
+            "str", response.headers.get("x-ms-encryption-scope")
+        )
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
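+    # --- Editorial usage sketch (not part of the generated code) ----------------
+    # Committing previously staged blocks. ``latest`` resolves each ID against the
+    # uncommitted list first and then the committed list, which is the usual choice
+    # right after staging. ``block_id`` as in the ``stage_block`` sketch.
+    #
+    #     blocks = _models.BlockLookupList(latest=[block_id])
+    #     await client.block_blob.commit_block_list(blocks=blocks)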
+    @distributed_trace_async
+    async def get_block_list(
+        self,
+        snapshot: Optional[str] = None,
+        list_type: Union[str, _models.BlockListType] = "committed",
+        timeout: Optional[int] = None,
+        request_id_parameter: Optional[str] = None,
+        lease_access_conditions: Optional[_models.LeaseAccessConditions] = None,
+        modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None,
+        **kwargs: Any
+    ) -> _models.BlockList:
+        # pylint: disable=line-too-long
+        """The Get Block List operation retrieves the list of blocks that have been uploaded as part of a
+        block blob.
+
+        :param snapshot: The snapshot parameter is an opaque DateTime value that, when present,
+         specifies the blob snapshot to retrieve. For more information on working with blob snapshots,
+         see :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob">Creating
+         a Snapshot of a Blob.</a>`. Default value is None.
+        :type snapshot: str
+        :param list_type: Specifies whether to return the list of committed blocks, the list of
+         uncommitted blocks, or both lists together. Known values are: "committed", "uncommitted", and
+         "all". Default value is "committed".
+        :type list_type: str or ~azure.storage.blob.models.BlockListType
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param lease_access_conditions: Parameter group. Default value is None.
+        :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions
+        :param modified_access_conditions: Parameter group. Default value is None.
+        :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions
+        :return: BlockList or the result of cls(response)
+        :rtype: ~azure.storage.blob.models.BlockList
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        comp: Literal["blocklist"] = kwargs.pop("comp", _params.pop("comp", "blocklist"))
+        cls: ClsType[_models.BlockList] = kwargs.pop("cls", None)
+
+        _lease_id = None
+        _if_tags = None
+        if lease_access_conditions is not None:
+            _lease_id = lease_access_conditions.lease_id
+        if modified_access_conditions is not None:
+            _if_tags = modified_access_conditions.if_tags
+
+        _request = build_get_block_list_request(
+            url=self._config.url,
+            snapshot=snapshot,
+            list_type=list_type,
+            timeout=timeout,
+            lease_id=_lease_id,
+            if_tags=_if_tags,
+            request_id_parameter=request_id_parameter,
+            comp=comp,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Content-Type"] = self._deserialize("str", response.headers.get("Content-Type"))
+        response_headers["x-ms-blob-content-length"] = self._deserialize(
+            "int", response.headers.get("x-ms-blob-content-length")
+        )
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+
+        deserialized = self._deserialize("BlockList", pipeline_response.http_response)
+
+        if cls:
+            return cls(pipeline_response, deserialized, response_headers)  # type: ignore
+
+        return deserialized  # type: ignore
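+    # --- Editorial usage sketch (not part of the generated code) ----------------
+    # Inspecting staged vs. committed state; the returned ``BlockList`` model
+    # exposes ``committed_blocks`` and ``uncommitted_blocks`` sequences.
+    #
+    #     block_list = await client.block_blob.get_block_list(list_type="all")
+    #     staged = [b.name for b in block_list.uncommitted_blocks or []]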
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/blob/_generated/aio/operations/_container_operations.py b/.venv/lib/python3.12/site-packages/azure/storage/blob/_generated/aio/operations/_container_operations.py
new file mode 100644
index 00000000..6ffc5959
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/blob/_generated/aio/operations/_container_operations.py
@@ -0,0 +1,1818 @@
+# pylint: disable=too-many-lines
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+import sys
+from typing import Any, AsyncIterator, Callable, Dict, IO, List, Literal, Optional, TypeVar, Union
+
+from azure.core import AsyncPipelineClient
+from azure.core.exceptions import (
+    ClientAuthenticationError,
+    HttpResponseError,
+    ResourceExistsError,
+    ResourceNotFoundError,
+    ResourceNotModifiedError,
+    StreamClosedError,
+    StreamConsumedError,
+    map_error,
+)
+from azure.core.pipeline import PipelineResponse
+from azure.core.rest import AsyncHttpResponse, HttpRequest
+from azure.core.tracing.decorator_async import distributed_trace_async
+from azure.core.utils import case_insensitive_dict
+
+from ... import models as _models
+from ..._serialization import Deserializer, Serializer
+from ...operations._container_operations import (
+    build_acquire_lease_request,
+    build_break_lease_request,
+    build_change_lease_request,
+    build_create_request,
+    build_delete_request,
+    build_filter_blobs_request,
+    build_get_access_policy_request,
+    build_get_account_info_request,
+    build_get_properties_request,
+    build_list_blob_flat_segment_request,
+    build_list_blob_hierarchy_segment_request,
+    build_release_lease_request,
+    build_rename_request,
+    build_renew_lease_request,
+    build_restore_request,
+    build_set_access_policy_request,
+    build_set_metadata_request,
+    build_submit_batch_request,
+)
+from .._configuration import AzureBlobStorageConfiguration
+
+if sys.version_info >= (3, 9):
+    from collections.abc import MutableMapping
+else:
+    from typing import MutableMapping  # type: ignore
+T = TypeVar("T")
+ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
+
+
+class ContainerOperations:
+    """
+    .. warning::
+        **DO NOT** instantiate this class directly.
+
+        Instead, you should access the following operations through
+        :class:`~azure.storage.blob.aio.AzureBlobStorage`'s
+        :attr:`container` attribute.
+    """
+
+    models = _models
+
+    def __init__(self, *args, **kwargs) -> None:
+        input_args = list(args)
+        self._client: AsyncPipelineClient = input_args.pop(0) if input_args else kwargs.pop("client")
+        self._config: AzureBlobStorageConfiguration = input_args.pop(0) if input_args else kwargs.pop("config")
+        self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer")
+        self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer")
+
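+    # --- Editorial usage sketch (not part of the generated code) ----------------
+    # Per the class docstring, reach these operations through the generated async
+    # client rather than constructing this class directly. A rough outline,
+    # assuming ``container_url`` and pipeline authentication are set up elsewhere:
+    #
+    #     client = AzureBlobStorage(url=container_url)
+    #     await client.container.create(timeout=30)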
+    @distributed_trace_async
+    async def create(
+        self,
+        timeout: Optional[int] = None,
+        metadata: Optional[Dict[str, str]] = None,
+        access: Optional[Union[str, _models.PublicAccessType]] = None,
+        request_id_parameter: Optional[str] = None,
+        container_cpk_scope_info: Optional[_models.ContainerCpkScopeInfo] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """creates a new container under the specified account. If the container with the same name
+        already exists, the operation fails.
+
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param metadata: Optional. Specifies a user-defined name-value pair associated with the
+         container. Note that beginning with version 2009-09-19, metadata names must adhere to the
+         naming rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata
+         for more information. Default value is None.
+        :type metadata: dict[str, str]
+        :param access: Specifies whether data in the container may be accessed publicly and the level
+         of access. Known values are: "container" and "blob". Default value is None.
+        :type access: str or ~azure.storage.blob.models.PublicAccessType
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param container_cpk_scope_info: Parameter group. Default value is None.
+        :type container_cpk_scope_info: ~azure.storage.blob.models.ContainerCpkScopeInfo
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        restype: Literal["container"] = kwargs.pop("restype", _params.pop("restype", "container"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _default_encryption_scope = None
+        _prevent_encryption_scope_override = None
+        if container_cpk_scope_info is not None:
+            _default_encryption_scope = container_cpk_scope_info.default_encryption_scope
+            _prevent_encryption_scope_override = container_cpk_scope_info.prevent_encryption_scope_override
+
+        _request = build_create_request(
+            url=self._config.url,
+            timeout=timeout,
+            metadata=metadata,
+            access=access,
+            request_id_parameter=request_id_parameter,
+            default_encryption_scope=_default_encryption_scope,
+            prevent_encryption_scope_override=_prevent_encryption_scope_override,
+            restype=restype,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [201]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
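+    # Illustrative usage sketch, not generated code: this operation group is normally driven
+    # through the public async client rather than by instantiating ContainerOperations directly
+    # (see the class docstring above). The connection string and container name are placeholders.
+    #
+    #     from azure.storage.blob.aio import BlobServiceClient
+    #
+    #     async def create_example(conn_str: str) -> None:
+    #         async with BlobServiceClient.from_connection_string(conn_str) as service:
+    #             # Drives ContainerOperations.create(); a 409 surfaces as ResourceExistsError.
+    #             await service.create_container("my-container", metadata={"env": "dev"})
+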
+    @distributed_trace_async
+    async def get_properties(
+        self,
+        timeout: Optional[int] = None,
+        request_id_parameter: Optional[str] = None,
+        lease_access_conditions: Optional[_models.LeaseAccessConditions] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """returns all user-defined metadata and system properties for the specified container. The data
+        returned does not include the container's list of blobs.
+
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param lease_access_conditions: Parameter group. Default value is None.
+        :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        restype: Literal["container"] = kwargs.pop("restype", _params.pop("restype", "container"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _lease_id = None
+        if lease_access_conditions is not None:
+            _lease_id = lease_access_conditions.lease_id
+
+        _request = build_get_properties_request(
+            url=self._config.url,
+            timeout=timeout,
+            lease_id=_lease_id,
+            request_id_parameter=request_id_parameter,
+            restype=restype,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["x-ms-meta"] = self._deserialize("{str}", response.headers.get("x-ms-meta"))
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["x-ms-lease-duration"] = self._deserialize("str", response.headers.get("x-ms-lease-duration"))
+        response_headers["x-ms-lease-state"] = self._deserialize("str", response.headers.get("x-ms-lease-state"))
+        response_headers["x-ms-lease-status"] = self._deserialize("str", response.headers.get("x-ms-lease-status"))
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+        response_headers["x-ms-blob-public-access"] = self._deserialize(
+            "str", response.headers.get("x-ms-blob-public-access")
+        )
+        response_headers["x-ms-has-immutability-policy"] = self._deserialize(
+            "bool", response.headers.get("x-ms-has-immutability-policy")
+        )
+        response_headers["x-ms-has-legal-hold"] = self._deserialize("bool", response.headers.get("x-ms-has-legal-hold"))
+        response_headers["x-ms-default-encryption-scope"] = self._deserialize(
+            "str", response.headers.get("x-ms-default-encryption-scope")
+        )
+        response_headers["x-ms-deny-encryption-scope-override"] = self._deserialize(
+            "bool", response.headers.get("x-ms-deny-encryption-scope-override")
+        )
+        response_headers["x-ms-immutable-storage-with-versioning-enabled"] = self._deserialize(
+            "bool", response.headers.get("x-ms-immutable-storage-with-versioning-enabled")
+        )
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
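+    # Illustrative sketch (assumed public-client equivalent): ContainerClient.get_container_properties()
+    # drives this operation and surfaces the response headers as a ContainerProperties model.
+    #
+    #     async def properties_example(container_client) -> None:
+    #         props = await container_client.get_container_properties()
+    #         print(props.last_modified, props.lease.state, props.metadata)
+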
+    @distributed_trace_async
+    async def delete(
+        self,
+        timeout: Optional[int] = None,
+        request_id_parameter: Optional[str] = None,
+        lease_access_conditions: Optional[_models.LeaseAccessConditions] = None,
+        modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """operation marks the specified container for deletion. The container and any blobs contained
+        within it are later deleted during garbage collection.
+
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param lease_access_conditions: Parameter group. Default value is None.
+        :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions
+        :param modified_access_conditions: Parameter group. Default value is None.
+        :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        restype: Literal["container"] = kwargs.pop("restype", _params.pop("restype", "container"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _lease_id = None
+        _if_modified_since = None
+        _if_unmodified_since = None
+        if lease_access_conditions is not None:
+            _lease_id = lease_access_conditions.lease_id
+        if modified_access_conditions is not None:
+            _if_modified_since = modified_access_conditions.if_modified_since
+            _if_unmodified_since = modified_access_conditions.if_unmodified_since
+
+        _request = build_delete_request(
+            url=self._config.url,
+            timeout=timeout,
+            lease_id=_lease_id,
+            if_modified_since=_if_modified_since,
+            if_unmodified_since=_if_unmodified_since,
+            request_id_parameter=request_id_parameter,
+            restype=restype,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [202]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
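+    # Illustrative sketch: deletion only marks the container; cleanup happens later during garbage
+    # collection, so recreating a container with the same name can briefly fail with a 409.
+    # Placeholder client assumed:
+    #
+    #     async def delete_example(container_client) -> None:
+    #         # Drives ContainerOperations.delete(); 202 Accepted on success.
+    #         await container_client.delete_container()
+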
+    @distributed_trace_async
+    async def set_metadata(
+        self,
+        timeout: Optional[int] = None,
+        metadata: Optional[Dict[str, str]] = None,
+        request_id_parameter: Optional[str] = None,
+        lease_access_conditions: Optional[_models.LeaseAccessConditions] = None,
+        modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """operation sets one or more user-defined name-value pairs for the specified container.
+
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param metadata: Optional. Specifies a user-defined name-value pair associated with the
+         container. Note that beginning with version 2009-09-19, metadata names must adhere to the
+         naming rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata
+         for more information. Default value is None.
+        :type metadata: dict[str, str]
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param lease_access_conditions: Parameter group. Default value is None.
+        :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions
+        :param modified_access_conditions: Parameter group. Default value is None.
+        :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        restype: Literal["container"] = kwargs.pop("restype", _params.pop("restype", "container"))
+        comp: Literal["metadata"] = kwargs.pop("comp", _params.pop("comp", "metadata"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _lease_id = None
+        _if_modified_since = None
+        if lease_access_conditions is not None:
+            _lease_id = lease_access_conditions.lease_id
+        if modified_access_conditions is not None:
+            _if_modified_since = modified_access_conditions.if_modified_since
+
+        _request = build_set_metadata_request(
+            url=self._config.url,
+            timeout=timeout,
+            lease_id=_lease_id,
+            metadata=metadata,
+            if_modified_since=_if_modified_since,
+            request_id_parameter=request_id_parameter,
+            restype=restype,
+            comp=comp,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
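+    # Illustrative sketch: set_metadata replaces the container's metadata wholesale, so pass the
+    # complete desired mapping rather than a delta. Public-client equivalent, names assumed:
+    #
+    #     async def metadata_example(container_client) -> None:
+    #         await container_client.set_container_metadata({"team": "storage", "tier": "hot"})
+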
+    @distributed_trace_async
+    async def get_access_policy(
+        self,
+        timeout: Optional[int] = None,
+        request_id_parameter: Optional[str] = None,
+        lease_access_conditions: Optional[_models.LeaseAccessConditions] = None,
+        **kwargs: Any
+    ) -> List[_models.SignedIdentifier]:
+        # pylint: disable=line-too-long
+        """gets the permissions for the specified container. The permissions indicate whether container
+        data may be accessed publicly.
+
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param lease_access_conditions: Parameter group. Default value is None.
+        :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions
+        :return: list of SignedIdentifier or the result of cls(response)
+        :rtype: list[~azure.storage.blob.models.SignedIdentifier]
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        restype: Literal["container"] = kwargs.pop("restype", _params.pop("restype", "container"))
+        comp: Literal["acl"] = kwargs.pop("comp", _params.pop("comp", "acl"))
+        cls: ClsType[List[_models.SignedIdentifier]] = kwargs.pop("cls", None)
+
+        _lease_id = None
+        if lease_access_conditions is not None:
+            _lease_id = lease_access_conditions.lease_id
+
+        _request = build_get_access_policy_request(
+            url=self._config.url,
+            timeout=timeout,
+            lease_id=_lease_id,
+            request_id_parameter=request_id_parameter,
+            restype=restype,
+            comp=comp,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["x-ms-blob-public-access"] = self._deserialize(
+            "str", response.headers.get("x-ms-blob-public-access")
+        )
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+
+        deserialized = self._deserialize("[SignedIdentifier]", pipeline_response.http_response)
+
+        if cls:
+            return cls(pipeline_response, deserialized, response_headers)  # type: ignore
+
+        return deserialized  # type: ignore
+
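+    # Illustrative sketch: the public helper returns the deserialized signed identifiers together
+    # with the x-ms-blob-public-access header value. Placeholder client assumed:
+    #
+    #     async def read_acl_example(container_client) -> None:
+    #         policy = await container_client.get_container_access_policy()
+    #         print(policy["public_access"], policy["signed_identifiers"])
+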
+    @distributed_trace_async
+    async def set_access_policy(
+        self,
+        timeout: Optional[int] = None,
+        access: Optional[Union[str, _models.PublicAccessType]] = None,
+        request_id_parameter: Optional[str] = None,
+        lease_access_conditions: Optional[_models.LeaseAccessConditions] = None,
+        modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None,
+        container_acl: Optional[List[_models.SignedIdentifier]] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """sets the permissions for the specified container. The permissions indicate whether blobs in a
+        container may be accessed publicly.
+
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param access: Specifies whether data in the container may be accessed publicly and the level
+         of access. Known values are: "container" and "blob". Default value is None.
+        :type access: str or ~azure.storage.blob.models.PublicAccessType
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param lease_access_conditions: Parameter group. Default value is None.
+        :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions
+        :param modified_access_conditions: Parameter group. Default value is None.
+        :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions
+        :param container_acl: The ACLs for the container. Default value is None.
+        :type container_acl: list[~azure.storage.blob.models.SignedIdentifier]
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        restype: Literal["container"] = kwargs.pop("restype", _params.pop("restype", "container"))
+        comp: Literal["acl"] = kwargs.pop("comp", _params.pop("comp", "acl"))
+        content_type: str = kwargs.pop("content_type", _headers.pop("Content-Type", "application/xml"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _lease_id = None
+        _if_modified_since = None
+        _if_unmodified_since = None
+        if lease_access_conditions is not None:
+            _lease_id = lease_access_conditions.lease_id
+        if modified_access_conditions is not None:
+            _if_modified_since = modified_access_conditions.if_modified_since
+            _if_unmodified_since = modified_access_conditions.if_unmodified_since
+        serialization_ctxt = {"xml": {"name": "SignedIdentifiers", "wrapped": True, "itemsName": "SignedIdentifier"}}
+        if container_acl is not None:
+            _content = self._serialize.body(
+                container_acl, "[SignedIdentifier]", is_xml=True, serialization_ctxt=serialization_ctxt
+            )
+        else:
+            _content = None
+
+        _request = build_set_access_policy_request(
+            url=self._config.url,
+            timeout=timeout,
+            lease_id=_lease_id,
+            access=access,
+            if_modified_since=_if_modified_since,
+            if_unmodified_since=_if_unmodified_since,
+            request_id_parameter=request_id_parameter,
+            restype=restype,
+            comp=comp,
+            content_type=content_type,
+            version=self._config.version,
+            content=_content,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
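+    # Illustrative sketch: the ACL body is serialized as a <SignedIdentifiers> XML wrapper (see
+    # serialization_ctxt above). With the public client, identifiers are built from AccessPolicy;
+    # the identifier name below is a placeholder:
+    #
+    #     from datetime import datetime, timedelta, timezone
+    #     from azure.storage.blob import AccessPolicy, ContainerSasPermissions
+    #
+    #     async def write_acl_example(container_client) -> None:
+    #         now = datetime.now(timezone.utc)
+    #         policy = AccessPolicy(permission=ContainerSasPermissions(read=True),
+    #                               expiry=now + timedelta(hours=1), start=now)
+    #         await container_client.set_container_access_policy({"read-only": policy})
+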
+    @distributed_trace_async
+    async def restore(
+        self,
+        timeout: Optional[int] = None,
+        request_id_parameter: Optional[str] = None,
+        deleted_container_name: Optional[str] = None,
+        deleted_container_version: Optional[str] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """Restores a previously-deleted container.
+
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param deleted_container_name: Optional. Version 2019-12-12 and later. Specifies the name of
+         the deleted container to restore. Default value is None.
+        :type deleted_container_name: str
+        :param deleted_container_version: Optional. Version 2019-12-12 and later. Specifies the
+         version of the deleted container to restore. Default value is None.
+        :type deleted_container_version: str
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        restype: Literal["container"] = kwargs.pop("restype", _params.pop("restype", "container"))
+        comp: Literal["undelete"] = kwargs.pop("comp", _params.pop("comp", "undelete"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _request = build_restore_request(
+            url=self._config.url,
+            timeout=timeout,
+            request_id_parameter=request_id_parameter,
+            deleted_container_name=deleted_container_name,
+            deleted_container_version=deleted_container_version,
+            restype=restype,
+            comp=comp,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [201]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
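+    # Illustrative sketch: restore (undelete) requires container soft delete to be enabled on the
+    # account; the name/version pair of a deleted container is typically discovered via
+    # list_containers(include_deleted=True). Placeholder service client assumed:
+    #
+    #     async def restore_example(service_client) -> None:
+    #         async for item in service_client.list_containers(include_deleted=True):
+    #             if item.deleted:
+    #                 await service_client.undelete_container(item.name, item.version)
+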
+    @distributed_trace_async
+    async def rename(
+        self,
+        source_container_name: str,
+        timeout: Optional[int] = None,
+        request_id_parameter: Optional[str] = None,
+        source_lease_id: Optional[str] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """Renames an existing container.
+
+        :param source_container_name: Specifies the name of the container to rename. Required.
+        :type source_container_name: str
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param source_lease_id: A lease ID for the source path. If specified, the source path must have
+         an active lease and the lease ID must match. Default value is None.
+        :type source_lease_id: str
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        restype: Literal["container"] = kwargs.pop("restype", _params.pop("restype", "container"))
+        comp: Literal["rename"] = kwargs.pop("comp", _params.pop("comp", "rename"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _request = build_rename_request(
+            url=self._config.url,
+            source_container_name=source_container_name,
+            timeout=timeout,
+            request_id_parameter=request_id_parameter,
+            source_lease_id=source_lease_id,
+            restype=restype,
+            comp=comp,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
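+    # Illustrative sketch: no stable public helper wraps this operation at present, so it is shown
+    # against the generated layer directly and should be treated as internal. Assumption: the
+    # client URL is scoped to the destination container and the source name travels in the
+    # x-ms-source-container-name header built by build_rename_request().
+    #
+    #     async def rename_example(ops: "ContainerOperations") -> None:
+    #         await ops.rename("old-container-name")
+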
+    @distributed_trace_async
+    async def submit_batch(
+        self,
+        content_length: int,
+        body: IO[bytes],
+        timeout: Optional[int] = None,
+        request_id_parameter: Optional[str] = None,
+        **kwargs: Any
+    ) -> AsyncIterator[bytes]:
+        # pylint: disable=line-too-long
+        """The Batch operation allows multiple API calls to be embedded into a single HTTP request.
+
+        :param content_length: The length of the request. Required.
+        :type content_length: int
+        :param body: Initial data. Required.
+        :type body: IO[bytes]
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :return: AsyncIterator[bytes] or the result of cls(response)
+        :rtype: AsyncIterator[bytes]
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        restype: Literal["container"] = kwargs.pop("restype", _params.pop("restype", "container"))
+        comp: Literal["batch"] = kwargs.pop("comp", _params.pop("comp", "batch"))
+        multipart_content_type: str = kwargs.pop(
+            "multipart_content_type", _headers.pop("Content-Type", "application/xml")
+        )
+        cls: ClsType[AsyncIterator[bytes]] = kwargs.pop("cls", None)
+
+        _content = body
+
+        _request = build_submit_batch_request(
+            url=self._config.url,
+            content_length=content_length,
+            timeout=timeout,
+            request_id_parameter=request_id_parameter,
+            restype=restype,
+            comp=comp,
+            multipart_content_type=multipart_content_type,
+            version=self._config.version,
+            content=_content,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _decompress = kwargs.pop("decompress", True)
+        _stream = True
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [202]:
+            try:
+                await response.read()  # Load the body in memory and close the socket
+            except (StreamConsumedError, StreamClosedError):
+                pass
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["Content-Type"] = self._deserialize("str", response.headers.get("Content-Type"))
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+
+        deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
+
+        if cls:
+            return cls(pipeline_response, deserialized, response_headers)  # type: ignore
+
+        return deserialized  # type: ignore
+
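+    # Illustrative sketch: the public client wraps container-scoped batching in helpers such as
+    # delete_blobs(), which build the multipart body and parse the multipart response for you.
+    # Placeholder blob names assumed:
+    #
+    #     async def batch_example(container_client) -> None:
+    #         await container_client.delete_blobs("logs/a.txt", "logs/b.txt")
+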
+    @distributed_trace_async
+    async def filter_blobs(
+        self,
+        timeout: Optional[int] = None,
+        request_id_parameter: Optional[str] = None,
+        where: Optional[str] = None,
+        marker: Optional[str] = None,
+        maxresults: Optional[int] = None,
+        include: Optional[List[Union[str, _models.FilterBlobsIncludeItem]]] = None,
+        **kwargs: Any
+    ) -> _models.FilterBlobSegment:
+        # pylint: disable=line-too-long
+        """The Filter Blobs operation enables callers to list blobs in a container whose tags match a
+        given search expression.  Filter blobs searches within the given container.
+
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param where: Filters the results to return only blobs whose tags match the specified
+         expression. Default value is None.
+        :type where: str
+        :param marker: A string value that identifies the portion of the result set to be returned
+         with the next operation. The operation returns the NextMarker value within the response body
+         if the operation did not return all results remaining to be listed with the current page.
+         The NextMarker value can be used as the value for the marker parameter in a subsequent call
+         to request the next page of results. The marker value is opaque to the client. Default value
+         is None.
+        :type marker: str
+        :param maxresults: Specifies the maximum number of results to return. If the request does not
+         specify maxresults, or specifies a value greater than 5000, the server will return up to
+         5000 items. Note that if the operation crosses a partition boundary, then the service will
+         return a continuation token for retrieving the remainder of the results. For this reason, it
+         is possible that the service will return fewer results than specified by maxresults, or than
+         the default of 5000. Default value is None.
+        :type maxresults: int
+        :param include: Include this parameter to specify one or more datasets to include in the
+         response. Default value is None.
+        :type include: list[str or ~azure.storage.blob.models.FilterBlobsIncludeItem]
+        :return: FilterBlobSegment or the result of cls(response)
+        :rtype: ~azure.storage.blob.models.FilterBlobSegment
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        restype: Literal["container"] = kwargs.pop("restype", _params.pop("restype", "container"))
+        comp: Literal["blobs"] = kwargs.pop("comp", _params.pop("comp", "blobs"))
+        cls: ClsType[_models.FilterBlobSegment] = kwargs.pop("cls", None)
+
+        _request = build_filter_blobs_request(
+            url=self._config.url,
+            timeout=timeout,
+            request_id_parameter=request_id_parameter,
+            where=where,
+            marker=marker,
+            maxresults=maxresults,
+            include=include,
+            restype=restype,
+            comp=comp,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+
+        deserialized = self._deserialize("FilterBlobSegment", pipeline_response.http_response)
+
+        if cls:
+            return cls(pipeline_response, deserialized, response_headers)  # type: ignore
+
+        return deserialized  # type: ignore
+
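+    # Illustrative sketch: the public equivalent is ContainerClient.find_blobs_by_tags(), which
+    # pages through FilterBlobSegment results using the marker for continuation. The tag expression
+    # below is a placeholder:
+    #
+    #     async def filter_example(container_client) -> None:
+    #         async for blob in container_client.find_blobs_by_tags("\"env\"='prod'"):
+    #             print(blob.name)
+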
+    @distributed_trace_async
+    async def acquire_lease(
+        self,
+        timeout: Optional[int] = None,
+        duration: Optional[int] = None,
+        proposed_lease_id: Optional[str] = None,
+        request_id_parameter: Optional[str] = None,
+        modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """[Update] establishes and manages a lock on a container for delete operations. The lock duration
+        can be 15 to 60 seconds, or can be infinite.
+
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param duration: Specifies the duration of the lease, in seconds, or negative one (-1) for a
+         lease that never expires. A non-infinite lease can be between 15 and 60 seconds. A lease
+         duration cannot be changed using renew or change. Default value is None.
+        :type duration: int
+        :param proposed_lease_id: Proposed lease ID, in a GUID string format. The Blob service returns
+         400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid
+         Constructor (String) for a list of valid GUID string formats. Default value is None.
+        :type proposed_lease_id: str
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param modified_access_conditions: Parameter group. Default value is None.
+        :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        comp: Literal["lease"] = kwargs.pop("comp", _params.pop("comp", "lease"))
+        restype: Literal["container"] = kwargs.pop("restype", _params.pop("restype", "container"))
+        action: Literal["acquire"] = kwargs.pop("action", _headers.pop("x-ms-lease-action", "acquire"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _if_modified_since = None
+        _if_unmodified_since = None
+        if modified_access_conditions is not None:
+            _if_modified_since = modified_access_conditions.if_modified_since
+            _if_unmodified_since = modified_access_conditions.if_unmodified_since
+
+        _request = build_acquire_lease_request(
+            url=self._config.url,
+            timeout=timeout,
+            duration=duration,
+            proposed_lease_id=proposed_lease_id,
+            if_modified_since=_if_modified_since,
+            if_unmodified_since=_if_unmodified_since,
+            request_id_parameter=request_id_parameter,
+            comp=comp,
+            restype=restype,
+            action=action,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [201]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["x-ms-lease-id"] = self._deserialize("str", response.headers.get("x-ms-lease-id"))
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
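+    # Illustrative sketch: the public client returns a BlobLeaseClient whose ID must accompany
+    # subsequent delete calls while the lease is held. Duration is 15-60 seconds or -1 (infinite);
+    # placeholder client assumed:
+    #
+    #     async def lease_example(container_client) -> None:
+    #         lease = await container_client.acquire_lease(lease_duration=15)
+    #         await container_client.delete_container(lease=lease)
+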
+    @distributed_trace_async
+    async def release_lease(
+        self,
+        lease_id: str,
+        timeout: Optional[int] = None,
+        request_id_parameter: Optional[str] = None,
+        modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """[Update] establishes and manages a lock on a container for delete operations. The lock duration
+        can be 15 to 60 seconds, or can be infinite.
+
+        :param lease_id: Specifies the current lease ID on the resource. Required.
+        :type lease_id: str
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param modified_access_conditions: Parameter group. Default value is None.
+        :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        comp: Literal["lease"] = kwargs.pop("comp", _params.pop("comp", "lease"))
+        restype: Literal["container"] = kwargs.pop("restype", _params.pop("restype", "container"))
+        action: Literal["release"] = kwargs.pop("action", _headers.pop("x-ms-lease-action", "release"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _if_modified_since = None
+        _if_unmodified_since = None
+        if modified_access_conditions is not None:
+            _if_modified_since = modified_access_conditions.if_modified_since
+            _if_unmodified_since = modified_access_conditions.if_unmodified_since
+
+        _request = build_release_lease_request(
+            url=self._config.url,
+            lease_id=lease_id,
+            timeout=timeout,
+            if_modified_since=_if_modified_since,
+            if_unmodified_since=_if_unmodified_since,
+            request_id_parameter=request_id_parameter,
+            comp=comp,
+            restype=restype,
+            action=action,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
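+    # Usage sketch (assumption: ``client`` is an ``azure.storage.blob.aio``
+    # ``AzureBlobStorage`` client and ``LeaseConflictError`` is a hypothetical
+    # app-level error). The ``error_map`` kwarg popped above lets a caller
+    # remap status codes before the defaults apply, e.g. surfacing 409 as a
+    # custom error instead of ``ResourceExistsError``::
+    #
+    #     from azure.core.exceptions import HttpResponseError
+    #
+    #     class LeaseConflictError(HttpResponseError):
+    #         """Hypothetical error for lease conflicts."""
+    #
+    #     await client.container.release_lease(
+    #         lease_id=lease_id,
+    #         error_map={409: LeaseConflictError},
+    #     )
+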
+    @distributed_trace_async
+    async def renew_lease(
+        self,
+        lease_id: str,
+        timeout: Optional[int] = None,
+        request_id_parameter: Optional[str] = None,
+        modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """[Update] establishes and manages a lock on a container for delete operations. The lock duration
+        can be 15 to 60 seconds, or can be infinite.
+
+        :param lease_id: Specifies the current lease ID on the resource. Required.
+        :type lease_id: str
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param modified_access_conditions: Parameter group. Default value is None.
+        :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        comp: Literal["lease"] = kwargs.pop("comp", _params.pop("comp", "lease"))
+        restype: Literal["container"] = kwargs.pop("restype", _params.pop("restype", "container"))
+        action: Literal["renew"] = kwargs.pop("action", _headers.pop("x-ms-lease-action", "renew"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _if_modified_since = None
+        _if_unmodified_since = None
+        if modified_access_conditions is not None:
+            _if_modified_since = modified_access_conditions.if_modified_since
+            _if_unmodified_since = modified_access_conditions.if_unmodified_since
+
+        _request = build_renew_lease_request(
+            url=self._config.url,
+            lease_id=lease_id,
+            timeout=timeout,
+            if_modified_since=_if_modified_since,
+            if_unmodified_since=_if_unmodified_since,
+            request_id_parameter=request_id_parameter,
+            comp=comp,
+            restype=restype,
+            action=action,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["x-ms-lease-id"] = self._deserialize("str", response.headers.get("x-ms-lease-id"))
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
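+    # Usage sketch (assumption: a lease was acquired earlier through this
+    # operation group, and ``client`` is as above). A 15-60 second lease must
+    # be renewed before it expires to keep the lock::
+    #
+    #     import asyncio
+    #
+    #     async def hold_lease(client, lease_id: str, period: int = 10) -> None:
+    #         # Renew roughly every ``period`` seconds until cancelled.
+    #         while True:
+    #             await client.container.renew_lease(lease_id=lease_id)
+    #             await asyncio.sleep(period)
+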
+    @distributed_trace_async
+    async def break_lease(
+        self,
+        timeout: Optional[int] = None,
+        break_period: Optional[int] = None,
+        request_id_parameter: Optional[str] = None,
+        modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """[Update] establishes and manages a lock on a container for delete operations. The lock duration
+        can be 15 to 60 seconds, or can be infinite.
+
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param break_period: For a break operation, proposed duration the lease should continue before
+         it is broken, in seconds, between 0 and 60. This break period is only used if it is shorter
+         than the time remaining on the lease. If longer, the time remaining on the lease is used. A new
+         lease will not be available before the break period has expired, but the lease may be held for
+         longer than the break period. If this header does not appear with a break operation, a
+         fixed-duration lease breaks after the remaining lease period elapses, and an infinite lease
+         breaks immediately. Default value is None.
+        :type break_period: int
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param modified_access_conditions: Parameter group. Default value is None.
+        :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        comp: Literal["lease"] = kwargs.pop("comp", _params.pop("comp", "lease"))
+        restype: Literal["container"] = kwargs.pop("restype", _params.pop("restype", "container"))
+        action: Literal["break"] = kwargs.pop("action", _headers.pop("x-ms-lease-action", "break"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _if_modified_since = None
+        _if_unmodified_since = None
+        if modified_access_conditions is not None:
+            _if_modified_since = modified_access_conditions.if_modified_since
+            _if_unmodified_since = modified_access_conditions.if_unmodified_since
+
+        _request = build_break_lease_request(
+            url=self._config.url,
+            timeout=timeout,
+            break_period=break_period,
+            if_modified_since=_if_modified_since,
+            if_unmodified_since=_if_unmodified_since,
+            request_id_parameter=request_id_parameter,
+            comp=comp,
+            restype=restype,
+            action=action,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [202]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["x-ms-lease-time"] = self._deserialize("int", response.headers.get("x-ms-lease-time"))
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
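+    # Usage sketch (assumption: ``client`` as above). ``break_lease`` returns
+    # ``None``, so the remaining break time has to be read from the
+    # ``x-ms-lease-time`` header deserialized above, via the ``cls`` callback::
+    #
+    #     def _lease_time(_, __, headers) -> int:
+    #         return headers["x-ms-lease-time"]
+    #
+    #     seconds_left = await client.container.break_lease(
+    #         break_period=15, cls=_lease_time
+    #     )
+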
+    @distributed_trace_async
+    async def change_lease(
+        self,
+        lease_id: str,
+        proposed_lease_id: str,
+        timeout: Optional[int] = None,
+        request_id_parameter: Optional[str] = None,
+        modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """[Update] establishes and manages a lock on a container for delete operations. The lock duration
+        can be 15 to 60 seconds, or can be infinite.
+
+        :param lease_id: Specifies the current lease ID on the resource. Required.
+        :type lease_id: str
+        :param proposed_lease_id: Proposed lease ID, in a GUID string format. The Blob service returns
+         400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid
+         Constructor (String) for a list of valid GUID string formats. Required.
+        :type proposed_lease_id: str
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param modified_access_conditions: Parameter group. Default value is None.
+        :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        comp: Literal["lease"] = kwargs.pop("comp", _params.pop("comp", "lease"))
+        restype: Literal["container"] = kwargs.pop("restype", _params.pop("restype", "container"))
+        action: Literal["change"] = kwargs.pop("action", _headers.pop("x-ms-lease-action", "change"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _if_modified_since = None
+        _if_unmodified_since = None
+        if modified_access_conditions is not None:
+            _if_modified_since = modified_access_conditions.if_modified_since
+            _if_unmodified_since = modified_access_conditions.if_unmodified_since
+
+        _request = build_change_lease_request(
+            url=self._config.url,
+            lease_id=lease_id,
+            proposed_lease_id=proposed_lease_id,
+            timeout=timeout,
+            if_modified_since=_if_modified_since,
+            if_unmodified_since=_if_unmodified_since,
+            request_id_parameter=request_id_parameter,
+            comp=comp,
+            restype=restype,
+            action=action,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["x-ms-lease-id"] = self._deserialize("str", response.headers.get("x-ms-lease-id"))
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
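+    # Usage sketch (assumption: ``client`` as above). Since
+    # ``proposed_lease_id`` must be a valid GUID string, a lease ID can be
+    # rotated in place with a fresh ``uuid4``::
+    #
+    #     import uuid
+    #
+    #     new_id = str(uuid.uuid4())
+    #     await client.container.change_lease(
+    #         lease_id=current_lease_id, proposed_lease_id=new_id
+    #     )
+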
+    @distributed_trace_async
+    async def list_blob_flat_segment(
+        self,
+        prefix: Optional[str] = None,
+        marker: Optional[str] = None,
+        maxresults: Optional[int] = None,
+        include: Optional[List[Union[str, _models.ListBlobsIncludeItem]]] = None,
+        timeout: Optional[int] = None,
+        request_id_parameter: Optional[str] = None,
+        **kwargs: Any
+    ) -> _models.ListBlobsFlatSegmentResponse:
+        # pylint: disable=line-too-long
+        """[Update] The List Blobs operation returns a list of the blobs under the specified container.
+
+        :param prefix: Filters the results to return only blobs whose name begins with the
+         specified prefix. Default value is None.
+        :type prefix: str
+        :param marker: A string value that identifies the portion of the list of blobs to be
+         returned with the next listing operation. The operation returns the NextMarker value within the
+         response body if the listing operation did not return all blobs remaining to be listed
+         with the current page. The NextMarker value can be used as the value for the marker parameter
+         in a subsequent call to request the next page of list items. The marker value is opaque to the
+         client. Default value is None.
+        :type marker: str
+        :param maxresults: Specifies the maximum number of blobs to return. If the request does
+         not specify maxresults, or specifies a value greater than 5000, the server will return up to
+         5000 items. Note that if the listing operation crosses a partition boundary, then the service
+         will return a continuation token for retrieving the remainder of the results. For this reason,
+         it is possible that the service will return fewer results than specified by maxresults, or than
+         the default of 5000. Default value is None.
+        :type maxresults: int
+        :param include: Include this parameter to specify one or more datasets to include in the
+         response. Default value is None.
+        :type include: list[str or ~azure.storage.blob.models.ListBlobsIncludeItem]
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :return: ListBlobsFlatSegmentResponse or the result of cls(response)
+        :rtype: ~azure.storage.blob.models.ListBlobsFlatSegmentResponse
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        restype: Literal["container"] = kwargs.pop("restype", _params.pop("restype", "container"))
+        comp: Literal["list"] = kwargs.pop("comp", _params.pop("comp", "list"))
+        cls: ClsType[_models.ListBlobsFlatSegmentResponse] = kwargs.pop("cls", None)
+
+        _request = build_list_blob_flat_segment_request(
+            url=self._config.url,
+            prefix=prefix,
+            marker=marker,
+            maxresults=maxresults,
+            include=include,
+            timeout=timeout,
+            request_id_parameter=request_id_parameter,
+            restype=restype,
+            comp=comp,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["Content-Type"] = self._deserialize("str", response.headers.get("Content-Type"))
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+
+        deserialized = self._deserialize("ListBlobsFlatSegmentResponse", pipeline_response.http_response)
+
+        if cls:
+            return cls(pipeline_response, deserialized, response_headers)  # type: ignore
+
+        return deserialized  # type: ignore
+
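+    # Usage sketch (assumption: ``client`` as above). The segment response
+    # carries ``next_marker``; feeding it back as ``marker`` pages through the
+    # full listing, as described in the ``marker`` parameter doc::
+    #
+    #     blobs = []
+    #     marker = None
+    #     while True:
+    #         segment = await client.container.list_blob_flat_segment(
+    #             marker=marker, maxresults=1000
+    #         )
+    #         blobs.extend(segment.segment.blob_items)
+    #         marker = segment.next_marker
+    #         if not marker:
+    #             break
+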
+    @distributed_trace_async
+    async def list_blob_hierarchy_segment(
+        self,
+        delimiter: str,
+        prefix: Optional[str] = None,
+        marker: Optional[str] = None,
+        maxresults: Optional[int] = None,
+        include: Optional[List[Union[str, _models.ListBlobsIncludeItem]]] = None,
+        timeout: Optional[int] = None,
+        request_id_parameter: Optional[str] = None,
+        **kwargs: Any
+    ) -> _models.ListBlobsHierarchySegmentResponse:
+        # pylint: disable=line-too-long
+        """[Update] The List Blobs operation returns a list of the blobs under the specified container.
+
+        :param delimiter: When the request includes this parameter, the operation returns a BlobPrefix
+         element in the response body that acts as a placeholder for all blobs whose names begin with
+         the same substring up to the appearance of the delimiter character. The delimiter may be a
+         single character or a string. Required.
+        :type delimiter: str
+        :param prefix: Filters the results to return only blobs whose name begins with the
+         specified prefix. Default value is None.
+        :type prefix: str
+        :param marker: A string value that identifies the portion of the list of blobs to be
+         returned with the next listing operation. The operation returns the NextMarker value within the
+         response body if the listing operation did not return all blobs remaining to be listed
+         with the current page. The NextMarker value can be used as the value for the marker parameter
+         in a subsequent call to request the next page of list items. The marker value is opaque to the
+         client. Default value is None.
+        :type marker: str
+        :param maxresults: Specifies the maximum number of blobs to return. If the request does
+         not specify maxresults, or specifies a value greater than 5000, the server will return up to
+         5000 items. Note that if the listing operation crosses a partition boundary, then the service
+         will return a continuation token for retrieving the remainder of the results. For this reason,
+         it is possible that the service will return fewer results than specified by maxresults, or than
+         the default of 5000. Default value is None.
+        :type maxresults: int
+        :param include: Include this parameter to specify one or more datasets to include in the
+         response. Default value is None.
+        :type include: list[str or ~azure.storage.blob.models.ListBlobsIncludeItem]
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :return: ListBlobsHierarchySegmentResponse or the result of cls(response)
+        :rtype: ~azure.storage.blob.models.ListBlobsHierarchySegmentResponse
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        restype: Literal["container"] = kwargs.pop("restype", _params.pop("restype", "container"))
+        comp: Literal["list"] = kwargs.pop("comp", _params.pop("comp", "list"))
+        cls: ClsType[_models.ListBlobsHierarchySegmentResponse] = kwargs.pop("cls", None)
+
+        _request = build_list_blob_hierarchy_segment_request(
+            url=self._config.url,
+            delimiter=delimiter,
+            prefix=prefix,
+            marker=marker,
+            maxresults=maxresults,
+            include=include,
+            timeout=timeout,
+            request_id_parameter=request_id_parameter,
+            restype=restype,
+            comp=comp,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["Content-Type"] = self._deserialize("str", response.headers.get("Content-Type"))
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+
+        deserialized = self._deserialize("ListBlobsHierarchySegmentResponse", pipeline_response.http_response)
+
+        if cls:
+            return cls(pipeline_response, deserialized, response_headers)  # type: ignore
+
+        return deserialized  # type: ignore
+
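+    # Usage sketch (assumption: ``client`` as above). With ``delimiter="/"``
+    # the response groups names under ``BlobPrefix`` items, emulating virtual
+    # directories alongside the blobs in the current "directory"::
+    #
+    #     segment = await client.container.list_blob_hierarchy_segment(
+    #         delimiter="/", prefix="logs/"
+    #     )
+    #     prefixes = segment.segment.blob_prefixes or []  # virtual directories
+    #     blobs = segment.segment.blob_items or []        # blobs at this level
+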
+    @distributed_trace_async
+    async def get_account_info(
+        self, timeout: Optional[int] = None, request_id_parameter: Optional[str] = None, **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """Returns the sku name and account kind.
+
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        restype: Literal["account"] = kwargs.pop("restype", _params.pop("restype", "account"))
+        comp: Literal["properties"] = kwargs.pop("comp", _params.pop("comp", "properties"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _request = build_get_account_info_request(
+            url=self._config.url,
+            timeout=timeout,
+            request_id_parameter=request_id_parameter,
+            restype=restype,
+            comp=comp,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+        response_headers["x-ms-sku-name"] = self._deserialize("str", response.headers.get("x-ms-sku-name"))
+        response_headers["x-ms-account-kind"] = self._deserialize("str", response.headers.get("x-ms-account-kind"))
+        response_headers["x-ms-is-hns-enabled"] = self._deserialize("bool", response.headers.get("x-ms-is-hns-enabled"))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
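+    # Usage sketch (assumption: ``client`` as above). The account details are
+    # surfaced only through response headers here, so a ``cls`` callback is
+    # the way to retrieve them from this operation::
+    #
+    #     def _account_info(_, __, headers):
+    #         return headers["x-ms-sku-name"], headers["x-ms-account-kind"]
+    #
+    #     sku, kind = await client.container.get_account_info(cls=_account_info)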
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/blob/_generated/aio/operations/_page_blob_operations.py b/.venv/lib/python3.12/site-packages/azure/storage/blob/_generated/aio/operations/_page_blob_operations.py
new file mode 100644
index 00000000..05771ac6
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/blob/_generated/aio/operations/_page_blob_operations.py
@@ -0,0 +1,1459 @@
+# pylint: disable=too-many-lines
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+import datetime
+import sys
+from typing import Any, Callable, Dict, IO, Literal, Optional, TypeVar, Union
+
+from azure.core import AsyncPipelineClient
+from azure.core.exceptions import (
+    ClientAuthenticationError,
+    HttpResponseError,
+    ResourceExistsError,
+    ResourceNotFoundError,
+    ResourceNotModifiedError,
+    map_error,
+)
+from azure.core.pipeline import PipelineResponse
+from azure.core.rest import AsyncHttpResponse, HttpRequest
+from azure.core.tracing.decorator_async import distributed_trace_async
+from azure.core.utils import case_insensitive_dict
+
+from ... import models as _models
+from ..._serialization import Deserializer, Serializer
+from ...operations._page_blob_operations import (
+    build_clear_pages_request,
+    build_copy_incremental_request,
+    build_create_request,
+    build_get_page_ranges_diff_request,
+    build_get_page_ranges_request,
+    build_resize_request,
+    build_update_sequence_number_request,
+    build_upload_pages_from_url_request,
+    build_upload_pages_request,
+)
+from .._configuration import AzureBlobStorageConfiguration
+
+if sys.version_info >= (3, 9):
+    from collections.abc import MutableMapping
+else:
+    from typing import MutableMapping  # type: ignore
+T = TypeVar("T")
+ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
+
+
+class PageBlobOperations:
+    """
+    .. warning::
+        **DO NOT** instantiate this class directly.
+
+        Instead, you should access the following operations through
+        :class:`~azure.storage.blob.aio.AzureBlobStorage`'s
+        :attr:`page_blob` attribute.
+    """
+
+    models = _models
+
+    def __init__(self, *args, **kwargs) -> None:
+        input_args = list(args)
+        self._client: AsyncPipelineClient = input_args.pop(0) if input_args else kwargs.pop("client")
+        self._config: AzureBlobStorageConfiguration = input_args.pop(0) if input_args else kwargs.pop("config")
+        self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer")
+        self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer")
+
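+    # Usage sketch (assumption: constructor arguments are abbreviated; the
+    # client path follows the class docstring above). Per the warning, this
+    # group is reached through the async client's ``page_blob`` attribute,
+    # never constructed directly::
+    #
+    #     from azure.storage.blob.aio import AzureBlobStorage
+    #
+    #     client = AzureBlobStorage(url="https://myaccount.blob.core.windows.net/c/b")
+    #     ops = client.page_blob
+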
+    @distributed_trace_async
+    async def create(
+        self,
+        content_length: int,
+        blob_content_length: int,
+        timeout: Optional[int] = None,
+        tier: Optional[Union[str, _models.PremiumPageBlobAccessTier]] = None,
+        metadata: Optional[Dict[str, str]] = None,
+        blob_sequence_number: int = 0,
+        request_id_parameter: Optional[str] = None,
+        blob_tags_string: Optional[str] = None,
+        immutability_policy_expiry: Optional[datetime.datetime] = None,
+        immutability_policy_mode: Optional[Union[str, _models.BlobImmutabilityPolicyMode]] = None,
+        legal_hold: Optional[bool] = None,
+        blob_http_headers: Optional[_models.BlobHTTPHeaders] = None,
+        lease_access_conditions: Optional[_models.LeaseAccessConditions] = None,
+        cpk_info: Optional[_models.CpkInfo] = None,
+        cpk_scope_info: Optional[_models.CpkScopeInfo] = None,
+        modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """The Create operation creates a new page blob.
+
+        :param content_length: The length of the request. Required.
+        :type content_length: int
+        :param blob_content_length: This header specifies the maximum size for the page blob, up to 1
+         TB. The page blob size must be aligned to a 512-byte boundary. Required.
+        :type blob_content_length: int
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param tier: Optional. Indicates the tier to be set on the page blob. Known values are: "P4",
+         "P6", "P10", "P15", "P20", "P30", "P40", "P50", "P60", "P70", and "P80". Default value is None.
+        :type tier: str or ~azure.storage.blob.models.PremiumPageBlobAccessTier
+        :param metadata: Optional. Specifies a user-defined name-value pair associated with the blob.
+         If no name-value pairs are specified, the operation will copy the metadata from the source blob
+         or file to the destination blob. If one or more name-value pairs are specified, the destination
+         blob is created with the specified metadata, and metadata is not copied from the source blob or
+         file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming
+         rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more
+         information. Default value is None.
+        :type metadata: dict[str, str]
+        :param blob_sequence_number: Set for page blobs only. The sequence number is a user-controlled
+         value that you can use to track requests. The value of the sequence number must be between 0
+         and 2^63 - 1. Default value is 0.
+        :type blob_sequence_number: int
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param blob_tags_string: Optional. Used to set blob tags in various blob operations. Default
+         value is None.
+        :type blob_tags_string: str
+        :param immutability_policy_expiry: Specifies the date time when the blob's immutability policy
+         is set to expire. Default value is None.
+        :type immutability_policy_expiry: ~datetime.datetime
+        :param immutability_policy_mode: Specifies the immutability policy mode to set on the blob.
+         Known values are: "Mutable", "Unlocked", and "Locked". Default value is None.
+        :type immutability_policy_mode: str or ~azure.storage.blob.models.BlobImmutabilityPolicyMode
+        :param legal_hold: Specifies whether a legal hold should be set on the blob. Default value is None.
+        :type legal_hold: bool
+        :param blob_http_headers: Parameter group. Default value is None.
+        :type blob_http_headers: ~azure.storage.blob.models.BlobHTTPHeaders
+        :param lease_access_conditions: Parameter group. Default value is None.
+        :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions
+        :param cpk_info: Parameter group. Default value is None.
+        :type cpk_info: ~azure.storage.blob.models.CpkInfo
+        :param cpk_scope_info: Parameter group. Default value is None.
+        :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo
+        :param modified_access_conditions: Parameter group. Default value is None.
+        :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+        _params = kwargs.pop("params", {}) or {}
+
+        blob_type: Literal["PageBlob"] = kwargs.pop("blob_type", _headers.pop("x-ms-blob-type", "PageBlob"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _blob_content_type = None
+        _blob_content_encoding = None
+        _blob_content_language = None
+        _blob_content_md5 = None
+        _blob_cache_control = None
+        _lease_id = None
+        _blob_content_disposition = None
+        _encryption_key = None
+        _encryption_key_sha256 = None
+        _encryption_algorithm = None
+        _encryption_scope = None
+        _if_modified_since = None
+        _if_unmodified_since = None
+        _if_match = None
+        _if_none_match = None
+        _if_tags = None
+        if blob_http_headers is not None:
+            _blob_cache_control = blob_http_headers.blob_cache_control
+            _blob_content_disposition = blob_http_headers.blob_content_disposition
+            _blob_content_encoding = blob_http_headers.blob_content_encoding
+            _blob_content_language = blob_http_headers.blob_content_language
+            _blob_content_md5 = blob_http_headers.blob_content_md5
+            _blob_content_type = blob_http_headers.blob_content_type
+        if lease_access_conditions is not None:
+            _lease_id = lease_access_conditions.lease_id
+        if cpk_info is not None:
+            _encryption_algorithm = cpk_info.encryption_algorithm
+            _encryption_key = cpk_info.encryption_key
+            _encryption_key_sha256 = cpk_info.encryption_key_sha256
+        if cpk_scope_info is not None:
+            _encryption_scope = cpk_scope_info.encryption_scope
+        if modified_access_conditions is not None:
+            _if_match = modified_access_conditions.if_match
+            _if_modified_since = modified_access_conditions.if_modified_since
+            _if_none_match = modified_access_conditions.if_none_match
+            _if_tags = modified_access_conditions.if_tags
+            _if_unmodified_since = modified_access_conditions.if_unmodified_since
+
+        _request = build_create_request(
+            url=self._config.url,
+            content_length=content_length,
+            blob_content_length=blob_content_length,
+            timeout=timeout,
+            tier=tier,
+            blob_content_type=_blob_content_type,
+            blob_content_encoding=_blob_content_encoding,
+            blob_content_language=_blob_content_language,
+            blob_content_md5=_blob_content_md5,
+            blob_cache_control=_blob_cache_control,
+            metadata=metadata,
+            lease_id=_lease_id,
+            blob_content_disposition=_blob_content_disposition,
+            encryption_key=_encryption_key,
+            encryption_key_sha256=_encryption_key_sha256,
+            encryption_algorithm=_encryption_algorithm,
+            encryption_scope=_encryption_scope,
+            if_modified_since=_if_modified_since,
+            if_unmodified_since=_if_unmodified_since,
+            if_match=_if_match,
+            if_none_match=_if_none_match,
+            if_tags=_if_tags,
+            blob_sequence_number=blob_sequence_number,
+            request_id_parameter=request_id_parameter,
+            blob_tags_string=blob_tags_string,
+            immutability_policy_expiry=immutability_policy_expiry,
+            immutability_policy_mode=immutability_policy_mode,
+            legal_hold=legal_hold,
+            blob_type=blob_type,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [201]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["Content-MD5"] = self._deserialize("bytearray", response.headers.get("Content-MD5"))
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["x-ms-version-id"] = self._deserialize("str", response.headers.get("x-ms-version-id"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+        response_headers["x-ms-request-server-encrypted"] = self._deserialize(
+            "bool", response.headers.get("x-ms-request-server-encrypted")
+        )
+        response_headers["x-ms-encryption-key-sha256"] = self._deserialize(
+            "str", response.headers.get("x-ms-encryption-key-sha256")
+        )
+        response_headers["x-ms-encryption-scope"] = self._deserialize(
+            "str", response.headers.get("x-ms-encryption-scope")
+        )
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
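+    # Usage sketch (assumption: ``ops`` is this operation group, e.g.
+    # ``client.page_blob``). Per the ``blob_content_length`` doc above, the
+    # blob size must be 512-byte aligned; the create request carries no body,
+    # so ``content_length`` is 0::
+    #
+    #     PAGE = 512
+    #     size = 4 * 1024 * 1024  # 4 MiB, a multiple of 512
+    #     assert size % PAGE == 0
+    #     await ops.create(content_length=0, blob_content_length=size)
+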
+    @distributed_trace_async
+    async def upload_pages(
+        self,
+        content_length: int,
+        body: IO[bytes],
+        transactional_content_md5: Optional[bytes] = None,
+        transactional_content_crc64: Optional[bytes] = None,
+        timeout: Optional[int] = None,
+        range: Optional[str] = None,
+        request_id_parameter: Optional[str] = None,
+        structured_body_type: Optional[str] = None,
+        structured_content_length: Optional[int] = None,
+        lease_access_conditions: Optional[_models.LeaseAccessConditions] = None,
+        cpk_info: Optional[_models.CpkInfo] = None,
+        cpk_scope_info: Optional[_models.CpkScopeInfo] = None,
+        sequence_number_access_conditions: Optional[_models.SequenceNumberAccessConditions] = None,
+        modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """The Upload Pages operation writes a range of pages to a page blob.
+
+        :param content_length: The length of the request. Required.
+        :type content_length: int
+        :param body: Initial data. Required.
+        :type body: IO[bytes]
+        :param transactional_content_md5: Specify the transactional md5 for the body, to be validated
+         by the service. Default value is None.
+        :type transactional_content_md5: bytes
+        :param transactional_content_crc64: Specify the transactional crc64 for the body, to be
+         validated by the service. Default value is None.
+        :type transactional_content_crc64: bytes
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param range: Specifies the range of bytes to be written as a page. Default value is None.
+        :type range: str
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param structured_body_type: Required if the request body is a structured message. Specifies
+         the message schema version and properties. Default value is None.
+        :type structured_body_type: str
+        :param structured_content_length: Required if the request body is a structured message.
+         Specifies the length of the blob/file content inside the message body. Will always be smaller
+         than Content-Length. Default value is None.
+        :type structured_content_length: int
+        :param lease_access_conditions: Parameter group. Default value is None.
+        :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions
+        :param cpk_info: Parameter group. Default value is None.
+        :type cpk_info: ~azure.storage.blob.models.CpkInfo
+        :param cpk_scope_info: Parameter group. Default value is None.
+        :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo
+        :param sequence_number_access_conditions: Parameter group. Default value is None.
+        :type sequence_number_access_conditions:
+         ~azure.storage.blob.models.SequenceNumberAccessConditions
+        :param modified_access_conditions: Parameter group. Default value is None.
+        :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        comp: Literal["page"] = kwargs.pop("comp", _params.pop("comp", "page"))
+        page_write: Literal["update"] = kwargs.pop("page_write", _headers.pop("x-ms-page-write", "update"))
+        content_type: str = kwargs.pop("content_type", _headers.pop("Content-Type", "application/octet-stream"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _lease_id = None
+        _encryption_key = None
+        _encryption_key_sha256 = None
+        _encryption_algorithm = None
+        _encryption_scope = None
+        _if_sequence_number_less_than_or_equal_to = None
+        _if_sequence_number_less_than = None
+        _if_sequence_number_equal_to = None
+        _if_modified_since = None
+        _if_unmodified_since = None
+        _if_match = None
+        _if_none_match = None
+        _if_tags = None
+        if lease_access_conditions is not None:
+            _lease_id = lease_access_conditions.lease_id
+        if cpk_info is not None:
+            _encryption_algorithm = cpk_info.encryption_algorithm
+            _encryption_key = cpk_info.encryption_key
+            _encryption_key_sha256 = cpk_info.encryption_key_sha256
+        if cpk_scope_info is not None:
+            _encryption_scope = cpk_scope_info.encryption_scope
+        if sequence_number_access_conditions is not None:
+            _if_sequence_number_equal_to = sequence_number_access_conditions.if_sequence_number_equal_to
+            _if_sequence_number_less_than = sequence_number_access_conditions.if_sequence_number_less_than
+            _if_sequence_number_less_than_or_equal_to = (
+                sequence_number_access_conditions.if_sequence_number_less_than_or_equal_to
+            )
+        if modified_access_conditions is not None:
+            _if_match = modified_access_conditions.if_match
+            _if_modified_since = modified_access_conditions.if_modified_since
+            _if_none_match = modified_access_conditions.if_none_match
+            _if_tags = modified_access_conditions.if_tags
+            _if_unmodified_since = modified_access_conditions.if_unmodified_since
+        _content = body
+
+        _request = build_upload_pages_request(
+            url=self._config.url,
+            content_length=content_length,
+            transactional_content_md5=transactional_content_md5,
+            transactional_content_crc64=transactional_content_crc64,
+            timeout=timeout,
+            range=range,
+            lease_id=_lease_id,
+            encryption_key=_encryption_key,
+            encryption_key_sha256=_encryption_key_sha256,
+            encryption_algorithm=_encryption_algorithm,
+            encryption_scope=_encryption_scope,
+            if_sequence_number_less_than_or_equal_to=_if_sequence_number_less_than_or_equal_to,
+            if_sequence_number_less_than=_if_sequence_number_less_than,
+            if_sequence_number_equal_to=_if_sequence_number_equal_to,
+            if_modified_since=_if_modified_since,
+            if_unmodified_since=_if_unmodified_since,
+            if_match=_if_match,
+            if_none_match=_if_none_match,
+            if_tags=_if_tags,
+            request_id_parameter=request_id_parameter,
+            structured_body_type=structured_body_type,
+            structured_content_length=structured_content_length,
+            comp=comp,
+            page_write=page_write,
+            content_type=content_type,
+            version=self._config.version,
+            content=_content,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [201]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["Content-MD5"] = self._deserialize("bytearray", response.headers.get("Content-MD5"))
+        response_headers["x-ms-content-crc64"] = self._deserialize(
+            "bytearray", response.headers.get("x-ms-content-crc64")
+        )
+        response_headers["x-ms-blob-sequence-number"] = self._deserialize(
+            "int", response.headers.get("x-ms-blob-sequence-number")
+        )
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+        response_headers["x-ms-request-server-encrypted"] = self._deserialize(
+            "bool", response.headers.get("x-ms-request-server-encrypted")
+        )
+        response_headers["x-ms-encryption-key-sha256"] = self._deserialize(
+            "str", response.headers.get("x-ms-encryption-key-sha256")
+        )
+        response_headers["x-ms-encryption-scope"] = self._deserialize(
+            "str", response.headers.get("x-ms-encryption-scope")
+        )
+        response_headers["x-ms-structured-body"] = self._deserialize(
+            "str", response.headers.get("x-ms-structured-body")
+        )
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
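+    # Usage sketch (assumption: ``ops`` as above; the ``range`` header uses
+    # the inclusive ``bytes=start-end`` form, with 512-byte-aligned offset and
+    # length for page writes)::
+    #
+    #     import io
+    #
+    #     data = b"\x00" * 512
+    #     await ops.upload_pages(
+    #         content_length=len(data),
+    #         body=io.BytesIO(data),
+    #         range=f"bytes=0-{len(data) - 1}",
+    #     )
+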
+    @distributed_trace_async
+    async def clear_pages(
+        self,
+        content_length: int,
+        timeout: Optional[int] = None,
+        range: Optional[str] = None,
+        request_id_parameter: Optional[str] = None,
+        lease_access_conditions: Optional[_models.LeaseAccessConditions] = None,
+        cpk_info: Optional[_models.CpkInfo] = None,
+        cpk_scope_info: Optional[_models.CpkScopeInfo] = None,
+        sequence_number_access_conditions: Optional[_models.SequenceNumberAccessConditions] = None,
+        modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """The Clear Pages operation clears a set of pages from a page blob.
+
+        :param content_length: The length of the request. Required.
+        :type content_length: int
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param range: The range of bytes to clear. The range must be aligned to 512-byte page
+         boundaries. Default value is None.
+        :type range: str
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param lease_access_conditions: Parameter group. Default value is None.
+        :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions
+        :param cpk_info: Parameter group. Default value is None.
+        :type cpk_info: ~azure.storage.blob.models.CpkInfo
+        :param cpk_scope_info: Parameter group. Default value is None.
+        :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo
+        :param sequence_number_access_conditions: Parameter group. Default value is None.
+        :type sequence_number_access_conditions:
+         ~azure.storage.blob.models.SequenceNumberAccessConditions
+        :param modified_access_conditions: Parameter group. Default value is None.
+        :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        comp: Literal["page"] = kwargs.pop("comp", _params.pop("comp", "page"))
+        page_write: Literal["clear"] = kwargs.pop("page_write", _headers.pop("x-ms-page-write", "clear"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _lease_id = None
+        _encryption_key = None
+        _encryption_key_sha256 = None
+        _encryption_algorithm = None
+        _encryption_scope = None
+        _if_sequence_number_less_than_or_equal_to = None
+        _if_sequence_number_less_than = None
+        _if_sequence_number_equal_to = None
+        _if_modified_since = None
+        _if_unmodified_since = None
+        _if_match = None
+        _if_none_match = None
+        _if_tags = None
+        if lease_access_conditions is not None:
+            _lease_id = lease_access_conditions.lease_id
+        if cpk_info is not None:
+            _encryption_algorithm = cpk_info.encryption_algorithm
+            _encryption_key = cpk_info.encryption_key
+            _encryption_key_sha256 = cpk_info.encryption_key_sha256
+        if cpk_scope_info is not None:
+            _encryption_scope = cpk_scope_info.encryption_scope
+        if sequence_number_access_conditions is not None:
+            _if_sequence_number_equal_to = sequence_number_access_conditions.if_sequence_number_equal_to
+            _if_sequence_number_less_than = sequence_number_access_conditions.if_sequence_number_less_than
+            _if_sequence_number_less_than_or_equal_to = (
+                sequence_number_access_conditions.if_sequence_number_less_than_or_equal_to
+            )
+        if modified_access_conditions is not None:
+            _if_match = modified_access_conditions.if_match
+            _if_modified_since = modified_access_conditions.if_modified_since
+            _if_none_match = modified_access_conditions.if_none_match
+            _if_tags = modified_access_conditions.if_tags
+            _if_unmodified_since = modified_access_conditions.if_unmodified_since
+
+        _request = build_clear_pages_request(
+            url=self._config.url,
+            content_length=content_length,
+            timeout=timeout,
+            range=range,
+            lease_id=_lease_id,
+            encryption_key=_encryption_key,
+            encryption_key_sha256=_encryption_key_sha256,
+            encryption_algorithm=_encryption_algorithm,
+            encryption_scope=_encryption_scope,
+            if_sequence_number_less_than_or_equal_to=_if_sequence_number_less_than_or_equal_to,
+            if_sequence_number_less_than=_if_sequence_number_less_than,
+            if_sequence_number_equal_to=_if_sequence_number_equal_to,
+            if_modified_since=_if_modified_since,
+            if_unmodified_since=_if_unmodified_since,
+            if_match=_if_match,
+            if_none_match=_if_none_match,
+            if_tags=_if_tags,
+            request_id_parameter=request_id_parameter,
+            comp=comp,
+            page_write=page_write,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [201]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["Content-MD5"] = self._deserialize("bytearray", response.headers.get("Content-MD5"))
+        response_headers["x-ms-content-crc64"] = self._deserialize(
+            "bytearray", response.headers.get("x-ms-content-crc64")
+        )
+        response_headers["x-ms-blob-sequence-number"] = self._deserialize(
+            "int", response.headers.get("x-ms-blob-sequence-number")
+        )
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
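+    # Usage sketch (illustrative): ``BlobClient.clear_page`` on the public async
+    # client wraps the generated clear_pages operation above; offset and length
+    # must be 512-byte aligned. ``blob`` is assumed to be an open async BlobClient
+    # for an existing page blob, as in the earlier sketch.
+    #
+    #   # Zero out the first page of the blob.
+    #   await blob.clear_page(offset=0, length=512)
+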
+    @distributed_trace_async
+    async def upload_pages_from_url(
+        self,
+        source_url: str,
+        source_range: str,
+        content_length: int,
+        range: str,
+        source_content_md5: Optional[bytes] = None,
+        source_contentcrc64: Optional[bytes] = None,
+        timeout: Optional[int] = None,
+        request_id_parameter: Optional[str] = None,
+        copy_source_authorization: Optional[str] = None,
+        cpk_info: Optional[_models.CpkInfo] = None,
+        cpk_scope_info: Optional[_models.CpkScopeInfo] = None,
+        lease_access_conditions: Optional[_models.LeaseAccessConditions] = None,
+        sequence_number_access_conditions: Optional[_models.SequenceNumberAccessConditions] = None,
+        modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None,
+        source_modified_access_conditions: Optional[_models.SourceModifiedAccessConditions] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """The Upload Pages operation writes a range of pages to a page blob where the contents are read
+        from a URL.
+
+        :param source_url: Specify a URL to the copy source. Required.
+        :type source_url: str
+        :param source_range: Bytes of source data in the specified range. The length of this range
+         should match the Content-Length header and the x-ms-range/Range destination range header.
+         Required.
+        :type source_range: str
+        :param content_length: The length of the request. Required.
+        :type content_length: int
+        :param range: The range of bytes to which the source range would be written. The range must
+         be 512-byte aligned, and the range end is required. Required.
+        :type range: str
+        :param source_content_md5: Specify the MD5 hash calculated for the range of bytes that must
+         be read from the copy source. Default value is None.
+        :type source_content_md5: bytes
+        :param source_contentcrc64: Specify the CRC64 checksum calculated for the range of bytes that
+         must be read from the copy source. Default value is None.
+        :type source_contentcrc64: bytes
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param copy_source_authorization: Only Bearer type is supported. Credentials should be a valid
+         OAuth access token to copy source. Default value is None.
+        :type copy_source_authorization: str
+        :param cpk_info: Parameter group. Default value is None.
+        :type cpk_info: ~azure.storage.blob.models.CpkInfo
+        :param cpk_scope_info: Parameter group. Default value is None.
+        :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo
+        :param lease_access_conditions: Parameter group. Default value is None.
+        :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions
+        :param sequence_number_access_conditions: Parameter group. Default value is None.
+        :type sequence_number_access_conditions:
+         ~azure.storage.blob.models.SequenceNumberAccessConditions
+        :param modified_access_conditions: Parameter group. Default value is None.
+        :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions
+        :param source_modified_access_conditions: Parameter group. Default value is None.
+        :type source_modified_access_conditions:
+         ~azure.storage.blob.models.SourceModifiedAccessConditions
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        comp: Literal["page"] = kwargs.pop("comp", _params.pop("comp", "page"))
+        page_write: Literal["update"] = kwargs.pop("page_write", _headers.pop("x-ms-page-write", "update"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _encryption_key = None
+        _encryption_key_sha256 = None
+        _encryption_algorithm = None
+        _encryption_scope = None
+        _lease_id = None
+        _if_sequence_number_less_than_or_equal_to = None
+        _if_sequence_number_less_than = None
+        _if_sequence_number_equal_to = None
+        _if_modified_since = None
+        _if_unmodified_since = None
+        _if_match = None
+        _if_none_match = None
+        _if_tags = None
+        _source_if_modified_since = None
+        _source_if_unmodified_since = None
+        _source_if_match = None
+        _source_if_none_match = None
+        if cpk_info is not None:
+            _encryption_algorithm = cpk_info.encryption_algorithm
+            _encryption_key = cpk_info.encryption_key
+            _encryption_key_sha256 = cpk_info.encryption_key_sha256
+        if cpk_scope_info is not None:
+            _encryption_scope = cpk_scope_info.encryption_scope
+        if lease_access_conditions is not None:
+            _lease_id = lease_access_conditions.lease_id
+        if sequence_number_access_conditions is not None:
+            _if_sequence_number_equal_to = sequence_number_access_conditions.if_sequence_number_equal_to
+            _if_sequence_number_less_than = sequence_number_access_conditions.if_sequence_number_less_than
+            _if_sequence_number_less_than_or_equal_to = (
+                sequence_number_access_conditions.if_sequence_number_less_than_or_equal_to
+            )
+        if modified_access_conditions is not None:
+            _if_match = modified_access_conditions.if_match
+            _if_modified_since = modified_access_conditions.if_modified_since
+            _if_none_match = modified_access_conditions.if_none_match
+            _if_tags = modified_access_conditions.if_tags
+            _if_unmodified_since = modified_access_conditions.if_unmodified_since
+        if source_modified_access_conditions is not None:
+            _source_if_match = source_modified_access_conditions.source_if_match
+            _source_if_modified_since = source_modified_access_conditions.source_if_modified_since
+            _source_if_none_match = source_modified_access_conditions.source_if_none_match
+            _source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since
+
+        _request = build_upload_pages_from_url_request(
+            url=self._config.url,
+            source_url=source_url,
+            source_range=source_range,
+            content_length=content_length,
+            range=range,
+            source_content_md5=source_content_md5,
+            source_contentcrc64=source_contentcrc64,
+            timeout=timeout,
+            encryption_key=_encryption_key,
+            encryption_key_sha256=_encryption_key_sha256,
+            encryption_algorithm=_encryption_algorithm,
+            encryption_scope=_encryption_scope,
+            lease_id=_lease_id,
+            if_sequence_number_less_than_or_equal_to=_if_sequence_number_less_than_or_equal_to,
+            if_sequence_number_less_than=_if_sequence_number_less_than,
+            if_sequence_number_equal_to=_if_sequence_number_equal_to,
+            if_modified_since=_if_modified_since,
+            if_unmodified_since=_if_unmodified_since,
+            if_match=_if_match,
+            if_none_match=_if_none_match,
+            if_tags=_if_tags,
+            source_if_modified_since=_source_if_modified_since,
+            source_if_unmodified_since=_source_if_unmodified_since,
+            source_if_match=_source_if_match,
+            source_if_none_match=_source_if_none_match,
+            request_id_parameter=request_id_parameter,
+            copy_source_authorization=copy_source_authorization,
+            comp=comp,
+            page_write=page_write,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [201]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["Content-MD5"] = self._deserialize("bytearray", response.headers.get("Content-MD5"))
+        response_headers["x-ms-content-crc64"] = self._deserialize(
+            "bytearray", response.headers.get("x-ms-content-crc64")
+        )
+        response_headers["x-ms-blob-sequence-number"] = self._deserialize(
+            "int", response.headers.get("x-ms-blob-sequence-number")
+        )
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+        response_headers["x-ms-request-server-encrypted"] = self._deserialize(
+            "bool", response.headers.get("x-ms-request-server-encrypted")
+        )
+        response_headers["x-ms-encryption-key-sha256"] = self._deserialize(
+            "str", response.headers.get("x-ms-encryption-key-sha256")
+        )
+        response_headers["x-ms-encryption-scope"] = self._deserialize(
+            "str", response.headers.get("x-ms-encryption-scope")
+        )
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
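+    # Usage sketch (illustrative): the public wrapper for the operation above is
+    # ``BlobClient.upload_pages_from_url``. Both the destination offset and the
+    # source offset must be 512-byte aligned, and the hypothetical source URL must
+    # be readable (public or carrying a SAS token).
+    #
+    #   await blob.upload_pages_from_url(
+    #       source_url="https://srcaccount.blob.core.windows.net/pages/src.vhd?<sas>",
+    #       offset=0,        # destination offset within this page blob
+    #       length=512,
+    #       source_offset=0,
+    #   )
+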
+    @distributed_trace_async
+    async def get_page_ranges(
+        self,
+        snapshot: Optional[str] = None,
+        timeout: Optional[int] = None,
+        range: Optional[str] = None,
+        request_id_parameter: Optional[str] = None,
+        marker: Optional[str] = None,
+        maxresults: Optional[int] = None,
+        lease_access_conditions: Optional[_models.LeaseAccessConditions] = None,
+        modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None,
+        **kwargs: Any
+    ) -> _models.PageList:
+        # pylint: disable=line-too-long
+        """The Get Page Ranges operation returns the list of valid page ranges for a page blob or snapshot
+        of a page blob.
+
+        :param snapshot: The snapshot parameter is an opaque DateTime value that, when present,
+         specifies the blob snapshot to retrieve. For more information on working with blob snapshots,
+         see :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob">Creating
+         a Snapshot of a Blob.</a>`. Default value is None.
+        :type snapshot: str
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param range: Return page ranges only for the bytes of the blob in the specified range.
+         Default value is None.
+        :type range: str
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param marker: A string value that identifies the portion of the list of page ranges to be
+         returned with the next listing operation. The operation returns the NextMarker value within
+         the response body if the listing operation did not return all page ranges remaining to be
+         listed with the current page. The NextMarker value can be used as the value for the marker
+         parameter in a subsequent call to request the next page of list items. The marker value is
+         opaque to the client. Default value is None.
+        :type marker: str
+        :param maxresults: Specifies the maximum number of page ranges to return. If the request does
+         not specify maxresults, or specifies a value greater than 5000, the server will return up to
+         5000 items. Note that if the listing operation crosses a partition boundary, then the service
+         will return a continuation token for retrieving the remainder of the results. For this
+         reason, it is possible that the service will return fewer results than specified by
+         maxresults, or than the default of 5000. Default value is None.
+        :type maxresults: int
+        :param lease_access_conditions: Parameter group. Default value is None.
+        :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions
+        :param modified_access_conditions: Parameter group. Default value is None.
+        :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions
+        :return: PageList or the result of cls(response)
+        :rtype: ~azure.storage.blob.models.PageList
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        comp: Literal["pagelist"] = kwargs.pop("comp", _params.pop("comp", "pagelist"))
+        cls: ClsType[_models.PageList] = kwargs.pop("cls", None)
+
+        _lease_id = None
+        _if_modified_since = None
+        _if_unmodified_since = None
+        _if_match = None
+        _if_none_match = None
+        _if_tags = None
+        if lease_access_conditions is not None:
+            _lease_id = lease_access_conditions.lease_id
+        if modified_access_conditions is not None:
+            _if_match = modified_access_conditions.if_match
+            _if_modified_since = modified_access_conditions.if_modified_since
+            _if_none_match = modified_access_conditions.if_none_match
+            _if_tags = modified_access_conditions.if_tags
+            _if_unmodified_since = modified_access_conditions.if_unmodified_since
+
+        _request = build_get_page_ranges_request(
+            url=self._config.url,
+            snapshot=snapshot,
+            timeout=timeout,
+            range=range,
+            lease_id=_lease_id,
+            if_modified_since=_if_modified_since,
+            if_unmodified_since=_if_unmodified_since,
+            if_match=_if_match,
+            if_none_match=_if_none_match,
+            if_tags=_if_tags,
+            request_id_parameter=request_id_parameter,
+            marker=marker,
+            maxresults=maxresults,
+            comp=comp,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["x-ms-blob-content-length"] = self._deserialize(
+            "int", response.headers.get("x-ms-blob-content-length")
+        )
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+
+        deserialized = self._deserialize("PageList", pipeline_response.http_response)
+
+        if cls:
+            return cls(pipeline_response, deserialized, response_headers)  # type: ignore
+
+        return deserialized  # type: ignore
+
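+    # Usage sketch (illustrative): ``BlobClient.get_page_ranges`` wraps the
+    # operation above and returns two lists of dicts with ``start``/``end`` keys:
+    # valid page ranges first, cleared ranges second.
+    #
+    #   ranges, cleared = await blob.get_page_ranges()
+    #   for r in ranges:
+    #       print("valid bytes:", r["start"], "-", r["end"])
+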
+    @distributed_trace_async
+    async def get_page_ranges_diff(
+        self,
+        snapshot: Optional[str] = None,
+        timeout: Optional[int] = None,
+        prevsnapshot: Optional[str] = None,
+        prev_snapshot_url: Optional[str] = None,
+        range: Optional[str] = None,
+        request_id_parameter: Optional[str] = None,
+        marker: Optional[str] = None,
+        maxresults: Optional[int] = None,
+        lease_access_conditions: Optional[_models.LeaseAccessConditions] = None,
+        modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None,
+        **kwargs: Any
+    ) -> _models.PageList:
+        # pylint: disable=line-too-long
+        """The Get Page Ranges Diff operation returns the list of valid page ranges for a page blob that
+        were changed between target blob and previous snapshot.
+
+        :param snapshot: The snapshot parameter is an opaque DateTime value that, when present,
+         specifies the blob snapshot to retrieve. For more information on working with blob snapshots,
+         see :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob">Creating
+         a Snapshot of a Blob.</a>`. Default value is None.
+        :type snapshot: str
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param prevsnapshot: Optional in version 2015-07-08 and newer. The prevsnapshot parameter is a
+         DateTime value that specifies that the response will contain only pages that were changed
+         between the target blob and the previous snapshot. Changed pages include both updated and
+         cleared pages. The target blob may be a snapshot, as long as the snapshot specified by
+         prevsnapshot is the older of the two. Note that incremental snapshots are currently supported
+         only for blobs created on or after January 1, 2016. Default value is None.
+        :type prevsnapshot: str
+        :param prev_snapshot_url: Optional. This header is only supported in service versions
+         2019-04-19 and after and specifies the URL of a previous snapshot of the target blob. The
+         response will only contain pages that were changed between the target blob and its previous
+         snapshot. Default value is None.
+        :type prev_snapshot_url: str
+        :param range: Return page ranges only for the bytes of the blob in the specified range.
+         Default value is None.
+        :type range: str
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param marker: A string value that identifies the portion of the list of page ranges to be
+         returned with the next listing operation. The operation returns the NextMarker value within
+         the response body if the listing operation did not return all page ranges remaining to be
+         listed with the current page. The NextMarker value can be used as the value for the marker
+         parameter in a subsequent call to request the next page of list items. The marker value is
+         opaque to the client. Default value is None.
+        :type marker: str
+        :param maxresults: Specifies the maximum number of page ranges to return. If the request does
+         not specify maxresults, or specifies a value greater than 5000, the server will return up to
+         5000 items. Note that if the listing operation crosses a partition boundary, then the service
+         will return a continuation token for retrieving the remainder of the results. For this
+         reason, it is possible that the service will return fewer results than specified by
+         maxresults, or than the default of 5000. Default value is None.
+        :type maxresults: int
+        :param lease_access_conditions: Parameter group. Default value is None.
+        :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions
+        :param modified_access_conditions: Parameter group. Default value is None.
+        :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions
+        :return: PageList or the result of cls(response)
+        :rtype: ~azure.storage.blob.models.PageList
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        comp: Literal["pagelist"] = kwargs.pop("comp", _params.pop("comp", "pagelist"))
+        cls: ClsType[_models.PageList] = kwargs.pop("cls", None)
+
+        _lease_id = None
+        _if_modified_since = None
+        _if_unmodified_since = None
+        _if_match = None
+        _if_none_match = None
+        _if_tags = None
+        if lease_access_conditions is not None:
+            _lease_id = lease_access_conditions.lease_id
+        if modified_access_conditions is not None:
+            _if_match = modified_access_conditions.if_match
+            _if_modified_since = modified_access_conditions.if_modified_since
+            _if_none_match = modified_access_conditions.if_none_match
+            _if_tags = modified_access_conditions.if_tags
+            _if_unmodified_since = modified_access_conditions.if_unmodified_since
+
+        _request = build_get_page_ranges_diff_request(
+            url=self._config.url,
+            snapshot=snapshot,
+            timeout=timeout,
+            prevsnapshot=prevsnapshot,
+            prev_snapshot_url=prev_snapshot_url,
+            range=range,
+            lease_id=_lease_id,
+            if_modified_since=_if_modified_since,
+            if_unmodified_since=_if_unmodified_since,
+            if_match=_if_match,
+            if_none_match=_if_none_match,
+            if_tags=_if_tags,
+            request_id_parameter=request_id_parameter,
+            marker=marker,
+            maxresults=maxresults,
+            comp=comp,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["x-ms-blob-content-length"] = self._deserialize(
+            "int", response.headers.get("x-ms-blob-content-length")
+        )
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+
+        deserialized = self._deserialize("PageList", pipeline_response.http_response)
+
+        if cls:
+            return cls(pipeline_response, deserialized, response_headers)  # type: ignore
+
+        return deserialized  # type: ignore
+
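+    # Usage sketch (illustrative): the diff variant is exposed through the same
+    # public wrapper by passing ``previous_snapshot_diff``; ``snap`` is assumed to
+    # be the value returned by an earlier ``await blob.create_snapshot()``.
+    #
+    #   snap = await blob.create_snapshot()
+    #   # ... further writes to the blob ...
+    #   changed, cleared = await blob.get_page_ranges(previous_snapshot_diff=snap)
+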
+    @distributed_trace_async
+    async def resize(
+        self,
+        blob_content_length: int,
+        timeout: Optional[int] = None,
+        request_id_parameter: Optional[str] = None,
+        lease_access_conditions: Optional[_models.LeaseAccessConditions] = None,
+        cpk_info: Optional[_models.CpkInfo] = None,
+        cpk_scope_info: Optional[_models.CpkScopeInfo] = None,
+        modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """Resize the Blob.
+
+        :param blob_content_length: This header specifies the maximum size for the page blob, up to 8
+         TiB. The page blob size must be aligned to a 512-byte boundary. Required.
+        :type blob_content_length: int
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param lease_access_conditions: Parameter group. Default value is None.
+        :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions
+        :param cpk_info: Parameter group. Default value is None.
+        :type cpk_info: ~azure.storage.blob.models.CpkInfo
+        :param cpk_scope_info: Parameter group. Default value is None.
+        :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo
+        :param modified_access_conditions: Parameter group. Default value is None.
+        :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        comp: Literal["properties"] = kwargs.pop("comp", _params.pop("comp", "properties"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _lease_id = None
+        _encryption_key = None
+        _encryption_key_sha256 = None
+        _encryption_algorithm = None
+        _encryption_scope = None
+        _if_modified_since = None
+        _if_unmodified_since = None
+        _if_match = None
+        _if_none_match = None
+        _if_tags = None
+        if lease_access_conditions is not None:
+            _lease_id = lease_access_conditions.lease_id
+        if cpk_info is not None:
+            _encryption_algorithm = cpk_info.encryption_algorithm
+            _encryption_key = cpk_info.encryption_key
+            _encryption_key_sha256 = cpk_info.encryption_key_sha256
+        if cpk_scope_info is not None:
+            _encryption_scope = cpk_scope_info.encryption_scope
+        if modified_access_conditions is not None:
+            _if_match = modified_access_conditions.if_match
+            _if_modified_since = modified_access_conditions.if_modified_since
+            _if_none_match = modified_access_conditions.if_none_match
+            _if_tags = modified_access_conditions.if_tags
+            _if_unmodified_since = modified_access_conditions.if_unmodified_since
+
+        _request = build_resize_request(
+            url=self._config.url,
+            blob_content_length=blob_content_length,
+            timeout=timeout,
+            lease_id=_lease_id,
+            encryption_key=_encryption_key,
+            encryption_key_sha256=_encryption_key_sha256,
+            encryption_algorithm=_encryption_algorithm,
+            encryption_scope=_encryption_scope,
+            if_modified_since=_if_modified_since,
+            if_unmodified_since=_if_unmodified_since,
+            if_match=_if_match,
+            if_none_match=_if_none_match,
+            if_tags=_if_tags,
+            request_id_parameter=request_id_parameter,
+            comp=comp,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["x-ms-blob-sequence-number"] = self._deserialize(
+            "int", response.headers.get("x-ms-blob-sequence-number")
+        )
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
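+    # Usage sketch (illustrative): ``BlobClient.resize_blob`` wraps the operation
+    # above; the new size must be 512-byte aligned.
+    #
+    #   await blob.resize_blob(size=2048)
+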
+    @distributed_trace_async
+    async def update_sequence_number(
+        self,
+        sequence_number_action: Union[str, _models.SequenceNumberActionType],
+        timeout: Optional[int] = None,
+        blob_sequence_number: int = 0,
+        request_id_parameter: Optional[str] = None,
+        lease_access_conditions: Optional[_models.LeaseAccessConditions] = None,
+        modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """Update the sequence number of the blob.
+
+        :param sequence_number_action: Required if the x-ms-blob-sequence-number header is set for the
+         request. This property applies to page blobs only. This property indicates how the service
+         should modify the blob's sequence number. Known values are: "max", "update", and "increment".
+         Required.
+        :type sequence_number_action: str or ~azure.storage.blob.models.SequenceNumberActionType
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param blob_sequence_number: Set for page blobs only. The sequence number is a user-controlled
+         value that you can use to track requests. The value of the sequence number must be between 0
+         and 2^63 - 1. Default value is 0.
+        :type blob_sequence_number: int
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param lease_access_conditions: Parameter group. Default value is None.
+        :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions
+        :param modified_access_conditions: Parameter group. Default value is None.
+        :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        comp: Literal["properties"] = kwargs.pop("comp", _params.pop("comp", "properties"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _lease_id = None
+        _if_modified_since = None
+        _if_unmodified_since = None
+        _if_match = None
+        _if_none_match = None
+        _if_tags = None
+        if lease_access_conditions is not None:
+            _lease_id = lease_access_conditions.lease_id
+        if modified_access_conditions is not None:
+            _if_match = modified_access_conditions.if_match
+            _if_modified_since = modified_access_conditions.if_modified_since
+            _if_none_match = modified_access_conditions.if_none_match
+            _if_tags = modified_access_conditions.if_tags
+            _if_unmodified_since = modified_access_conditions.if_unmodified_since
+
+        _request = build_update_sequence_number_request(
+            url=self._config.url,
+            sequence_number_action=sequence_number_action,
+            timeout=timeout,
+            lease_id=_lease_id,
+            if_modified_since=_if_modified_since,
+            if_unmodified_since=_if_unmodified_since,
+            if_match=_if_match,
+            if_none_match=_if_none_match,
+            if_tags=_if_tags,
+            blob_sequence_number=blob_sequence_number,
+            request_id_parameter=request_id_parameter,
+            comp=comp,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["x-ms-blob-sequence-number"] = self._deserialize(
+            "int", response.headers.get("x-ms-blob-sequence-number")
+        )
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
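+    # Usage sketch (illustrative): the public wrapper is
+    # ``BlobClient.set_sequence_number``; the action may be passed as a plain
+    # string ("max", "update", or "increment").
+    #
+    #   # Set the blob's sequence number to an explicit value.
+    #   await blob.set_sequence_number("update", sequence_number=7)
+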
+    @distributed_trace_async
+    async def copy_incremental(
+        self,
+        copy_source: str,
+        timeout: Optional[int] = None,
+        request_id_parameter: Optional[str] = None,
+        modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """The Copy Incremental operation copies a snapshot of the source page blob to a destination page
+        blob. The snapshot is copied such that only the differential changes between the previously
+        copied snapshot are transferred to the destination. The copied snapshots are complete copies of
+        the original snapshot and can be read or copied from as usual. This API is supported since REST
+        version 2016-05-31.
+
+        :param copy_source: Specifies the name of the source page blob snapshot. This value is a URL of
+         up to 2 KB in length that specifies a page blob snapshot. The value should be URL-encoded as it
+         would appear in a request URI. The source blob must either be public or must be authenticated
+         via a shared access signature. Required.
+        :type copy_source: str
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param modified_access_conditions: Parameter group. Default value is None.
+        :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        comp: Literal["incrementalcopy"] = kwargs.pop("comp", _params.pop("comp", "incrementalcopy"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _if_modified_since = None
+        _if_unmodified_since = None
+        _if_match = None
+        _if_none_match = None
+        _if_tags = None
+        if modified_access_conditions is not None:
+            _if_match = modified_access_conditions.if_match
+            _if_modified_since = modified_access_conditions.if_modified_since
+            _if_none_match = modified_access_conditions.if_none_match
+            _if_tags = modified_access_conditions.if_tags
+            _if_unmodified_since = modified_access_conditions.if_unmodified_since
+
+        _request = build_copy_incremental_request(
+            url=self._config.url,
+            copy_source=copy_source,
+            timeout=timeout,
+            if_modified_since=_if_modified_since,
+            if_unmodified_since=_if_unmodified_since,
+            if_match=_if_match,
+            if_none_match=_if_none_match,
+            if_tags=_if_tags,
+            request_id_parameter=request_id_parameter,
+            comp=comp,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [202]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+        response_headers["x-ms-copy-id"] = self._deserialize("str", response.headers.get("x-ms-copy-id"))
+        response_headers["x-ms-copy-status"] = self._deserialize("str", response.headers.get("x-ms-copy-status"))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
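+
+    # Usage sketch (illustrative): incremental copy is exposed through
+    # ``BlobClient.start_copy_from_url`` with ``incremental_copy=True``; the
+    # hypothetical source URL must point to a page-blob *snapshot* on a public or
+    # SAS-authenticated source.
+    #
+    #   props = await dest_blob.start_copy_from_url(
+    #       "https://srcaccount.blob.core.windows.net/pages/src.vhd?snapshot=<ts>&<sas>",
+    #       incremental_copy=True,
+    #   )
+    #   print(props["copy_status"])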
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/blob/_generated/aio/operations/_patch.py b/.venv/lib/python3.12/site-packages/azure/storage/blob/_generated/aio/operations/_patch.py
new file mode 100644
index 00000000..f7dd3251
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/blob/_generated/aio/operations/_patch.py
@@ -0,0 +1,20 @@
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------
+"""Customize generated code here.
+
+Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize
+"""
+from typing import List
+
+__all__: List[str] = []  # Add all objects you want publicly available to users at this package level
+
+
+def patch_sdk():
+    """Do not remove from this file.
+
+    `patch_sdk` is a last resort escape hatch that allows you to do customizations
+    you can't accomplish using the techniques described in
+    https://aka.ms/azsdk/python/dpcodegen/python/customize
+    """
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/blob/_generated/aio/operations/_service_operations.py b/.venv/lib/python3.12/site-packages/azure/storage/blob/_generated/aio/operations/_service_operations.py
new file mode 100644
index 00000000..3c2fc650
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/blob/_generated/aio/operations/_service_operations.py
@@ -0,0 +1,755 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+import sys
+from typing import Any, AsyncIterator, Callable, Dict, IO, List, Literal, Optional, TypeVar, Union
+
+from azure.core import AsyncPipelineClient
+from azure.core.exceptions import (
+    ClientAuthenticationError,
+    HttpResponseError,
+    ResourceExistsError,
+    ResourceNotFoundError,
+    ResourceNotModifiedError,
+    StreamClosedError,
+    StreamConsumedError,
+    map_error,
+)
+from azure.core.pipeline import PipelineResponse
+from azure.core.rest import AsyncHttpResponse, HttpRequest
+from azure.core.tracing.decorator_async import distributed_trace_async
+from azure.core.utils import case_insensitive_dict
+
+from ... import models as _models
+from ..._serialization import Deserializer, Serializer
+from ...operations._service_operations import (
+    build_filter_blobs_request,
+    build_get_account_info_request,
+    build_get_properties_request,
+    build_get_statistics_request,
+    build_get_user_delegation_key_request,
+    build_list_containers_segment_request,
+    build_set_properties_request,
+    build_submit_batch_request,
+)
+from .._configuration import AzureBlobStorageConfiguration
+
+if sys.version_info >= (3, 9):
+    from collections.abc import MutableMapping
+else:
+    from typing import MutableMapping  # type: ignore
+T = TypeVar("T")
+ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
+
+
+class ServiceOperations:
+    """
+    .. warning::
+        **DO NOT** instantiate this class directly.
+
+        Instead, you should access the following operations through
+        :class:`~azure.storage.blob.aio.AzureBlobStorage`'s
+        :attr:`service` attribute.
+    """
+
+    models = _models
+
+    def __init__(self, *args, **kwargs) -> None:
+        input_args = list(args)
+        self._client: AsyncPipelineClient = input_args.pop(0) if input_args else kwargs.pop("client")
+        self._config: AzureBlobStorageConfiguration = input_args.pop(0) if input_args else kwargs.pop("config")
+        self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer")
+        self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer")
+
+    @distributed_trace_async
+    async def set_properties(
+        self,
+        storage_service_properties: _models.StorageServiceProperties,
+        timeout: Optional[int] = None,
+        request_id_parameter: Optional[str] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """Sets properties for a storage account's Blob service endpoint, including properties for Storage
+        Analytics and CORS (Cross-Origin Resource Sharing) rules.
+
+        :param storage_service_properties: The StorageService properties. Required.
+        :type storage_service_properties: ~azure.storage.blob.models.StorageServiceProperties
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        restype: Literal["service"] = kwargs.pop("restype", _params.pop("restype", "service"))
+        comp: Literal["properties"] = kwargs.pop("comp", _params.pop("comp", "properties"))
+        content_type: str = kwargs.pop("content_type", _headers.pop("Content-Type", "application/xml"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _content = self._serialize.body(storage_service_properties, "StorageServiceProperties", is_xml=True)
+
+        _request = build_set_properties_request(
+            url=self._config.url,
+            timeout=timeout,
+            request_id_parameter=request_id_parameter,
+            restype=restype,
+            comp=comp,
+            content_type=content_type,
+            version=self._config.version,
+            content=_content,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [202]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
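+    # Editor's sketch (not part of the generated file): a minimal, hedged
+    # usage example for set_properties, assuming `client` is an
+    # already-constructed azure.storage.blob.aio.AzureBlobStorage instance:
+    #
+    #     props = _models.StorageServiceProperties(
+    #         logging=_models.Logging(
+    #             version="1.0", delete=True, read=False, write=True,
+    #             retention_policy=_models.RetentionPolicy(enabled=True, days=7),
+    #         )
+    #     )
+    #     await client.service.set_properties(props)  # completes with HTTP 202
+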
+    @distributed_trace_async
+    async def get_properties(
+        self, timeout: Optional[int] = None, request_id_parameter: Optional[str] = None, **kwargs: Any
+    ) -> _models.StorageServiceProperties:
+        # pylint: disable=line-too-long
+        """gets the properties of a storage account's Blob service, including properties for Storage
+        Analytics and CORS (Cross-Origin Resource Sharing) rules.
+
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :return: StorageServiceProperties or the result of cls(response)
+        :rtype: ~azure.storage.blob.models.StorageServiceProperties
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        restype: Literal["service"] = kwargs.pop("restype", _params.pop("restype", "service"))
+        comp: Literal["properties"] = kwargs.pop("comp", _params.pop("comp", "properties"))
+        cls: ClsType[_models.StorageServiceProperties] = kwargs.pop("cls", None)
+
+        _request = build_get_properties_request(
+            url=self._config.url,
+            timeout=timeout,
+            request_id_parameter=request_id_parameter,
+            restype=restype,
+            comp=comp,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+
+        deserialized = self._deserialize("StorageServiceProperties", pipeline_response.http_response)
+
+        if cls:
+            return cls(pipeline_response, deserialized, response_headers)  # type: ignore
+
+        return deserialized  # type: ignore
+
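+    # Editor's sketch (not part of the generated file): reading the service
+    # properties back; `client` is assumed as in the sketch above.
+    #
+    #     props = await client.service.get_properties()
+    #     if props.logging is not None:
+    #         print(props.logging.version, props.logging.retention_policy.days)
+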
+    @distributed_trace_async
+    async def get_statistics(
+        self, timeout: Optional[int] = None, request_id_parameter: Optional[str] = None, **kwargs: Any
+    ) -> _models.StorageServiceStats:
+        # pylint: disable=line-too-long
+        """Retrieves statistics related to replication for the Blob service. It is only available on the
+        secondary location endpoint when read-access geo-redundant replication is enabled for the
+        storage account.
+
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :return: StorageServiceStats or the result of cls(response)
+        :rtype: ~azure.storage.blob.models.StorageServiceStats
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        restype: Literal["service"] = kwargs.pop("restype", _params.pop("restype", "service"))
+        comp: Literal["stats"] = kwargs.pop("comp", _params.pop("comp", "stats"))
+        cls: ClsType[_models.StorageServiceStats] = kwargs.pop("cls", None)
+
+        _request = build_get_statistics_request(
+            url=self._config.url,
+            timeout=timeout,
+            request_id_parameter=request_id_parameter,
+            restype=restype,
+            comp=comp,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+
+        deserialized = self._deserialize("StorageServiceStats", pipeline_response.http_response)
+
+        if cls:
+            return cls(pipeline_response, deserialized, response_headers)  # type: ignore
+
+        return deserialized  # type: ignore
+
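+    # Editor's sketch (not part of the generated file): get_statistics only
+    # succeeds against the secondary endpoint of an RA-GRS account, so
+    # `secondary_client` is assumed to be built from the "-secondary" URL.
+    #
+    #     stats = await secondary_client.service.get_statistics()
+    #     geo = stats.geo_replication
+    #     print(geo.status, geo.last_sync_time)
+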
+    @distributed_trace_async
+    async def list_containers_segment(
+        self,
+        prefix: Optional[str] = None,
+        marker: Optional[str] = None,
+        maxresults: Optional[int] = None,
+        include: Optional[List[Union[str, _models.ListContainersIncludeType]]] = None,
+        timeout: Optional[int] = None,
+        request_id_parameter: Optional[str] = None,
+        **kwargs: Any
+    ) -> _models.ListContainersSegmentResponse:
+        # pylint: disable=line-too-long
+        """The List Containers Segment operation returns a list of the containers under the specified
+        account.
+
+        :param prefix: Filters the results to return only containers whose name begins with the
+         specified prefix. Default value is None.
+        :type prefix: str
+        :param marker: A string value that identifies the portion of the list of containers to be
+         returned with the next listing operation. The operation returns the NextMarker value within the
+         response body if the listing operation did not return all containers remaining to be listed
+         with the current page. The NextMarker value can be used as the value for the marker parameter
+         in a subsequent call to request the next page of list items. The marker value is opaque to the
+         client. Default value is None.
+        :type marker: str
+        :param maxresults: Specifies the maximum number of containers to return. If the request does
+         not specify maxresults, or specifies a value greater than 5000, the server will return up to
+         5000 items. Note that if the listing operation crosses a partition boundary, then the service
+         will return a continuation token for retrieving the remainder of the results. For this reason,
+         it is possible that the service will return fewer results than specified by maxresults, or than
+         the default of 5000. Default value is None.
+        :type maxresults: int
+        :param include: Include this parameter to specify that the container's metadata be returned as
+         part of the response body. Default value is None.
+        :type include: list[str or ~azure.storage.blob.models.ListContainersIncludeType]
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :return: ListContainersSegmentResponse or the result of cls(response)
+        :rtype: ~azure.storage.blob.models.ListContainersSegmentResponse
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        comp: Literal["list"] = kwargs.pop("comp", _params.pop("comp", "list"))
+        cls: ClsType[_models.ListContainersSegmentResponse] = kwargs.pop("cls", None)
+
+        _request = build_list_containers_segment_request(
+            url=self._config.url,
+            prefix=prefix,
+            marker=marker,
+            maxresults=maxresults,
+            include=include,
+            timeout=timeout,
+            request_id_parameter=request_id_parameter,
+            comp=comp,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+
+        deserialized = self._deserialize("ListContainersSegmentResponse", pipeline_response.http_response)
+
+        if cls:
+            return cls(pipeline_response, deserialized, response_headers)  # type: ignore
+
+        return deserialized  # type: ignore
+
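+    # Editor's sketch (not part of the generated file): driving the
+    # segment/marker paging loop by hand; attribute names follow the
+    # ListContainersSegmentResponse model.
+    #
+    #     marker = None
+    #     while True:
+    #         page = await client.service.list_containers_segment(
+    #             prefix="logs-", maxresults=100, marker=marker
+    #         )
+    #         for container in page.container_items:
+    #             print(container.name)
+    #         marker = page.next_marker
+    #         if not marker:
+    #             break
+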
+    @distributed_trace_async
+    async def get_user_delegation_key(
+        self,
+        key_info: _models.KeyInfo,
+        timeout: Optional[int] = None,
+        request_id_parameter: Optional[str] = None,
+        **kwargs: Any
+    ) -> _models.UserDelegationKey:
+        # pylint: disable=line-too-long
+        """Retrieves a user delegation key for the Blob service. This is only a valid operation when using
+        bearer token authentication.
+
+        :param key_info: Key information. Required.
+        :type key_info: ~azure.storage.blob.models.KeyInfo
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :return: UserDelegationKey or the result of cls(response)
+        :rtype: ~azure.storage.blob.models.UserDelegationKey
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        restype: Literal["service"] = kwargs.pop("restype", _params.pop("restype", "service"))
+        comp: Literal["userdelegationkey"] = kwargs.pop("comp", _params.pop("comp", "userdelegationkey"))
+        content_type: str = kwargs.pop("content_type", _headers.pop("Content-Type", "application/xml"))
+        cls: ClsType[_models.UserDelegationKey] = kwargs.pop("cls", None)
+
+        _content = self._serialize.body(key_info, "KeyInfo", is_xml=True)
+
+        _request = build_get_user_delegation_key_request(
+            url=self._config.url,
+            timeout=timeout,
+            request_id_parameter=request_id_parameter,
+            restype=restype,
+            comp=comp,
+            content_type=content_type,
+            version=self._config.version,
+            content=_content,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+
+        deserialized = self._deserialize("UserDelegationKey", pipeline_response.http_response)
+
+        if cls:
+            return cls(pipeline_response, deserialized, response_headers)  # type: ignore
+
+        return deserialized  # type: ignore
+
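+    # Editor's sketch (not part of the generated file): the pipeline must be
+    # authenticated with a bearer token (Microsoft Entra ID) for this call;
+    # the ISO-8601 timestamps are illustrative.
+    #
+    #     key_info = _models.KeyInfo(
+    #         start="2025-01-01T00:00:00Z", expiry="2025-01-02T00:00:00Z"
+    #     )
+    #     key = await client.service.get_user_delegation_key(key_info)
+    #     print(key.signed_oid, key.signed_expiry)
+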
+    @distributed_trace_async
+    async def get_account_info(
+        self, timeout: Optional[int] = None, request_id_parameter: Optional[str] = None, **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """Returns the sku name and account kind.
+
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        restype: Literal["account"] = kwargs.pop("restype", _params.pop("restype", "account"))
+        comp: Literal["properties"] = kwargs.pop("comp", _params.pop("comp", "properties"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _request = build_get_account_info_request(
+            url=self._config.url,
+            timeout=timeout,
+            request_id_parameter=request_id_parameter,
+            restype=restype,
+            comp=comp,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+        response_headers["x-ms-sku-name"] = self._deserialize("str", response.headers.get("x-ms-sku-name"))
+        response_headers["x-ms-account-kind"] = self._deserialize("str", response.headers.get("x-ms-account-kind"))
+        response_headers["x-ms-is-hns-enabled"] = self._deserialize("bool", response.headers.get("x-ms-is-hns-enabled"))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
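+    # Editor's sketch (not part of the generated file): get_account_info
+    # returns None, so the interesting headers are surfaced via the `cls`
+    # response hook, which receives (pipeline_response, None, headers).
+    #
+    #     def pick(pipeline_response, deserialized, headers):
+    #         return headers["x-ms-sku-name"], headers["x-ms-account-kind"]
+    #
+    #     sku, kind = await client.service.get_account_info(cls=pick)
+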
+    @distributed_trace_async
+    async def submit_batch(
+        self,
+        content_length: int,
+        body: IO[bytes],
+        timeout: Optional[int] = None,
+        request_id_parameter: Optional[str] = None,
+        **kwargs: Any
+    ) -> AsyncIterator[bytes]:
+        # pylint: disable=line-too-long
+        """The Batch operation allows multiple API calls to be embedded into a single HTTP request.
+
+        :param content_length: The length of the request. Required.
+        :type content_length: int
+        :param body: Initial data. Required.
+        :type body: IO[bytes]
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :return: AsyncIterator[bytes] or the result of cls(response)
+        :rtype: AsyncIterator[bytes]
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        comp: Literal["batch"] = kwargs.pop("comp", _params.pop("comp", "batch"))
+        multipart_content_type: str = kwargs.pop(
+            "multipart_content_type", _headers.pop("Content-Type", "application/xml")
+        )
+        cls: ClsType[AsyncIterator[bytes]] = kwargs.pop("cls", None)
+
+        _content = body
+
+        _request = build_submit_batch_request(
+            url=self._config.url,
+            content_length=content_length,
+            timeout=timeout,
+            request_id_parameter=request_id_parameter,
+            comp=comp,
+            multipart_content_type=multipart_content_type,
+            version=self._config.version,
+            content=_content,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _decompress = kwargs.pop("decompress", True)
+        _stream = True
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            try:
+                await response.read()  # Load the body in memory and close the socket
+            except (StreamConsumedError, StreamClosedError):
+                pass
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["Content-Type"] = self._deserialize("str", response.headers.get("Content-Type"))
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+
+        deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
+
+        if cls:
+            return cls(pipeline_response, deserialized, response_headers)  # type: ignore
+
+        return deserialized  # type: ignore
+
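+    # Editor's sketch (not part of the generated file): submit_batch streams
+    # the multipart/mixed response back as an async byte iterator; building a
+    # valid batch body (boundary framing, sub-requests) is left to the
+    # higher-level clients and is elided here.
+    #
+    #     body = io.BytesIO(batch_payload)  # `io` import and `batch_payload` (bytes) assumed
+    #     stream = await client.service.submit_batch(len(batch_payload), body)
+    #     async for chunk in stream:
+    #         handle(chunk)  # `handle` is a placeholder
+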
+    @distributed_trace_async
+    async def filter_blobs(
+        self,
+        timeout: Optional[int] = None,
+        request_id_parameter: Optional[str] = None,
+        where: Optional[str] = None,
+        marker: Optional[str] = None,
+        maxresults: Optional[int] = None,
+        include: Optional[List[Union[str, _models.FilterBlobsIncludeItem]]] = None,
+        **kwargs: Any
+    ) -> _models.FilterBlobSegment:
+        # pylint: disable=line-too-long
+        """The Filter Blobs operation enables callers to list blobs across all containers whose tags match
+        a given search expression.  Filter blobs searches across all containers within a storage
+        account but can be scoped within the expression to a single container.
+
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param where: Filters the results to return only blobs whose tags match the specified
+         expression. Default value is None.
+        :type where: str
+        :param marker: A string value that identifies the portion of the list of blobs to be
+         returned with the next listing operation. The operation returns the NextMarker value within the
+         response body if the listing operation did not return all blobs remaining to be listed
+         with the current page. The NextMarker value can be used as the value for the marker parameter
+         in a subsequent call to request the next page of list items. The marker value is opaque to the
+         client. Default value is None.
+        :type marker: str
+        :param maxresults: Specifies the maximum number of blobs to return. If the request does
+         not specify maxresults, or specifies a value greater than 5000, the server will return up to
+         5000 items. Note that if the listing operation crosses a partition boundary, then the service
+         will return a continuation token for retrieving the remainder of the results. For this reason,
+         it is possible that the service will return fewer results than specified by maxresults, or than
+         the default of 5000. Default value is None.
+        :type maxresults: int
+        :param include: Include this parameter to specify one or more datasets to include in the
+         response. Default value is None.
+        :type include: list[str or ~azure.storage.blob.models.FilterBlobsIncludeItem]
+        :return: FilterBlobSegment or the result of cls(response)
+        :rtype: ~azure.storage.blob.models.FilterBlobSegment
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        comp: Literal["blobs"] = kwargs.pop("comp", _params.pop("comp", "blobs"))
+        cls: ClsType[_models.FilterBlobSegment] = kwargs.pop("cls", None)
+
+        _request = build_filter_blobs_request(
+            url=self._config.url,
+            timeout=timeout,
+            request_id_parameter=request_id_parameter,
+            where=where,
+            marker=marker,
+            maxresults=maxresults,
+            include=include,
+            comp=comp,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+
+        deserialized = self._deserialize("FilterBlobSegment", pipeline_response.http_response)
+
+        if cls:
+            return cls(pipeline_response, deserialized, response_headers)  # type: ignore
+
+        return deserialized  # type: ignore
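+
+    # Editor's sketch (not part of the generated file): filtering blobs by a
+    # tag expression; the `where` grammar and tag names are illustrative.
+    #
+    #     segment = await client.service.filter_blobs(
+    #         where="\"project\"='gn-ai' AND \"stage\"='prod'"
+    #     )
+    #     for item in segment.blobs:
+    #         print(item.container_name, item.name)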
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/blob/_generated/models/__init__.py b/.venv/lib/python3.12/site-packages/azure/storage/blob/_generated/models/__init__.py
new file mode 100644
index 00000000..bb9dc27d
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/blob/_generated/models/__init__.py
@@ -0,0 +1,184 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+# pylint: disable=wrong-import-position
+
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from ._patch import *  # pylint: disable=unused-wildcard-import
+
+
+from ._models_py3 import (  # type: ignore
+    AccessPolicy,
+    AppendPositionAccessConditions,
+    ArrowConfiguration,
+    ArrowField,
+    BlobFlatListSegment,
+    BlobHTTPHeaders,
+    BlobHierarchyListSegment,
+    BlobItemInternal,
+    BlobMetadata,
+    BlobName,
+    BlobPrefix,
+    BlobPropertiesInternal,
+    BlobTag,
+    BlobTags,
+    Block,
+    BlockList,
+    BlockLookupList,
+    ClearRange,
+    ContainerCpkScopeInfo,
+    ContainerItem,
+    ContainerProperties,
+    CorsRule,
+    CpkInfo,
+    CpkScopeInfo,
+    DelimitedTextConfiguration,
+    FilterBlobItem,
+    FilterBlobSegment,
+    GeoReplication,
+    JsonTextConfiguration,
+    KeyInfo,
+    LeaseAccessConditions,
+    ListBlobsFlatSegmentResponse,
+    ListBlobsHierarchySegmentResponse,
+    ListContainersSegmentResponse,
+    Logging,
+    Metrics,
+    ModifiedAccessConditions,
+    PageList,
+    PageRange,
+    QueryFormat,
+    QueryRequest,
+    QuerySerialization,
+    RetentionPolicy,
+    SequenceNumberAccessConditions,
+    SignedIdentifier,
+    SourceModifiedAccessConditions,
+    StaticWebsite,
+    StorageError,
+    StorageServiceProperties,
+    StorageServiceStats,
+    UserDelegationKey,
+)
+
+from ._azure_blob_storage_enums import (  # type: ignore
+    AccessTier,
+    AccessTierOptional,
+    AccessTierRequired,
+    AccountKind,
+    ArchiveStatus,
+    BlobCopySourceTags,
+    BlobExpiryOptions,
+    BlobImmutabilityPolicyMode,
+    BlobType,
+    BlockListType,
+    CopyStatusType,
+    DeleteSnapshotsOptionType,
+    EncryptionAlgorithmType,
+    FilterBlobsIncludeItem,
+    GeoReplicationStatusType,
+    LeaseDurationType,
+    LeaseStateType,
+    LeaseStatusType,
+    ListBlobsIncludeItem,
+    ListContainersIncludeType,
+    PremiumPageBlobAccessTier,
+    PublicAccessType,
+    QueryFormatType,
+    RehydratePriority,
+    SequenceNumberActionType,
+    SkuName,
+    StorageErrorCode,
+)
+from ._patch import __all__ as _patch_all
+from ._patch import *
+from ._patch import patch_sdk as _patch_sdk
+
+__all__ = [
+    "AccessPolicy",
+    "AppendPositionAccessConditions",
+    "ArrowConfiguration",
+    "ArrowField",
+    "BlobFlatListSegment",
+    "BlobHTTPHeaders",
+    "BlobHierarchyListSegment",
+    "BlobItemInternal",
+    "BlobMetadata",
+    "BlobName",
+    "BlobPrefix",
+    "BlobPropertiesInternal",
+    "BlobTag",
+    "BlobTags",
+    "Block",
+    "BlockList",
+    "BlockLookupList",
+    "ClearRange",
+    "ContainerCpkScopeInfo",
+    "ContainerItem",
+    "ContainerProperties",
+    "CorsRule",
+    "CpkInfo",
+    "CpkScopeInfo",
+    "DelimitedTextConfiguration",
+    "FilterBlobItem",
+    "FilterBlobSegment",
+    "GeoReplication",
+    "JsonTextConfiguration",
+    "KeyInfo",
+    "LeaseAccessConditions",
+    "ListBlobsFlatSegmentResponse",
+    "ListBlobsHierarchySegmentResponse",
+    "ListContainersSegmentResponse",
+    "Logging",
+    "Metrics",
+    "ModifiedAccessConditions",
+    "PageList",
+    "PageRange",
+    "QueryFormat",
+    "QueryRequest",
+    "QuerySerialization",
+    "RetentionPolicy",
+    "SequenceNumberAccessConditions",
+    "SignedIdentifier",
+    "SourceModifiedAccessConditions",
+    "StaticWebsite",
+    "StorageError",
+    "StorageServiceProperties",
+    "StorageServiceStats",
+    "UserDelegationKey",
+    "AccessTier",
+    "AccessTierOptional",
+    "AccessTierRequired",
+    "AccountKind",
+    "ArchiveStatus",
+    "BlobCopySourceTags",
+    "BlobExpiryOptions",
+    "BlobImmutabilityPolicyMode",
+    "BlobType",
+    "BlockListType",
+    "CopyStatusType",
+    "DeleteSnapshotsOptionType",
+    "EncryptionAlgorithmType",
+    "FilterBlobsIncludeItem",
+    "GeoReplicationStatusType",
+    "LeaseDurationType",
+    "LeaseStateType",
+    "LeaseStatusType",
+    "ListBlobsIncludeItem",
+    "ListContainersIncludeType",
+    "PremiumPageBlobAccessTier",
+    "PublicAccessType",
+    "QueryFormatType",
+    "RehydratePriority",
+    "SequenceNumberActionType",
+    "SkuName",
+    "StorageErrorCode",
+]
+__all__.extend([p for p in _patch_all if p not in __all__])  # pyright: ignore
+_patch_sdk()
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/blob/_generated/models/_azure_blob_storage_enums.py b/.venv/lib/python3.12/site-packages/azure/storage/blob/_generated/models/_azure_blob_storage_enums.py
new file mode 100644
index 00000000..12ccbf73
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/blob/_generated/models/_azure_blob_storage_enums.py
@@ -0,0 +1,392 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+from enum import Enum
+from azure.core import CaseInsensitiveEnumMeta
+
+
+class AccessTier(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+    """AccessTier."""
+
+    P4 = "P4"
+    P6 = "P6"
+    P10 = "P10"
+    P15 = "P15"
+    P20 = "P20"
+    P30 = "P30"
+    P40 = "P40"
+    P50 = "P50"
+    P60 = "P60"
+    P70 = "P70"
+    P80 = "P80"
+    HOT = "Hot"
+    COOL = "Cool"
+    ARCHIVE = "Archive"
+    PREMIUM = "Premium"
+    COLD = "Cold"
+
+
+class AccessTierOptional(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+    """AccessTierOptional."""
+
+    P4 = "P4"
+    P6 = "P6"
+    P10 = "P10"
+    P15 = "P15"
+    P20 = "P20"
+    P30 = "P30"
+    P40 = "P40"
+    P50 = "P50"
+    P60 = "P60"
+    P70 = "P70"
+    P80 = "P80"
+    HOT = "Hot"
+    COOL = "Cool"
+    ARCHIVE = "Archive"
+    COLD = "Cold"
+
+
+class AccessTierRequired(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+    """AccessTierRequired."""
+
+    P4 = "P4"
+    P6 = "P6"
+    P10 = "P10"
+    P15 = "P15"
+    P20 = "P20"
+    P30 = "P30"
+    P40 = "P40"
+    P50 = "P50"
+    P60 = "P60"
+    P70 = "P70"
+    P80 = "P80"
+    HOT = "Hot"
+    COOL = "Cool"
+    ARCHIVE = "Archive"
+    COLD = "Cold"
+
+
+class AccountKind(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+    """AccountKind."""
+
+    STORAGE = "Storage"
+    BLOB_STORAGE = "BlobStorage"
+    STORAGE_V2 = "StorageV2"
+    FILE_STORAGE = "FileStorage"
+    BLOCK_BLOB_STORAGE = "BlockBlobStorage"
+
+
+class ArchiveStatus(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+    """ArchiveStatus."""
+
+    REHYDRATE_PENDING_TO_HOT = "rehydrate-pending-to-hot"
+    REHYDRATE_PENDING_TO_COOL = "rehydrate-pending-to-cool"
+    REHYDRATE_PENDING_TO_COLD = "rehydrate-pending-to-cold"
+
+
+class BlobCopySourceTags(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+    """BlobCopySourceTags."""
+
+    REPLACE = "REPLACE"
+    COPY = "COPY"
+
+
+class BlobExpiryOptions(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+    """BlobExpiryOptions."""
+
+    NEVER_EXPIRE = "NeverExpire"
+    RELATIVE_TO_CREATION = "RelativeToCreation"
+    RELATIVE_TO_NOW = "RelativeToNow"
+    ABSOLUTE = "Absolute"
+
+
+class BlobImmutabilityPolicyMode(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+    """BlobImmutabilityPolicyMode."""
+
+    MUTABLE = "Mutable"
+    UNLOCKED = "Unlocked"
+    LOCKED = "Locked"
+
+
+class BlobType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+    """BlobType."""
+
+    BLOCK_BLOB = "BlockBlob"
+    PAGE_BLOB = "PageBlob"
+    APPEND_BLOB = "AppendBlob"
+
+
+class BlockListType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+    """BlockListType."""
+
+    COMMITTED = "committed"
+    UNCOMMITTED = "uncommitted"
+    ALL = "all"
+
+
+class CopyStatusType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+    """CopyStatusType."""
+
+    PENDING = "pending"
+    SUCCESS = "success"
+    ABORTED = "aborted"
+    FAILED = "failed"
+
+
+class DeleteSnapshotsOptionType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+    """DeleteSnapshotsOptionType."""
+
+    INCLUDE = "include"
+    ONLY = "only"
+
+
+class EncryptionAlgorithmType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+    """EncryptionAlgorithmType."""
+
+    NONE = "None"
+    AES256 = "AES256"
+
+
+class FilterBlobsIncludeItem(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+    """FilterBlobsIncludeItem."""
+
+    NONE = "none"
+    VERSIONS = "versions"
+
+
+class GeoReplicationStatusType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+    """The status of the secondary location."""
+
+    LIVE = "live"
+    BOOTSTRAP = "bootstrap"
+    UNAVAILABLE = "unavailable"
+
+
+class LeaseDurationType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+    """LeaseDurationType."""
+
+    INFINITE = "infinite"
+    FIXED = "fixed"
+
+
+class LeaseStateType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+    """LeaseStateType."""
+
+    AVAILABLE = "available"
+    LEASED = "leased"
+    EXPIRED = "expired"
+    BREAKING = "breaking"
+    BROKEN = "broken"
+
+
+class LeaseStatusType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+    """LeaseStatusType."""
+
+    LOCKED = "locked"
+    UNLOCKED = "unlocked"
+
+
+class ListBlobsIncludeItem(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+    """ListBlobsIncludeItem."""
+
+    COPY = "copy"
+    DELETED = "deleted"
+    METADATA = "metadata"
+    SNAPSHOTS = "snapshots"
+    UNCOMMITTEDBLOBS = "uncommittedblobs"
+    VERSIONS = "versions"
+    TAGS = "tags"
+    IMMUTABILITYPOLICY = "immutabilitypolicy"
+    LEGALHOLD = "legalhold"
+    DELETEDWITHVERSIONS = "deletedwithversions"
+
+
+class ListContainersIncludeType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+    """ListContainersIncludeType."""
+
+    METADATA = "metadata"
+    DELETED = "deleted"
+    SYSTEM = "system"
+
+
+class PremiumPageBlobAccessTier(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+    """PremiumPageBlobAccessTier."""
+
+    P4 = "P4"
+    P6 = "P6"
+    P10 = "P10"
+    P15 = "P15"
+    P20 = "P20"
+    P30 = "P30"
+    P40 = "P40"
+    P50 = "P50"
+    P60 = "P60"
+    P70 = "P70"
+    P80 = "P80"
+
+
+class PublicAccessType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+    """PublicAccessType."""
+
+    CONTAINER = "container"
+    BLOB = "blob"
+
+
+class QueryFormatType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+    """The quick query format type."""
+
+    DELIMITED = "delimited"
+    JSON = "json"
+    ARROW = "arrow"
+    PARQUET = "parquet"
+
+
+class RehydratePriority(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+    """If an object is in rehydrate pending state then this header is returned with priority of
+    rehydrate. Valid values are High and Standard.
+    """
+
+    HIGH = "High"
+    STANDARD = "Standard"
+
+
+class SequenceNumberActionType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+    """SequenceNumberActionType."""
+
+    MAX = "max"
+    UPDATE = "update"
+    INCREMENT = "increment"
+
+
+class SkuName(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+    """SkuName."""
+
+    STANDARD_LRS = "Standard_LRS"
+    STANDARD_GRS = "Standard_GRS"
+    STANDARD_RAGRS = "Standard_RAGRS"
+    STANDARD_ZRS = "Standard_ZRS"
+    PREMIUM_LRS = "Premium_LRS"
+
+
+class StorageErrorCode(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+    """Error codes returned by the service."""
+
+    ACCOUNT_ALREADY_EXISTS = "AccountAlreadyExists"
+    ACCOUNT_BEING_CREATED = "AccountBeingCreated"
+    ACCOUNT_IS_DISABLED = "AccountIsDisabled"
+    AUTHENTICATION_FAILED = "AuthenticationFailed"
+    AUTHORIZATION_FAILURE = "AuthorizationFailure"
+    CONDITION_HEADERS_NOT_SUPPORTED = "ConditionHeadersNotSupported"
+    CONDITION_NOT_MET = "ConditionNotMet"
+    EMPTY_METADATA_KEY = "EmptyMetadataKey"
+    INSUFFICIENT_ACCOUNT_PERMISSIONS = "InsufficientAccountPermissions"
+    INTERNAL_ERROR = "InternalError"
+    INVALID_AUTHENTICATION_INFO = "InvalidAuthenticationInfo"
+    INVALID_HEADER_VALUE = "InvalidHeaderValue"
+    INVALID_HTTP_VERB = "InvalidHttpVerb"
+    INVALID_INPUT = "InvalidInput"
+    INVALID_MD5 = "InvalidMd5"
+    INVALID_METADATA = "InvalidMetadata"
+    INVALID_QUERY_PARAMETER_VALUE = "InvalidQueryParameterValue"
+    INVALID_RANGE = "InvalidRange"
+    INVALID_RESOURCE_NAME = "InvalidResourceName"
+    INVALID_URI = "InvalidUri"
+    INVALID_XML_DOCUMENT = "InvalidXmlDocument"
+    INVALID_XML_NODE_VALUE = "InvalidXmlNodeValue"
+    MD5_MISMATCH = "Md5Mismatch"
+    METADATA_TOO_LARGE = "MetadataTooLarge"
+    MISSING_CONTENT_LENGTH_HEADER = "MissingContentLengthHeader"
+    MISSING_REQUIRED_QUERY_PARAMETER = "MissingRequiredQueryParameter"
+    MISSING_REQUIRED_HEADER = "MissingRequiredHeader"
+    MISSING_REQUIRED_XML_NODE = "MissingRequiredXmlNode"
+    MULTIPLE_CONDITION_HEADERS_NOT_SUPPORTED = "MultipleConditionHeadersNotSupported"
+    OPERATION_TIMED_OUT = "OperationTimedOut"
+    OUT_OF_RANGE_INPUT = "OutOfRangeInput"
+    OUT_OF_RANGE_QUERY_PARAMETER_VALUE = "OutOfRangeQueryParameterValue"
+    REQUEST_BODY_TOO_LARGE = "RequestBodyTooLarge"
+    RESOURCE_TYPE_MISMATCH = "ResourceTypeMismatch"
+    REQUEST_URL_FAILED_TO_PARSE = "RequestUrlFailedToParse"
+    RESOURCE_ALREADY_EXISTS = "ResourceAlreadyExists"
+    RESOURCE_NOT_FOUND = "ResourceNotFound"
+    SERVER_BUSY = "ServerBusy"
+    UNSUPPORTED_HEADER = "UnsupportedHeader"
+    UNSUPPORTED_XML_NODE = "UnsupportedXmlNode"
+    UNSUPPORTED_QUERY_PARAMETER = "UnsupportedQueryParameter"
+    UNSUPPORTED_HTTP_VERB = "UnsupportedHttpVerb"
+    APPEND_POSITION_CONDITION_NOT_MET = "AppendPositionConditionNotMet"
+    BLOB_ALREADY_EXISTS = "BlobAlreadyExists"
+    BLOB_IMMUTABLE_DUE_TO_POLICY = "BlobImmutableDueToPolicy"
+    BLOB_NOT_FOUND = "BlobNotFound"
+    BLOB_OVERWRITTEN = "BlobOverwritten"
+    BLOB_TIER_INADEQUATE_FOR_CONTENT_LENGTH = "BlobTierInadequateForContentLength"
+    BLOB_USES_CUSTOMER_SPECIFIED_ENCRYPTION = "BlobUsesCustomerSpecifiedEncryption"
+    BLOCK_COUNT_EXCEEDS_LIMIT = "BlockCountExceedsLimit"
+    BLOCK_LIST_TOO_LONG = "BlockListTooLong"
+    CANNOT_CHANGE_TO_LOWER_TIER = "CannotChangeToLowerTier"
+    CANNOT_VERIFY_COPY_SOURCE = "CannotVerifyCopySource"
+    CONTAINER_ALREADY_EXISTS = "ContainerAlreadyExists"
+    CONTAINER_BEING_DELETED = "ContainerBeingDeleted"
+    CONTAINER_DISABLED = "ContainerDisabled"
+    CONTAINER_NOT_FOUND = "ContainerNotFound"
+    CONTENT_LENGTH_LARGER_THAN_TIER_LIMIT = "ContentLengthLargerThanTierLimit"
+    COPY_ACROSS_ACCOUNTS_NOT_SUPPORTED = "CopyAcrossAccountsNotSupported"
+    COPY_ID_MISMATCH = "CopyIdMismatch"
+    FEATURE_VERSION_MISMATCH = "FeatureVersionMismatch"
+    INCREMENTAL_COPY_BLOB_MISMATCH = "IncrementalCopyBlobMismatch"
+    INCREMENTAL_COPY_OF_EARLIER_VERSION_SNAPSHOT_NOT_ALLOWED = "IncrementalCopyOfEarlierVersionSnapshotNotAllowed"
+    INCREMENTAL_COPY_SOURCE_MUST_BE_SNAPSHOT = "IncrementalCopySourceMustBeSnapshot"
+    INFINITE_LEASE_DURATION_REQUIRED = "InfiniteLeaseDurationRequired"
+    INVALID_BLOB_OR_BLOCK = "InvalidBlobOrBlock"
+    INVALID_BLOB_TIER = "InvalidBlobTier"
+    INVALID_BLOB_TYPE = "InvalidBlobType"
+    INVALID_BLOCK_ID = "InvalidBlockId"
+    INVALID_BLOCK_LIST = "InvalidBlockList"
+    INVALID_OPERATION = "InvalidOperation"
+    INVALID_PAGE_RANGE = "InvalidPageRange"
+    INVALID_SOURCE_BLOB_TYPE = "InvalidSourceBlobType"
+    INVALID_SOURCE_BLOB_URL = "InvalidSourceBlobUrl"
+    INVALID_VERSION_FOR_PAGE_BLOB_OPERATION = "InvalidVersionForPageBlobOperation"
+    LEASE_ALREADY_PRESENT = "LeaseAlreadyPresent"
+    LEASE_ALREADY_BROKEN = "LeaseAlreadyBroken"
+    LEASE_ID_MISMATCH_WITH_BLOB_OPERATION = "LeaseIdMismatchWithBlobOperation"
+    LEASE_ID_MISMATCH_WITH_CONTAINER_OPERATION = "LeaseIdMismatchWithContainerOperation"
+    LEASE_ID_MISMATCH_WITH_LEASE_OPERATION = "LeaseIdMismatchWithLeaseOperation"
+    LEASE_ID_MISSING = "LeaseIdMissing"
+    LEASE_IS_BREAKING_AND_CANNOT_BE_ACQUIRED = "LeaseIsBreakingAndCannotBeAcquired"
+    LEASE_IS_BREAKING_AND_CANNOT_BE_CHANGED = "LeaseIsBreakingAndCannotBeChanged"
+    LEASE_IS_BROKEN_AND_CANNOT_BE_RENEWED = "LeaseIsBrokenAndCannotBeRenewed"
+    LEASE_LOST = "LeaseLost"
+    LEASE_NOT_PRESENT_WITH_BLOB_OPERATION = "LeaseNotPresentWithBlobOperation"
+    LEASE_NOT_PRESENT_WITH_CONTAINER_OPERATION = "LeaseNotPresentWithContainerOperation"
+    LEASE_NOT_PRESENT_WITH_LEASE_OPERATION = "LeaseNotPresentWithLeaseOperation"
+    MAX_BLOB_SIZE_CONDITION_NOT_MET = "MaxBlobSizeConditionNotMet"
+    NO_AUTHENTICATION_INFORMATION = "NoAuthenticationInformation"
+    NO_PENDING_COPY_OPERATION = "NoPendingCopyOperation"
+    OPERATION_NOT_ALLOWED_ON_INCREMENTAL_COPY_BLOB = "OperationNotAllowedOnIncrementalCopyBlob"
+    PENDING_COPY_OPERATION = "PendingCopyOperation"
+    PREVIOUS_SNAPSHOT_CANNOT_BE_NEWER = "PreviousSnapshotCannotBeNewer"
+    PREVIOUS_SNAPSHOT_NOT_FOUND = "PreviousSnapshotNotFound"
+    PREVIOUS_SNAPSHOT_OPERATION_NOT_SUPPORTED = "PreviousSnapshotOperationNotSupported"
+    SEQUENCE_NUMBER_CONDITION_NOT_MET = "SequenceNumberConditionNotMet"
+    SEQUENCE_NUMBER_INCREMENT_TOO_LARGE = "SequenceNumberIncrementTooLarge"
+    SNAPSHOT_COUNT_EXCEEDED = "SnapshotCountExceeded"
+    SNAPSHOT_OPERATION_RATE_EXCEEDED = "SnapshotOperationRateExceeded"
+    SNAPSHOTS_PRESENT = "SnapshotsPresent"
+    SOURCE_CONDITION_NOT_MET = "SourceConditionNotMet"
+    SYSTEM_IN_USE = "SystemInUse"
+    TARGET_CONDITION_NOT_MET = "TargetConditionNotMet"
+    UNAUTHORIZED_BLOB_OVERWRITE = "UnauthorizedBlobOverwrite"
+    BLOB_BEING_REHYDRATED = "BlobBeingRehydrated"
+    BLOB_ARCHIVED = "BlobArchived"
+    BLOB_NOT_ARCHIVED = "BlobNotArchived"
+    AUTHORIZATION_SOURCE_IP_MISMATCH = "AuthorizationSourceIPMismatch"
+    AUTHORIZATION_PROTOCOL_MISMATCH = "AuthorizationProtocolMismatch"
+    AUTHORIZATION_PERMISSION_MISMATCH = "AuthorizationPermissionMismatch"
+    AUTHORIZATION_SERVICE_MISMATCH = "AuthorizationServiceMismatch"
+    AUTHORIZATION_RESOURCE_TYPE_MISMATCH = "AuthorizationResourceTypeMismatch"
+    BLOB_ACCESS_TIER_NOT_SUPPORTED_FOR_ACCOUNT_TYPE = "BlobAccessTierNotSupportedForAccountType"
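+
+
+# Editor's note (not part of the generated file): CaseInsensitiveEnumMeta
+# makes member lookup by name case-insensitive, which is why the service's
+# mixed-case strings are tolerated; a hedged illustration:
+#
+#     assert StorageErrorCode["blob_not_found"] is StorageErrorCode.BLOB_NOT_FOUND
+#     assert AccessTier.hot is AccessTier.HOT
+#     assert AccessTier.HOT == "Hot"  # str-valued enum compares to its value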
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/blob/_generated/models/_models_py3.py b/.venv/lib/python3.12/site-packages/azure/storage/blob/_generated/models/_models_py3.py
new file mode 100644
index 00000000..4160ece1
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/blob/_generated/models/_models_py3.py
@@ -0,0 +1,2771 @@
+# pylint: disable=too-many-lines
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+import datetime
+import sys
+from typing import Any, Dict, List, Optional, TYPE_CHECKING, Union
+
+from .. import _serialization
+
+if sys.version_info >= (3, 9):
+    from collections.abc import MutableMapping
+else:
+    from typing import MutableMapping  # type: ignore
+
+if TYPE_CHECKING:
+    from .. import models as _models
+JSON = MutableMapping[str, Any]  # pylint: disable=unsubscriptable-object
+
+
+class AccessPolicy(_serialization.Model):
+    """An Access policy.
+
+    :ivar start: the date-time the policy is active.
+    :vartype start: str
+    :ivar expiry: the date-time the policy expires.
+    :vartype expiry: str
+    :ivar permission: the permissions for the acl policy.
+    :vartype permission: str
+    """
+
+    _attribute_map = {
+        "start": {"key": "Start", "type": "str"},
+        "expiry": {"key": "Expiry", "type": "str"},
+        "permission": {"key": "Permission", "type": "str"},
+    }
+
+    def __init__(
+        self,
+        *,
+        start: Optional[str] = None,
+        expiry: Optional[str] = None,
+        permission: Optional[str] = None,
+        **kwargs: Any
+    ) -> None:
+        """
+        :keyword start: the date-time the policy is active.
+        :paramtype start: str
+        :keyword expiry: the date-time the policy expires.
+        :paramtype expiry: str
+        :keyword permission: the permissions for the acl policy.
+        :paramtype permission: str
+        """
+        super().__init__(**kwargs)
+        self.start = start
+        self.expiry = expiry
+        self.permission = permission
+
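+# Editor's sketch (not part of the generated file): _attribute_map drives the
+# XML (de)serialization, mapping the Python attribute `start` to the <Start>
+# element; timestamps are passed as preformatted ISO-8601 strings.
+#
+#     policy = AccessPolicy(
+#         start="2025-01-01T00:00:00Z",
+#         expiry="2025-01-02T00:00:00Z",
+#         permission="rw",
+#     )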
+
+class AppendPositionAccessConditions(_serialization.Model):
+    """Parameter group.
+
+    :ivar max_size: Optional conditional header. The max length in bytes permitted for the append
+     blob. If the Append Block operation would cause the blob to exceed that limit or if the blob
+     size is already greater than the value specified in this header, the request will fail with
+     MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed).
+    :vartype max_size: int
+    :ivar append_position: Optional conditional header, used only for the Append Block operation. A
+     number indicating the byte offset to compare. Append Block will succeed only if the append
+     position is equal to this number. If it is not, the request will fail with the
+     AppendPositionConditionNotMet error (HTTP status code 412 - Precondition Failed).
+    :vartype append_position: int
+    """
+
+    _attribute_map = {
+        "max_size": {"key": "maxSize", "type": "int"},
+        "append_position": {"key": "appendPosition", "type": "int"},
+    }
+
+    def __init__(self, *, max_size: Optional[int] = None, append_position: Optional[int] = None, **kwargs: Any) -> None:
+        """
+        :keyword max_size: Optional conditional header. The max length in bytes permitted for the
+         append blob. If the Append Block operation would cause the blob to exceed that limit or if the
+         blob size is already greater than the value specified in this header, the request will fail
+         with MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed).
+        :paramtype max_size: int
+        :keyword append_position: Optional conditional header, used only for the Append Block
+         operation. A number indicating the byte offset to compare. Append Block will succeed only if
+         the append position is equal to this number. If it is not, the request will fail with the
+         AppendPositionConditionNotMet error (HTTP status code 412 - Precondition Failed).
+        :paramtype append_position: int
+        """
+        super().__init__(**kwargs)
+        self.max_size = max_size
+        self.append_position = append_position
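+
+# Illustrative sketch: conditions that make an Append Block call fail with
+# HTTP 412 unless the blob stays under 1 MiB and the current length is exactly
+# 512 bytes. The numeric limits are assumed example values.
+#
+#     conditions = AppendPositionAccessConditions(
+#         max_size=1024 * 1024,   # reject appends that would exceed 1 MiB
+#         append_position=512,    # reject unless the blob is 512 bytes long
+#     )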
+
+
+class ArrowConfiguration(_serialization.Model):
+    """Groups the settings used for formatting the response if the response should be Arrow formatted.
+
+    All required parameters must be populated in order to send to the server.
+
+    :ivar schema: Required.
+    :vartype schema: list[~azure.storage.blob.models.ArrowField]
+    """
+
+    _validation = {
+        "schema": {"required": True},
+    }
+
+    _attribute_map = {
+        "schema": {
+            "key": "Schema",
+            "type": "[ArrowField]",
+            "xml": {"name": "Schema", "wrapped": True, "itemsName": "Field"},
+        },
+    }
+    _xml_map = {"name": "ArrowConfiguration"}
+
+    def __init__(self, *, schema: List["_models.ArrowField"], **kwargs: Any) -> None:
+        """
+        :keyword schema: Required.
+        :paramtype schema: list[~azure.storage.blob.models.ArrowField]
+        """
+        super().__init__(**kwargs)
+        self.schema = schema
+
+
+class ArrowField(_serialization.Model):
+    """Groups settings regarding specific field of an arrow schema.
+
+    All required parameters must be populated in order to send to the server.
+
+    :ivar type: Required.
+    :vartype type: str
+    :ivar name:
+    :vartype name: str
+    :ivar precision:
+    :vartype precision: int
+    :ivar scale:
+    :vartype scale: int
+    """
+
+    _validation = {
+        "type": {"required": True},
+    }
+
+    _attribute_map = {
+        "type": {"key": "Type", "type": "str"},
+        "name": {"key": "Name", "type": "str"},
+        "precision": {"key": "Precision", "type": "int"},
+        "scale": {"key": "Scale", "type": "int"},
+    }
+    _xml_map = {"name": "Field"}
+
+    def __init__(
+        self,
+        *,
+        type: str,
+        name: Optional[str] = None,
+        precision: Optional[int] = None,
+        scale: Optional[int] = None,
+        **kwargs: Any
+    ) -> None:
+        """
+        :keyword type: Required.
+        :paramtype type: str
+        :keyword name:
+        :paramtype name: str
+        :keyword precision:
+        :paramtype precision: int
+        :keyword scale:
+        :paramtype scale: int
+        """
+        super().__init__(**kwargs)
+        self.type = type
+        self.name = name
+        self.precision = precision
+        self.scale = scale
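+
+# Illustrative sketch: an Arrow schema for a query response with a single
+# decimal column. The field name, precision, and scale are assumed example
+# values.
+#
+#     arrow_config = ArrowConfiguration(
+#         schema=[ArrowField(type="decimal", name="price", precision=18, scale=2)]
+#     )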
+
+
+class BlobFlatListSegment(_serialization.Model):
+    """BlobFlatListSegment.
+
+    All required parameters must be populated in order to send to the server.
+
+    :ivar blob_items: Required.
+    :vartype blob_items: list[~azure.storage.blob.models.BlobItemInternal]
+    """
+
+    _validation = {
+        "blob_items": {"required": True},
+    }
+
+    _attribute_map = {
+        "blob_items": {"key": "BlobItems", "type": "[BlobItemInternal]", "xml": {"itemsName": "Blob"}},
+    }
+    _xml_map = {"name": "Blobs"}
+
+    def __init__(self, *, blob_items: List["_models.BlobItemInternal"], **kwargs: Any) -> None:
+        """
+        :keyword blob_items: Required.
+        :paramtype blob_items: list[~azure.storage.blob.models.BlobItemInternal]
+        """
+        super().__init__(**kwargs)
+        self.blob_items = blob_items
+
+
+class BlobHierarchyListSegment(_serialization.Model):
+    """BlobHierarchyListSegment.
+
+    All required parameters must be populated in order to send to the server.
+
+    :ivar blob_prefixes:
+    :vartype blob_prefixes: list[~azure.storage.blob.models.BlobPrefix]
+    :ivar blob_items: Required.
+    :vartype blob_items: list[~azure.storage.blob.models.BlobItemInternal]
+    """
+
+    _validation = {
+        "blob_items": {"required": True},
+    }
+
+    _attribute_map = {
+        "blob_prefixes": {"key": "BlobPrefixes", "type": "[BlobPrefix]", "xml": {"name": "BlobPrefix"}},
+        "blob_items": {"key": "BlobItems", "type": "[BlobItemInternal]", "xml": {"name": "Blob", "itemsName": "Blob"}},
+    }
+    _xml_map = {"name": "Blobs"}
+
+    def __init__(
+        self,
+        *,
+        blob_items: List["_models.BlobItemInternal"],
+        blob_prefixes: Optional[List["_models.BlobPrefix"]] = None,
+        **kwargs: Any
+    ) -> None:
+        """
+        :keyword blob_prefixes:
+        :paramtype blob_prefixes: list[~azure.storage.blob.models.BlobPrefix]
+        :keyword blob_items: Required.
+        :paramtype blob_items: list[~azure.storage.blob.models.BlobItemInternal]
+        """
+        super().__init__(**kwargs)
+        self.blob_prefixes = blob_prefixes
+        self.blob_items = blob_items
+
+
+class BlobHTTPHeaders(_serialization.Model):
+    """Parameter group.
+
+    :ivar blob_cache_control: Optional. Sets the blob's cache control. If specified, this property
+     is stored with the blob and returned with a read request.
+    :vartype blob_cache_control: str
+    :ivar blob_content_type: Optional. Sets the blob's content type. If specified, this property is
+     stored with the blob and returned with a read request.
+    :vartype blob_content_type: str
+    :ivar blob_content_md5: Optional. An MD5 hash of the blob content. Note that this hash is not
+     validated, as the hashes for the individual blocks were validated when each was uploaded.
+    :vartype blob_content_md5: bytes
+    :ivar blob_content_encoding: Optional. Sets the blob's content encoding. If specified, this
+     property is stored with the blob and returned with a read request.
+    :vartype blob_content_encoding: str
+    :ivar blob_content_language: Optional. Sets the blob's content language. If specified, this
+     property is stored with the blob and returned with a read request.
+    :vartype blob_content_language: str
+    :ivar blob_content_disposition: Optional. Sets the blob's Content-Disposition header.
+    :vartype blob_content_disposition: str
+    """
+
+    _attribute_map = {
+        "blob_cache_control": {"key": "blobCacheControl", "type": "str"},
+        "blob_content_type": {"key": "blobContentType", "type": "str"},
+        "blob_content_md5": {"key": "blobContentMD5", "type": "bytearray"},
+        "blob_content_encoding": {"key": "blobContentEncoding", "type": "str"},
+        "blob_content_language": {"key": "blobContentLanguage", "type": "str"},
+        "blob_content_disposition": {"key": "blobContentDisposition", "type": "str"},
+    }
+
+    def __init__(
+        self,
+        *,
+        blob_cache_control: Optional[str] = None,
+        blob_content_type: Optional[str] = None,
+        blob_content_md5: Optional[bytes] = None,
+        blob_content_encoding: Optional[str] = None,
+        blob_content_language: Optional[str] = None,
+        blob_content_disposition: Optional[str] = None,
+        **kwargs: Any
+    ) -> None:
+        """
+        :keyword blob_cache_control: Optional. Sets the blob's cache control. If specified, this
+         property is stored with the blob and returned with a read request.
+        :paramtype blob_cache_control: str
+        :keyword blob_content_type: Optional. Sets the blob's content type. If specified, this property
+         is stored with the blob and returned with a read request.
+        :paramtype blob_content_type: str
+        :keyword blob_content_md5: Optional. An MD5 hash of the blob content. Note that this hash is
+         not validated, as the hashes for the individual blocks were validated when each was uploaded.
+        :paramtype blob_content_md5: bytes
+        :keyword blob_content_encoding: Optional. Sets the blob's content encoding. If specified, this
+         property is stored with the blob and returned with a read request.
+        :paramtype blob_content_encoding: str
+        :keyword blob_content_language: Optional. Sets the blob's content language. If specified,
+         this
+         property is stored with the blob and returned with a read request.
+        :paramtype blob_content_language: str
+        :keyword blob_content_disposition: Optional. Sets the blob's Content-Disposition header.
+        :paramtype blob_content_disposition: str
+        """
+        super().__init__(**kwargs)
+        self.blob_cache_control = blob_cache_control
+        self.blob_content_type = blob_content_type
+        self.blob_content_md5 = blob_content_md5
+        self.blob_content_encoding = blob_content_encoding
+        self.blob_content_language = blob_content_language
+        self.blob_content_disposition = blob_content_disposition
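+
+# Illustrative sketch: standard HTTP headers to store alongside an uploaded
+# blob. The header values are assumed examples.
+#
+#     headers = BlobHTTPHeaders(
+#         blob_content_type="application/json",
+#         blob_content_encoding="gzip",
+#         blob_cache_control="max-age=3600",
+#     )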
+
+
+class BlobItemInternal(_serialization.Model):
+    """An Azure Storage blob.
+
+    All required parameters must be populated in order to send to the server.
+
+    :ivar name: Required.
+    :vartype name: ~azure.storage.blob.models.BlobName
+    :ivar deleted: Required.
+    :vartype deleted: bool
+    :ivar snapshot: Required.
+    :vartype snapshot: str
+    :ivar version_id:
+    :vartype version_id: str
+    :ivar is_current_version:
+    :vartype is_current_version: bool
+    :ivar properties: Properties of a blob. Required.
+    :vartype properties: ~azure.storage.blob.models.BlobPropertiesInternal
+    :ivar metadata:
+    :vartype metadata: ~azure.storage.blob.models.BlobMetadata
+    :ivar blob_tags: Blob tags.
+    :vartype blob_tags: ~azure.storage.blob.models.BlobTags
+    :ivar has_versions_only:
+    :vartype has_versions_only: bool
+    :ivar object_replication_metadata: Dictionary of :code:`<string>`.
+    :vartype object_replication_metadata: dict[str, str]
+    """
+
+    _validation = {
+        "name": {"required": True},
+        "deleted": {"required": True},
+        "snapshot": {"required": True},
+        "properties": {"required": True},
+    }
+
+    _attribute_map = {
+        "name": {"key": "Name", "type": "BlobName"},
+        "deleted": {"key": "Deleted", "type": "bool"},
+        "snapshot": {"key": "Snapshot", "type": "str"},
+        "version_id": {"key": "VersionId", "type": "str"},
+        "is_current_version": {"key": "IsCurrentVersion", "type": "bool"},
+        "properties": {"key": "Properties", "type": "BlobPropertiesInternal"},
+        "metadata": {"key": "Metadata", "type": "BlobMetadata"},
+        "blob_tags": {"key": "BlobTags", "type": "BlobTags"},
+        "has_versions_only": {"key": "HasVersionsOnly", "type": "bool"},
+        "object_replication_metadata": {"key": "OrMetadata", "type": "{str}"},
+    }
+    _xml_map = {"name": "Blob"}
+
+    def __init__(
+        self,
+        *,
+        name: "_models.BlobName",
+        deleted: bool,
+        snapshot: str,
+        properties: "_models.BlobPropertiesInternal",
+        version_id: Optional[str] = None,
+        is_current_version: Optional[bool] = None,
+        metadata: Optional["_models.BlobMetadata"] = None,
+        blob_tags: Optional["_models.BlobTags"] = None,
+        has_versions_only: Optional[bool] = None,
+        object_replication_metadata: Optional[Dict[str, str]] = None,
+        **kwargs: Any
+    ) -> None:
+        """
+        :keyword name: Required.
+        :paramtype name: ~azure.storage.blob.models.BlobName
+        :keyword deleted: Required.
+        :paramtype deleted: bool
+        :keyword snapshot: Required.
+        :paramtype snapshot: str
+        :keyword version_id:
+        :paramtype version_id: str
+        :keyword is_current_version:
+        :paramtype is_current_version: bool
+        :keyword properties: Properties of a blob. Required.
+        :paramtype properties: ~azure.storage.blob.models.BlobPropertiesInternal
+        :keyword metadata:
+        :paramtype metadata: ~azure.storage.blob.models.BlobMetadata
+        :keyword blob_tags: Blob tags.
+        :paramtype blob_tags: ~azure.storage.blob.models.BlobTags
+        :keyword has_versions_only:
+        :paramtype has_versions_only: bool
+        :keyword object_replication_metadata: Dictionary of :code:`<string>`.
+        :paramtype object_replication_metadata: dict[str, str]
+        """
+        super().__init__(**kwargs)
+        self.name = name
+        self.deleted = deleted
+        self.snapshot = snapshot
+        self.version_id = version_id
+        self.is_current_version = is_current_version
+        self.properties = properties
+        self.metadata = metadata
+        self.blob_tags = blob_tags
+        self.has_versions_only = has_versions_only
+        self.object_replication_metadata = object_replication_metadata
+
+
+class BlobMetadata(_serialization.Model):
+    """BlobMetadata.
+
+    :ivar additional_properties: Unmatched properties from the message are deserialized to this
+     collection.
+    :vartype additional_properties: dict[str, str]
+    :ivar encrypted:
+    :vartype encrypted: str
+    """
+
+    _attribute_map = {
+        "additional_properties": {"key": "", "type": "{str}"},
+        "encrypted": {"key": "Encrypted", "type": "str", "xml": {"attr": True}},
+    }
+    _xml_map = {"name": "Metadata"}
+
+    def __init__(
+        self, *, additional_properties: Optional[Dict[str, str]] = None, encrypted: Optional[str] = None, **kwargs: Any
+    ) -> None:
+        """
+        :keyword additional_properties: Unmatched properties from the message are deserialized to this
+         collection.
+        :paramtype additional_properties: dict[str, str]
+        :keyword encrypted:
+        :paramtype encrypted: str
+        """
+        super().__init__(**kwargs)
+        self.additional_properties = additional_properties
+        self.encrypted = encrypted
+
+
+class BlobName(_serialization.Model):
+    """BlobName.
+
+    :ivar encoded: Indicates if the blob name is encoded.
+    :vartype encoded: bool
+    :ivar content: The name of the blob.
+    :vartype content: str
+    """
+
+    _attribute_map = {
+        "encoded": {"key": "Encoded", "type": "bool", "xml": {"name": "Encoded", "attr": True}},
+        "content": {"key": "content", "type": "str", "xml": {"text": True}},
+    }
+
+    def __init__(self, *, encoded: Optional[bool] = None, content: Optional[str] = None, **kwargs: Any) -> None:
+        """
+        :keyword encoded: Indicates if the blob name is encoded.
+        :paramtype encoded: bool
+        :keyword content: The name of the blob.
+        :paramtype content: str
+        """
+        super().__init__(**kwargs)
+        self.encoded = encoded
+        self.content = content
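+
+# Illustrative sketch: how a listing response represents a percent-encoded
+# blob name; the name below is an assumed example.
+#
+#     name = BlobName(encoded=True, content="reports%2F2025%2Fsummary.txt")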
+
+
+class BlobPrefix(_serialization.Model):
+    """BlobPrefix.
+
+    All required parameters must be populated in order to send to the server.
+
+    :ivar name: Required.
+    :vartype name: ~azure.storage.blob.models.BlobName
+    """
+
+    _validation = {
+        "name": {"required": True},
+    }
+
+    _attribute_map = {
+        "name": {"key": "Name", "type": "BlobName"},
+    }
+
+    def __init__(self, *, name: "_models.BlobName", **kwargs: Any) -> None:
+        """
+        :keyword name: Required.
+        :paramtype name: ~azure.storage.blob.models.BlobName
+        """
+        super().__init__(**kwargs)
+        self.name = name
+
+
+class BlobPropertiesInternal(_serialization.Model):
+    """Properties of a blob.
+
+    All required parameters must be populated in order to send to the server.
+
+    :ivar creation_time:
+    :vartype creation_time: ~datetime.datetime
+    :ivar last_modified: Required.
+    :vartype last_modified: ~datetime.datetime
+    :ivar etag: Required.
+    :vartype etag: str
+    :ivar content_length: Size in bytes.
+    :vartype content_length: int
+    :ivar content_type:
+    :vartype content_type: str
+    :ivar content_encoding:
+    :vartype content_encoding: str
+    :ivar content_language:
+    :vartype content_language: str
+    :ivar content_md5:
+    :vartype content_md5: bytes
+    :ivar content_disposition:
+    :vartype content_disposition: str
+    :ivar cache_control:
+    :vartype cache_control: str
+    :ivar blob_sequence_number:
+    :vartype blob_sequence_number: int
+    :ivar blob_type: Known values are: "BlockBlob", "PageBlob", and "AppendBlob".
+    :vartype blob_type: str or ~azure.storage.blob.models.BlobType
+    :ivar lease_status: Known values are: "locked" and "unlocked".
+    :vartype lease_status: str or ~azure.storage.blob.models.LeaseStatusType
+    :ivar lease_state: Known values are: "available", "leased", "expired", "breaking", and
+     "broken".
+    :vartype lease_state: str or ~azure.storage.blob.models.LeaseStateType
+    :ivar lease_duration: Known values are: "infinite" and "fixed".
+    :vartype lease_duration: str or ~azure.storage.blob.models.LeaseDurationType
+    :ivar copy_id:
+    :vartype copy_id: str
+    :ivar copy_status: Known values are: "pending", "success", "aborted", and "failed".
+    :vartype copy_status: str or ~azure.storage.blob.models.CopyStatusType
+    :ivar copy_source:
+    :vartype copy_source: str
+    :ivar copy_progress:
+    :vartype copy_progress: str
+    :ivar copy_completion_time:
+    :vartype copy_completion_time: ~datetime.datetime
+    :ivar copy_status_description:
+    :vartype copy_status_description: str
+    :ivar server_encrypted:
+    :vartype server_encrypted: bool
+    :ivar incremental_copy:
+    :vartype incremental_copy: bool
+    :ivar destination_snapshot:
+    :vartype destination_snapshot: str
+    :ivar deleted_time:
+    :vartype deleted_time: ~datetime.datetime
+    :ivar remaining_retention_days:
+    :vartype remaining_retention_days: int
+    :ivar access_tier: Known values are: "P4", "P6", "P10", "P15", "P20", "P30", "P40", "P50",
+     "P60", "P70", "P80", "Hot", "Cool", "Archive", "Premium", and "Cold".
+    :vartype access_tier: str or ~azure.storage.blob.models.AccessTier
+    :ivar access_tier_inferred:
+    :vartype access_tier_inferred: bool
+    :ivar archive_status: Known values are: "rehydrate-pending-to-hot",
+     "rehydrate-pending-to-cool", and "rehydrate-pending-to-cold".
+    :vartype archive_status: str or ~azure.storage.blob.models.ArchiveStatus
+    :ivar customer_provided_key_sha256:
+    :vartype customer_provided_key_sha256: str
+    :ivar encryption_scope: The name of the encryption scope under which the blob is encrypted.
+    :vartype encryption_scope: str
+    :ivar access_tier_change_time:
+    :vartype access_tier_change_time: ~datetime.datetime
+    :ivar tag_count:
+    :vartype tag_count: int
+    :ivar expires_on:
+    :vartype expires_on: ~datetime.datetime
+    :ivar is_sealed:
+    :vartype is_sealed: bool
+    :ivar rehydrate_priority: If an object is in the rehydrate pending state, this header is
+     returned with the rehydrate priority. Valid values are High and Standard. Known values are:
+     "High" and "Standard".
+    :vartype rehydrate_priority: str or ~azure.storage.blob.models.RehydratePriority
+    :ivar last_accessed_on:
+    :vartype last_accessed_on: ~datetime.datetime
+    :ivar immutability_policy_expires_on:
+    :vartype immutability_policy_expires_on: ~datetime.datetime
+    :ivar immutability_policy_mode: Known values are: "Mutable", "Unlocked", and "Locked".
+    :vartype immutability_policy_mode: str or ~azure.storage.blob.models.BlobImmutabilityPolicyMode
+    :ivar legal_hold:
+    :vartype legal_hold: bool
+    """
+
+    _validation = {
+        "last_modified": {"required": True},
+        "etag": {"required": True},
+    }
+
+    _attribute_map = {
+        "creation_time": {"key": "Creation-Time", "type": "rfc-1123"},
+        "last_modified": {"key": "Last-Modified", "type": "rfc-1123"},
+        "etag": {"key": "Etag", "type": "str"},
+        "content_length": {"key": "Content-Length", "type": "int"},
+        "content_type": {"key": "Content-Type", "type": "str"},
+        "content_encoding": {"key": "Content-Encoding", "type": "str"},
+        "content_language": {"key": "Content-Language", "type": "str"},
+        "content_md5": {"key": "Content-MD5", "type": "bytearray"},
+        "content_disposition": {"key": "Content-Disposition", "type": "str"},
+        "cache_control": {"key": "Cache-Control", "type": "str"},
+        "blob_sequence_number": {"key": "x-ms-blob-sequence-number", "type": "int"},
+        "blob_type": {"key": "BlobType", "type": "str"},
+        "lease_status": {"key": "LeaseStatus", "type": "str"},
+        "lease_state": {"key": "LeaseState", "type": "str"},
+        "lease_duration": {"key": "LeaseDuration", "type": "str"},
+        "copy_id": {"key": "CopyId", "type": "str"},
+        "copy_status": {"key": "CopyStatus", "type": "str"},
+        "copy_source": {"key": "CopySource", "type": "str"},
+        "copy_progress": {"key": "CopyProgress", "type": "str"},
+        "copy_completion_time": {"key": "CopyCompletionTime", "type": "rfc-1123"},
+        "copy_status_description": {"key": "CopyStatusDescription", "type": "str"},
+        "server_encrypted": {"key": "ServerEncrypted", "type": "bool"},
+        "incremental_copy": {"key": "IncrementalCopy", "type": "bool"},
+        "destination_snapshot": {"key": "DestinationSnapshot", "type": "str"},
+        "deleted_time": {"key": "DeletedTime", "type": "rfc-1123"},
+        "remaining_retention_days": {"key": "RemainingRetentionDays", "type": "int"},
+        "access_tier": {"key": "AccessTier", "type": "str"},
+        "access_tier_inferred": {"key": "AccessTierInferred", "type": "bool"},
+        "archive_status": {"key": "ArchiveStatus", "type": "str"},
+        "customer_provided_key_sha256": {"key": "CustomerProvidedKeySha256", "type": "str"},
+        "encryption_scope": {"key": "EncryptionScope", "type": "str"},
+        "access_tier_change_time": {"key": "AccessTierChangeTime", "type": "rfc-1123"},
+        "tag_count": {"key": "TagCount", "type": "int"},
+        "expires_on": {"key": "Expiry-Time", "type": "rfc-1123"},
+        "is_sealed": {"key": "Sealed", "type": "bool"},
+        "rehydrate_priority": {"key": "RehydratePriority", "type": "str"},
+        "last_accessed_on": {"key": "LastAccessTime", "type": "rfc-1123"},
+        "immutability_policy_expires_on": {"key": "ImmutabilityPolicyUntilDate", "type": "rfc-1123"},
+        "immutability_policy_mode": {"key": "ImmutabilityPolicyMode", "type": "str"},
+        "legal_hold": {"key": "LegalHold", "type": "bool"},
+    }
+    _xml_map = {"name": "Properties"}
+
+    def __init__(  # pylint: disable=too-many-locals
+        self,
+        *,
+        last_modified: datetime.datetime,
+        etag: str,
+        creation_time: Optional[datetime.datetime] = None,
+        content_length: Optional[int] = None,
+        content_type: Optional[str] = None,
+        content_encoding: Optional[str] = None,
+        content_language: Optional[str] = None,
+        content_md5: Optional[bytes] = None,
+        content_disposition: Optional[str] = None,
+        cache_control: Optional[str] = None,
+        blob_sequence_number: Optional[int] = None,
+        blob_type: Optional[Union[str, "_models.BlobType"]] = None,
+        lease_status: Optional[Union[str, "_models.LeaseStatusType"]] = None,
+        lease_state: Optional[Union[str, "_models.LeaseStateType"]] = None,
+        lease_duration: Optional[Union[str, "_models.LeaseDurationType"]] = None,
+        copy_id: Optional[str] = None,
+        copy_status: Optional[Union[str, "_models.CopyStatusType"]] = None,
+        copy_source: Optional[str] = None,
+        copy_progress: Optional[str] = None,
+        copy_completion_time: Optional[datetime.datetime] = None,
+        copy_status_description: Optional[str] = None,
+        server_encrypted: Optional[bool] = None,
+        incremental_copy: Optional[bool] = None,
+        destination_snapshot: Optional[str] = None,
+        deleted_time: Optional[datetime.datetime] = None,
+        remaining_retention_days: Optional[int] = None,
+        access_tier: Optional[Union[str, "_models.AccessTier"]] = None,
+        access_tier_inferred: Optional[bool] = None,
+        archive_status: Optional[Union[str, "_models.ArchiveStatus"]] = None,
+        customer_provided_key_sha256: Optional[str] = None,
+        encryption_scope: Optional[str] = None,
+        access_tier_change_time: Optional[datetime.datetime] = None,
+        tag_count: Optional[int] = None,
+        expires_on: Optional[datetime.datetime] = None,
+        is_sealed: Optional[bool] = None,
+        rehydrate_priority: Optional[Union[str, "_models.RehydratePriority"]] = None,
+        last_accessed_on: Optional[datetime.datetime] = None,
+        immutability_policy_expires_on: Optional[datetime.datetime] = None,
+        immutability_policy_mode: Optional[Union[str, "_models.BlobImmutabilityPolicyMode"]] = None,
+        legal_hold: Optional[bool] = None,
+        **kwargs: Any
+    ) -> None:
+        """
+        :keyword creation_time:
+        :paramtype creation_time: ~datetime.datetime
+        :keyword last_modified: Required.
+        :paramtype last_modified: ~datetime.datetime
+        :keyword etag: Required.
+        :paramtype etag: str
+        :keyword content_length: Size in bytes.
+        :paramtype content_length: int
+        :keyword content_type:
+        :paramtype content_type: str
+        :keyword content_encoding:
+        :paramtype content_encoding: str
+        :keyword content_language:
+        :paramtype content_language: str
+        :keyword content_md5:
+        :paramtype content_md5: bytes
+        :keyword content_disposition:
+        :paramtype content_disposition: str
+        :keyword cache_control:
+        :paramtype cache_control: str
+        :keyword blob_sequence_number:
+        :paramtype blob_sequence_number: int
+        :keyword blob_type: Known values are: "BlockBlob", "PageBlob", and "AppendBlob".
+        :paramtype blob_type: str or ~azure.storage.blob.models.BlobType
+        :keyword lease_status: Known values are: "locked" and "unlocked".
+        :paramtype lease_status: str or ~azure.storage.blob.models.LeaseStatusType
+        :keyword lease_state: Known values are: "available", "leased", "expired", "breaking", and
+         "broken".
+        :paramtype lease_state: str or ~azure.storage.blob.models.LeaseStateType
+        :keyword lease_duration: Known values are: "infinite" and "fixed".
+        :paramtype lease_duration: str or ~azure.storage.blob.models.LeaseDurationType
+        :keyword copy_id:
+        :paramtype copy_id: str
+        :keyword copy_status: Known values are: "pending", "success", "aborted", and "failed".
+        :paramtype copy_status: str or ~azure.storage.blob.models.CopyStatusType
+        :keyword copy_source:
+        :paramtype copy_source: str
+        :keyword copy_progress:
+        :paramtype copy_progress: str
+        :keyword copy_completion_time:
+        :paramtype copy_completion_time: ~datetime.datetime
+        :keyword copy_status_description:
+        :paramtype copy_status_description: str
+        :keyword server_encrypted:
+        :paramtype server_encrypted: bool
+        :keyword incremental_copy:
+        :paramtype incremental_copy: bool
+        :keyword destination_snapshot:
+        :paramtype destination_snapshot: str
+        :keyword deleted_time:
+        :paramtype deleted_time: ~datetime.datetime
+        :keyword remaining_retention_days:
+        :paramtype remaining_retention_days: int
+        :keyword access_tier: Known values are: "P4", "P6", "P10", "P15", "P20", "P30", "P40", "P50",
+         "P60", "P70", "P80", "Hot", "Cool", "Archive", "Premium", and "Cold".
+        :paramtype access_tier: str or ~azure.storage.blob.models.AccessTier
+        :keyword access_tier_inferred:
+        :paramtype access_tier_inferred: bool
+        :keyword archive_status: Known values are: "rehydrate-pending-to-hot",
+         "rehydrate-pending-to-cool", and "rehydrate-pending-to-cold".
+        :paramtype archive_status: str or ~azure.storage.blob.models.ArchiveStatus
+        :keyword customer_provided_key_sha256:
+        :paramtype customer_provided_key_sha256: str
+        :keyword encryption_scope: The name of the encryption scope under which the blob is encrypted.
+        :paramtype encryption_scope: str
+        :keyword access_tier_change_time:
+        :paramtype access_tier_change_time: ~datetime.datetime
+        :keyword tag_count:
+        :paramtype tag_count: int
+        :keyword expires_on:
+        :paramtype expires_on: ~datetime.datetime
+        :keyword is_sealed:
+        :paramtype is_sealed: bool
+        :keyword rehydrate_priority: If an object is in the rehydrate pending state, this header is
+         returned with the rehydrate priority. Valid values are High and Standard. Known values
+         are: "High" and "Standard".
+        :paramtype rehydrate_priority: str or ~azure.storage.blob.models.RehydratePriority
+        :keyword last_accessed_on:
+        :paramtype last_accessed_on: ~datetime.datetime
+        :keyword immutability_policy_expires_on:
+        :paramtype immutability_policy_expires_on: ~datetime.datetime
+        :keyword immutability_policy_mode: Known values are: "Mutable", "Unlocked", and "Locked".
+        :paramtype immutability_policy_mode: str or
+         ~azure.storage.blob.models.BlobImmutabilityPolicyMode
+        :keyword legal_hold:
+        :paramtype legal_hold: bool
+        """
+        super().__init__(**kwargs)
+        self.creation_time = creation_time
+        self.last_modified = last_modified
+        self.etag = etag
+        self.content_length = content_length
+        self.content_type = content_type
+        self.content_encoding = content_encoding
+        self.content_language = content_language
+        self.content_md5 = content_md5
+        self.content_disposition = content_disposition
+        self.cache_control = cache_control
+        self.blob_sequence_number = blob_sequence_number
+        self.blob_type = blob_type
+        self.lease_status = lease_status
+        self.lease_state = lease_state
+        self.lease_duration = lease_duration
+        self.copy_id = copy_id
+        self.copy_status = copy_status
+        self.copy_source = copy_source
+        self.copy_progress = copy_progress
+        self.copy_completion_time = copy_completion_time
+        self.copy_status_description = copy_status_description
+        self.server_encrypted = server_encrypted
+        self.incremental_copy = incremental_copy
+        self.destination_snapshot = destination_snapshot
+        self.deleted_time = deleted_time
+        self.remaining_retention_days = remaining_retention_days
+        self.access_tier = access_tier
+        self.access_tier_inferred = access_tier_inferred
+        self.archive_status = archive_status
+        self.customer_provided_key_sha256 = customer_provided_key_sha256
+        self.encryption_scope = encryption_scope
+        self.access_tier_change_time = access_tier_change_time
+        self.tag_count = tag_count
+        self.expires_on = expires_on
+        self.is_sealed = is_sealed
+        self.rehydrate_priority = rehydrate_priority
+        self.last_accessed_on = last_accessed_on
+        self.immutability_policy_expires_on = immutability_policy_expires_on
+        self.immutability_policy_mode = immutability_policy_mode
+        self.legal_hold = legal_hold
+
+
+class BlobTag(_serialization.Model):
+    """BlobTag.
+
+    All required parameters must be populated in order to send to the server.
+
+    :ivar key: Required.
+    :vartype key: str
+    :ivar value: Required.
+    :vartype value: str
+    """
+
+    _validation = {
+        "key": {"required": True},
+        "value": {"required": True},
+    }
+
+    _attribute_map = {
+        "key": {"key": "Key", "type": "str"},
+        "value": {"key": "Value", "type": "str"},
+    }
+    _xml_map = {"name": "Tag"}
+
+    def __init__(self, *, key: str, value: str, **kwargs: Any) -> None:
+        """
+        :keyword key: Required.
+        :paramtype key: str
+        :keyword value: Required.
+        :paramtype value: str
+        """
+        super().__init__(**kwargs)
+        self.key = key
+        self.value = value
+
+
+class BlobTags(_serialization.Model):
+    """Blob tags.
+
+    All required parameters must be populated in order to send to the server.
+
+    :ivar blob_tag_set: Required.
+    :vartype blob_tag_set: list[~azure.storage.blob.models.BlobTag]
+    """
+
+    _validation = {
+        "blob_tag_set": {"required": True},
+    }
+
+    _attribute_map = {
+        "blob_tag_set": {
+            "key": "BlobTagSet",
+            "type": "[BlobTag]",
+            "xml": {"name": "TagSet", "wrapped": True, "itemsName": "Tag"},
+        },
+    }
+    _xml_map = {"name": "Tags"}
+
+    def __init__(self, *, blob_tag_set: List["_models.BlobTag"], **kwargs: Any) -> None:
+        """
+        :keyword blob_tag_set: Required.
+        :paramtype blob_tag_set: list[~azure.storage.blob.models.BlobTag]
+        """
+        super().__init__(**kwargs)
+        self.blob_tag_set = blob_tag_set
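+
+# Illustrative sketch: the tag set attached to a blob, as used by the blob
+# tags APIs. The tag keys and values are assumed examples.
+#
+#     tags = BlobTags(blob_tag_set=[
+#         BlobTag(key="project", value="demo"),
+#         BlobTag(key="stage", value="raw"),
+#     ])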
+
+
+class Block(_serialization.Model):
+    """Represents a single block in a block blob.  It describes the block's ID and size.
+
+    All required parameters must be populated in order to send to the server.
+
+    :ivar name: The base64 encoded block ID. Required.
+    :vartype name: str
+    :ivar size: The block size in bytes. Required.
+    :vartype size: int
+    """
+
+    _validation = {
+        "name": {"required": True},
+        "size": {"required": True},
+    }
+
+    _attribute_map = {
+        "name": {"key": "Name", "type": "str"},
+        "size": {"key": "Size", "type": "int"},
+    }
+
+    def __init__(self, *, name: str, size: int, **kwargs: Any) -> None:
+        """
+        :keyword name: The base64 encoded block ID. Required.
+        :paramtype name: str
+        :keyword size: The block size in bytes. Required.
+        :paramtype size: int
+        """
+        super().__init__(**kwargs)
+        self.name = name
+        self.size = size
+
+
+class BlockList(_serialization.Model):
+    """BlockList.
+
+    :ivar committed_blocks:
+    :vartype committed_blocks: list[~azure.storage.blob.models.Block]
+    :ivar uncommitted_blocks:
+    :vartype uncommitted_blocks: list[~azure.storage.blob.models.Block]
+    """
+
+    _attribute_map = {
+        "committed_blocks": {"key": "CommittedBlocks", "type": "[Block]", "xml": {"wrapped": True}},
+        "uncommitted_blocks": {"key": "UncommittedBlocks", "type": "[Block]", "xml": {"wrapped": True}},
+    }
+
+    def __init__(
+        self,
+        *,
+        committed_blocks: Optional[List["_models.Block"]] = None,
+        uncommitted_blocks: Optional[List["_models.Block"]] = None,
+        **kwargs: Any
+    ) -> None:
+        """
+        :keyword committed_blocks:
+        :paramtype committed_blocks: list[~azure.storage.blob.models.Block]
+        :keyword uncommitted_blocks:
+        :paramtype uncommitted_blocks: list[~azure.storage.blob.models.Block]
+        """
+        super().__init__(**kwargs)
+        self.committed_blocks = committed_blocks
+        self.uncommitted_blocks = uncommitted_blocks
+
+
+class BlockLookupList(_serialization.Model):
+    """BlockLookupList.
+
+    :ivar committed:
+    :vartype committed: list[str]
+    :ivar uncommitted:
+    :vartype uncommitted: list[str]
+    :ivar latest:
+    :vartype latest: list[str]
+    """
+
+    _attribute_map = {
+        "committed": {"key": "Committed", "type": "[str]", "xml": {"itemsName": "Committed"}},
+        "uncommitted": {"key": "Uncommitted", "type": "[str]", "xml": {"itemsName": "Uncommitted"}},
+        "latest": {"key": "Latest", "type": "[str]", "xml": {"itemsName": "Latest"}},
+    }
+    _xml_map = {"name": "BlockList"}
+
+    def __init__(
+        self,
+        *,
+        committed: Optional[List[str]] = None,
+        uncommitted: Optional[List[str]] = None,
+        latest: Optional[List[str]] = None,
+        **kwargs: Any
+    ) -> None:
+        """
+        :keyword committed:
+        :paramtype committed: list[str]
+        :keyword uncommitted:
+        :paramtype uncommitted: list[str]
+        :keyword latest:
+        :paramtype latest: list[str]
+        """
+        super().__init__(**kwargs)
+        self.committed = committed
+        self.uncommitted = uncommitted
+        self.latest = latest
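+
+# Illustrative sketch: committing a block blob from its most recently staged
+# blocks. Block IDs must be base64-encoded strings; the IDs generated below
+# are assumed examples.
+#
+#     import base64
+#     block_ids = [base64.b64encode(f"block-{i:05d}".encode()).decode() for i in range(3)]
+#     lookup = BlockLookupList(latest=block_ids)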
+
+
+class ClearRange(_serialization.Model):
+    """ClearRange.
+
+    All required parameters must be populated in order to send to the server.
+
+    :ivar start: Required.
+    :vartype start: int
+    :ivar end: Required.
+    :vartype end: int
+    """
+
+    _validation = {
+        "start": {"required": True},
+        "end": {"required": True},
+    }
+
+    _attribute_map = {
+        "start": {"key": "Start", "type": "int", "xml": {"name": "Start"}},
+        "end": {"key": "End", "type": "int", "xml": {"name": "End"}},
+    }
+    _xml_map = {"name": "ClearRange"}
+
+    def __init__(self, *, start: int, end: int, **kwargs: Any) -> None:
+        """
+        :keyword start: Required.
+        :paramtype start: int
+        :keyword end: Required.
+        :paramtype end: int
+        """
+        super().__init__(**kwargs)
+        self.start = start
+        self.end = end
+
+
+class ContainerCpkScopeInfo(_serialization.Model):
+    """Parameter group.
+
+    :ivar default_encryption_scope: Optional.  Version 2019-07-07 and later.  Specifies the default
+     encryption scope to set on the container and use for all future writes.
+    :vartype default_encryption_scope: str
+    :ivar prevent_encryption_scope_override: Optional.  Version 2019-07-07 and later.  If true,
+     prevents any request from specifying a different encryption scope than the scope set on the
+     container.
+    :vartype prevent_encryption_scope_override: bool
+    """
+
+    _attribute_map = {
+        "default_encryption_scope": {"key": "DefaultEncryptionScope", "type": "str"},
+        "prevent_encryption_scope_override": {"key": "PreventEncryptionScopeOverride", "type": "bool"},
+    }
+
+    def __init__(
+        self,
+        *,
+        default_encryption_scope: Optional[str] = None,
+        prevent_encryption_scope_override: Optional[bool] = None,
+        **kwargs: Any
+    ) -> None:
+        """
+        :keyword default_encryption_scope: Optional.  Version 2019-07-07 and later.  Specifies the
+         default encryption scope to set on the container and use for all future writes.
+        :paramtype default_encryption_scope: str
+        :keyword prevent_encryption_scope_override: Optional.  Version 2019-07-07 and later.  If
+         true,
+         prevents any request from specifying a different encryption scope than the scope set on the
+         container.
+        :paramtype prevent_encryption_scope_override: bool
+        """
+        super().__init__(**kwargs)
+        self.default_encryption_scope = default_encryption_scope
+        self.prevent_encryption_scope_override = prevent_encryption_scope_override
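+
+# Illustrative sketch: pinning a container to a default encryption scope and
+# blocking per-request overrides. The scope name is an assumed example.
+#
+#     scope_info = ContainerCpkScopeInfo(
+#         default_encryption_scope="my-scope",
+#         prevent_encryption_scope_override=True,
+#     )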
+
+
+class ContainerItem(_serialization.Model):
+    """An Azure Storage container.
+
+    All required parameters must be populated in order to send to the server.
+
+    :ivar name: Required.
+    :vartype name: str
+    :ivar deleted:
+    :vartype deleted: bool
+    :ivar version:
+    :vartype version: str
+    :ivar properties: Properties of a container. Required.
+    :vartype properties: ~azure.storage.blob.models.ContainerProperties
+    :ivar metadata: Dictionary of :code:`<string>`.
+    :vartype metadata: dict[str, str]
+    """
+
+    _validation = {
+        "name": {"required": True},
+        "properties": {"required": True},
+    }
+
+    _attribute_map = {
+        "name": {"key": "Name", "type": "str"},
+        "deleted": {"key": "Deleted", "type": "bool"},
+        "version": {"key": "Version", "type": "str"},
+        "properties": {"key": "Properties", "type": "ContainerProperties"},
+        "metadata": {"key": "Metadata", "type": "{str}"},
+    }
+    _xml_map = {"name": "Container"}
+
+    def __init__(
+        self,
+        *,
+        name: str,
+        properties: "_models.ContainerProperties",
+        deleted: Optional[bool] = None,
+        version: Optional[str] = None,
+        metadata: Optional[Dict[str, str]] = None,
+        **kwargs: Any
+    ) -> None:
+        """
+        :keyword name: Required.
+        :paramtype name: str
+        :keyword deleted:
+        :paramtype deleted: bool
+        :keyword version:
+        :paramtype version: str
+        :keyword properties: Properties of a container. Required.
+        :paramtype properties: ~azure.storage.blob.models.ContainerProperties
+        :keyword metadata: Dictionary of :code:`<string>`.
+        :paramtype metadata: dict[str, str]
+        """
+        super().__init__(**kwargs)
+        self.name = name
+        self.deleted = deleted
+        self.version = version
+        self.properties = properties
+        self.metadata = metadata
+
+
+class ContainerProperties(_serialization.Model):
+    """Properties of a container.
+
+    All required parameters must be populated in order to send to the server.
+
+    :ivar last_modified: Required.
+    :vartype last_modified: ~datetime.datetime
+    :ivar etag: Required.
+    :vartype etag: str
+    :ivar lease_status: Known values are: "locked" and "unlocked".
+    :vartype lease_status: str or ~azure.storage.blob.models.LeaseStatusType
+    :ivar lease_state: Known values are: "available", "leased", "expired", "breaking", and
+     "broken".
+    :vartype lease_state: str or ~azure.storage.blob.models.LeaseStateType
+    :ivar lease_duration: Known values are: "infinite" and "fixed".
+    :vartype lease_duration: str or ~azure.storage.blob.models.LeaseDurationType
+    :ivar public_access: Known values are: "container" and "blob".
+    :vartype public_access: str or ~azure.storage.blob.models.PublicAccessType
+    :ivar has_immutability_policy:
+    :vartype has_immutability_policy: bool
+    :ivar has_legal_hold:
+    :vartype has_legal_hold: bool
+    :ivar default_encryption_scope:
+    :vartype default_encryption_scope: str
+    :ivar prevent_encryption_scope_override:
+    :vartype prevent_encryption_scope_override: bool
+    :ivar deleted_time:
+    :vartype deleted_time: ~datetime.datetime
+    :ivar remaining_retention_days:
+    :vartype remaining_retention_days: int
+    :ivar is_immutable_storage_with_versioning_enabled: Indicates whether version-level WORM
+     (write once, read many) is enabled on this container.
+    :vartype is_immutable_storage_with_versioning_enabled: bool
+    """
+
+    _validation = {
+        "last_modified": {"required": True},
+        "etag": {"required": True},
+    }
+
+    _attribute_map = {
+        "last_modified": {"key": "Last-Modified", "type": "rfc-1123"},
+        "etag": {"key": "Etag", "type": "str"},
+        "lease_status": {"key": "LeaseStatus", "type": "str"},
+        "lease_state": {"key": "LeaseState", "type": "str"},
+        "lease_duration": {"key": "LeaseDuration", "type": "str"},
+        "public_access": {"key": "PublicAccess", "type": "str"},
+        "has_immutability_policy": {"key": "HasImmutabilityPolicy", "type": "bool"},
+        "has_legal_hold": {"key": "HasLegalHold", "type": "bool"},
+        "default_encryption_scope": {"key": "DefaultEncryptionScope", "type": "str"},
+        "prevent_encryption_scope_override": {"key": "DenyEncryptionScopeOverride", "type": "bool"},
+        "deleted_time": {"key": "DeletedTime", "type": "rfc-1123"},
+        "remaining_retention_days": {"key": "RemainingRetentionDays", "type": "int"},
+        "is_immutable_storage_with_versioning_enabled": {
+            "key": "ImmutableStorageWithVersioningEnabled",
+            "type": "bool",
+        },
+    }
+
+    def __init__(
+        self,
+        *,
+        last_modified: datetime.datetime,
+        etag: str,
+        lease_status: Optional[Union[str, "_models.LeaseStatusType"]] = None,
+        lease_state: Optional[Union[str, "_models.LeaseStateType"]] = None,
+        lease_duration: Optional[Union[str, "_models.LeaseDurationType"]] = None,
+        public_access: Optional[Union[str, "_models.PublicAccessType"]] = None,
+        has_immutability_policy: Optional[bool] = None,
+        has_legal_hold: Optional[bool] = None,
+        default_encryption_scope: Optional[str] = None,
+        prevent_encryption_scope_override: Optional[bool] = None,
+        deleted_time: Optional[datetime.datetime] = None,
+        remaining_retention_days: Optional[int] = None,
+        is_immutable_storage_with_versioning_enabled: Optional[bool] = None,
+        **kwargs: Any
+    ) -> None:
+        """
+        :keyword last_modified: Required.
+        :paramtype last_modified: ~datetime.datetime
+        :keyword etag: Required.
+        :paramtype etag: str
+        :keyword lease_status: Known values are: "locked" and "unlocked".
+        :paramtype lease_status: str or ~azure.storage.blob.models.LeaseStatusType
+        :keyword lease_state: Known values are: "available", "leased", "expired", "breaking", and
+         "broken".
+        :paramtype lease_state: str or ~azure.storage.blob.models.LeaseStateType
+        :keyword lease_duration: Known values are: "infinite" and "fixed".
+        :paramtype lease_duration: str or ~azure.storage.blob.models.LeaseDurationType
+        :keyword public_access: Known values are: "container" and "blob".
+        :paramtype public_access: str or ~azure.storage.blob.models.PublicAccessType
+        :keyword has_immutability_policy:
+        :paramtype has_immutability_policy: bool
+        :keyword has_legal_hold:
+        :paramtype has_legal_hold: bool
+        :keyword default_encryption_scope:
+        :paramtype default_encryption_scope: str
+        :keyword prevent_encryption_scope_override:
+        :paramtype prevent_encryption_scope_override: bool
+        :keyword deleted_time:
+        :paramtype deleted_time: ~datetime.datetime
+        :keyword remaining_retention_days:
+        :paramtype remaining_retention_days: int
+        :keyword is_immutable_storage_with_versioning_enabled: Indicates whether version-level WORM
+         (write once, read many) is enabled on this container.
+        :paramtype is_immutable_storage_with_versioning_enabled: bool
+        """
+        super().__init__(**kwargs)
+        self.last_modified = last_modified
+        self.etag = etag
+        self.lease_status = lease_status
+        self.lease_state = lease_state
+        self.lease_duration = lease_duration
+        self.public_access = public_access
+        self.has_immutability_policy = has_immutability_policy
+        self.has_legal_hold = has_legal_hold
+        self.default_encryption_scope = default_encryption_scope
+        self.prevent_encryption_scope_override = prevent_encryption_scope_override
+        self.deleted_time = deleted_time
+        self.remaining_retention_days = remaining_retention_days
+        self.is_immutable_storage_with_versioning_enabled = is_immutable_storage_with_versioning_enabled
+
+
+class CorsRule(_serialization.Model):
+    """CORS is an HTTP feature that enables a web application running under one domain to access
+    resources in another domain. Web browsers implement a security restriction known as same-origin
+    policy that prevents a web page from calling APIs in a different domain; CORS provides a secure
+    way to allow one domain (the origin domain) to call APIs in another domain.
+
+    All required parameters must be populated in order to send to the server.
+
+    :ivar allowed_origins: The origin domains that are permitted to make a request against the
+     storage service via CORS. The origin domain is the domain from which the request originates.
+     Note that the origin must be an exact case-sensitive match with the origin that the user agent
+     sends to the service. You can also use the wildcard character '*' to allow all origin domains
+     to make requests via CORS. Required.
+    :vartype allowed_origins: str
+    :ivar allowed_methods: The methods (HTTP request verbs) that the origin domain may use for a
+     CORS request, as a comma-separated list. Required.
+    :vartype allowed_methods: str
+    :ivar allowed_headers: The request headers that the origin domain may specify on the CORS
+     request. Required.
+    :vartype allowed_headers: str
+    :ivar exposed_headers: The response headers that may be sent in the response to the CORS
+     request and exposed by the browser to the request issuer. Required.
+    :vartype exposed_headers: str
+    :ivar max_age_in_seconds: The maximum amount of time that a browser should cache the preflight
+     OPTIONS request. Required.
+    :vartype max_age_in_seconds: int
+    """
+
+    _validation = {
+        "allowed_origins": {"required": True},
+        "allowed_methods": {"required": True},
+        "allowed_headers": {"required": True},
+        "exposed_headers": {"required": True},
+        "max_age_in_seconds": {"required": True, "minimum": 0},
+    }
+
+    _attribute_map = {
+        "allowed_origins": {"key": "AllowedOrigins", "type": "str"},
+        "allowed_methods": {"key": "AllowedMethods", "type": "str"},
+        "allowed_headers": {"key": "AllowedHeaders", "type": "str"},
+        "exposed_headers": {"key": "ExposedHeaders", "type": "str"},
+        "max_age_in_seconds": {"key": "MaxAgeInSeconds", "type": "int"},
+    }
+
+    def __init__(
+        self,
+        *,
+        allowed_origins: str,
+        allowed_methods: str,
+        allowed_headers: str,
+        exposed_headers: str,
+        max_age_in_seconds: int,
+        **kwargs: Any
+    ) -> None:
+        """
+        :keyword allowed_origins: The origin domains that are permitted to make a request against the
+         storage service via CORS. The origin domain is the domain from which the request originates.
+         Note that the origin must be an exact case-sensitive match with the origin that the user
+         agent sends to the service. You can also use the wildcard character '*' to allow all origin
+         domains
+         sends to the service. You can also use the wildcard character '*' to allow all origin domains
+         to make requests via CORS. Required.
+        :paramtype allowed_origins: str
+        :keyword allowed_methods: The methods (HTTP request verbs) that the origin domain may use for
+         a CORS request, as a comma-separated list. Required.
+        :paramtype allowed_methods: str
+        :keyword allowed_headers: The request headers that the origin domain may specify on the CORS
+         request. Required.
+        :paramtype allowed_headers: str
+        :keyword exposed_headers: The response headers that may be sent in the response to the CORS
+         request and exposed by the browser to the request issuer. Required.
+        :paramtype exposed_headers: str
+        :keyword max_age_in_seconds: The maximum amount of time that a browser should cache the
+         preflight
+         OPTIONS request. Required.
+        :paramtype max_age_in_seconds: int
+        """
+        super().__init__(**kwargs)
+        self.allowed_origins = allowed_origins
+        self.allowed_methods = allowed_methods
+        self.allowed_headers = allowed_headers
+        self.exposed_headers = exposed_headers
+        self.max_age_in_seconds = max_age_in_seconds
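+
+# Illustrative sketch: a CORS rule for a single web origin with a one-hour
+# preflight cache. The origin, verbs, and headers are assumed example values.
+#
+#     cors = CorsRule(
+#         allowed_origins="https://example.com",
+#         allowed_methods="GET,PUT",
+#         allowed_headers="x-ms-meta-*",
+#         exposed_headers="x-ms-request-id",
+#         max_age_in_seconds=3600,
+#     )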
+
+
+class CpkInfo(_serialization.Model):
+    """Parameter group.
+
+    :ivar encryption_key: Optional. Specifies the encryption key to use to encrypt the data
+     provided in the request. If not specified, encryption is performed with the root account
+     encryption key.  For more information, see Encryption at Rest for Azure Storage Services.
+    :vartype encryption_key: str
+    :ivar encryption_key_sha256: The SHA-256 hash of the provided encryption key. Must be provided
+     if the x-ms-encryption-key header is provided.
+    :vartype encryption_key_sha256: str
+    :ivar encryption_algorithm: The algorithm used to produce the encryption key hash. Currently,
+     the only accepted value is "AES256". Must be provided if the x-ms-encryption-key header is
+     provided. Known values are: "None" and "AES256".
+    :vartype encryption_algorithm: str or ~azure.storage.blob.models.EncryptionAlgorithmType
+    """
+
+    _attribute_map = {
+        "encryption_key": {"key": "encryptionKey", "type": "str"},
+        "encryption_key_sha256": {"key": "encryptionKeySha256", "type": "str"},
+        "encryption_algorithm": {"key": "encryptionAlgorithm", "type": "str"},
+    }
+
+    def __init__(
+        self,
+        *,
+        encryption_key: Optional[str] = None,
+        encryption_key_sha256: Optional[str] = None,
+        encryption_algorithm: Optional[Union[str, "_models.EncryptionAlgorithmType"]] = None,
+        **kwargs: Any
+    ) -> None:
+        """
+        :keyword encryption_key: Optional. Specifies the encryption key to use to encrypt the data
+         provided in the request. If not specified, encryption is performed with the root account
+         encryption key.  For more information, see Encryption at Rest for Azure Storage Services.
+        :paramtype encryption_key: str
+        :keyword encryption_key_sha256: The SHA-256 hash of the provided encryption key. Must be
+         provided if the x-ms-encryption-key header is provided.
+        :paramtype encryption_key_sha256: str
+        :keyword encryption_algorithm: The algorithm used to produce the encryption key hash.
+         Currently, the only accepted value is "AES256". Must be provided if the x-ms-encryption-key
+         header is provided. Known values are: "None" and "AES256".
+        :paramtype encryption_algorithm: str or ~azure.storage.blob.models.EncryptionAlgorithmType
+        """
+        super().__init__(**kwargs)
+        self.encryption_key = encryption_key
+        self.encryption_key_sha256 = encryption_key_sha256
+        self.encryption_algorithm = encryption_algorithm
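+
+# Illustrative sketch: a customer-provided 256-bit key, sent base64-encoded
+# together with its SHA-256 hash. Generating the key locally is an assumed
+# example; real keys should come from a key-management system.
+#
+#     import base64, hashlib, os
+#     key = os.urandom(32)
+#     cpk = CpkInfo(
+#         encryption_key=base64.b64encode(key).decode(),
+#         encryption_key_sha256=base64.b64encode(hashlib.sha256(key).digest()).decode(),
+#         encryption_algorithm="AES256",
+#     )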
+
+
+class CpkScopeInfo(_serialization.Model):
+    """Parameter group.
+
+    :ivar encryption_scope: Optional. Version 2019-07-07 and later.  Specifies the name of the
+     encryption scope to use to encrypt the data provided in the request. If not specified,
+     encryption is performed with the default account encryption scope.  For more information, see
+     Encryption at Rest for Azure Storage Services.
+    :vartype encryption_scope: str
+    """
+
+    _attribute_map = {
+        "encryption_scope": {"key": "encryptionScope", "type": "str"},
+    }
+
+    def __init__(self, *, encryption_scope: Optional[str] = None, **kwargs: Any) -> None:
+        """
+        :keyword encryption_scope: Optional. Version 2019-07-07 and later.  Specifies the name of the
+         encryption scope to use to encrypt the data provided in the request. If not specified,
+         encryption is performed with the default account encryption scope.  For more information, see
+         Encryption at Rest for Azure Storage Services.
+        :paramtype encryption_scope: str
+        """
+        super().__init__(**kwargs)
+        self.encryption_scope = encryption_scope
+
+
+class DelimitedTextConfiguration(_serialization.Model):
+    """Groups the settings used for interpreting the blob data if the blob is delimited text
+    formatted.
+
+    :ivar column_separator: The string used to separate columns.
+    :vartype column_separator: str
+    :ivar field_quote: The string used to quote a specific field.
+    :vartype field_quote: str
+    :ivar record_separator: The string used to separate records.
+    :vartype record_separator: str
+    :ivar escape_char: The string used as an escape character.
+    :vartype escape_char: str
+    :ivar headers_present: Represents whether the data has headers.
+    :vartype headers_present: bool
+    """
+
+    _attribute_map = {
+        "column_separator": {"key": "ColumnSeparator", "type": "str", "xml": {"name": "ColumnSeparator"}},
+        "field_quote": {"key": "FieldQuote", "type": "str", "xml": {"name": "FieldQuote"}},
+        "record_separator": {"key": "RecordSeparator", "type": "str", "xml": {"name": "RecordSeparator"}},
+        "escape_char": {"key": "EscapeChar", "type": "str", "xml": {"name": "EscapeChar"}},
+        "headers_present": {"key": "HeadersPresent", "type": "bool", "xml": {"name": "HasHeaders"}},
+    }
+    _xml_map = {"name": "DelimitedTextConfiguration"}
+
+    def __init__(
+        self,
+        *,
+        column_separator: Optional[str] = None,
+        field_quote: Optional[str] = None,
+        record_separator: Optional[str] = None,
+        escape_char: Optional[str] = None,
+        headers_present: Optional[bool] = None,
+        **kwargs: Any
+    ) -> None:
+        """
+        :keyword column_separator: The string used to separate columns.
+        :paramtype column_separator: str
+        :keyword field_quote: The string used to quote a specific field.
+        :paramtype field_quote: str
+        :keyword record_separator: The string used to separate records.
+        :paramtype record_separator: str
+        :keyword escape_char: The string used as an escape character.
+        :paramtype escape_char: str
+        :keyword headers_present: Represents whether the data has headers.
+        :paramtype headers_present: bool
+        """
+        super().__init__(**kwargs)
+        self.column_separator = column_separator
+        self.field_quote = field_quote
+        self.record_separator = record_separator
+        self.escape_char = escape_char
+        self.headers_present = headers_present
+
+
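+# --- Editorial sketch, not part of the generated code: a minimal example of
+# configuring CSV parsing with the model above. The separator, quote, and
+# escape values are illustrative assumptions, not service defaults.
+def _example_delimited_text_configuration() -> DelimitedTextConfiguration:
+    return DelimitedTextConfiguration(
+        column_separator=",",  # columns are comma-separated
+        field_quote='"',  # fields may be wrapped in double quotes
+        record_separator="\n",  # one record per line
+        escape_char="\\",  # backslash escapes special characters
+        headers_present=True,  # treat the first record as a header row
+    )
+
+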
+class FilterBlobItem(_serialization.Model):
+    """Blob info from a Filter Blobs API call.
+
+    All required parameters must be populated in order to send to the server.
+
+    :ivar name: Required.
+    :vartype name: str
+    :ivar container_name: Required.
+    :vartype container_name: str
+    :ivar tags: Blob tags.
+    :vartype tags: ~azure.storage.blob.models.BlobTags
+    :ivar version_id:
+    :vartype version_id: str
+    :ivar is_current_version:
+    :vartype is_current_version: bool
+    """
+
+    _validation = {
+        "name": {"required": True},
+        "container_name": {"required": True},
+    }
+
+    _attribute_map = {
+        "name": {"key": "Name", "type": "str"},
+        "container_name": {"key": "ContainerName", "type": "str"},
+        "tags": {"key": "Tags", "type": "BlobTags"},
+        "version_id": {"key": "VersionId", "type": "str"},
+        "is_current_version": {"key": "IsCurrentVersion", "type": "bool"},
+    }
+    _xml_map = {"name": "Blob"}
+
+    def __init__(
+        self,
+        *,
+        name: str,
+        container_name: str,
+        tags: Optional["_models.BlobTags"] = None,
+        version_id: Optional[str] = None,
+        is_current_version: Optional[bool] = None,
+        **kwargs: Any
+    ) -> None:
+        """
+        :keyword name: Required.
+        :paramtype name: str
+        :keyword container_name: Required.
+        :paramtype container_name: str
+        :keyword tags: Blob tags.
+        :paramtype tags: ~azure.storage.blob.models.BlobTags
+        :keyword version_id:
+        :paramtype version_id: str
+        :keyword is_current_version:
+        :paramtype is_current_version: bool
+        """
+        super().__init__(**kwargs)
+        self.name = name
+        self.container_name = container_name
+        self.tags = tags
+        self.version_id = version_id
+        self.is_current_version = is_current_version
+
+
+class FilterBlobSegment(_serialization.Model):
+    """The result of a Filter Blobs API call.
+
+    All required parameters must be populated in order to send to the server.
+
+    :ivar service_endpoint: Required.
+    :vartype service_endpoint: str
+    :ivar where: Required.
+    :vartype where: str
+    :ivar blobs: Required.
+    :vartype blobs: list[~azure.storage.blob.models.FilterBlobItem]
+    :ivar next_marker:
+    :vartype next_marker: str
+    """
+
+    _validation = {
+        "service_endpoint": {"required": True},
+        "where": {"required": True},
+        "blobs": {"required": True},
+    }
+
+    _attribute_map = {
+        "service_endpoint": {"key": "ServiceEndpoint", "type": "str", "xml": {"attr": True}},
+        "where": {"key": "Where", "type": "str"},
+        "blobs": {
+            "key": "Blobs",
+            "type": "[FilterBlobItem]",
+            "xml": {"name": "Blobs", "wrapped": True, "itemsName": "Blob"},
+        },
+        "next_marker": {"key": "NextMarker", "type": "str"},
+    }
+    _xml_map = {"name": "EnumerationResults"}
+
+    def __init__(
+        self,
+        *,
+        service_endpoint: str,
+        where: str,
+        blobs: List["_models.FilterBlobItem"],
+        next_marker: Optional[str] = None,
+        **kwargs: Any
+    ) -> None:
+        """
+        :keyword service_endpoint: Required.
+        :paramtype service_endpoint: str
+        :keyword where: Required.
+        :paramtype where: str
+        :keyword blobs: Required.
+        :paramtype blobs: list[~azure.storage.blob.models.FilterBlobItem]
+        :keyword next_marker:
+        :paramtype next_marker: str
+        """
+        super().__init__(**kwargs)
+        self.service_endpoint = service_endpoint
+        self.where = where
+        self.blobs = blobs
+        self.next_marker = next_marker
+
+
+class GeoReplication(_serialization.Model):
+    """Geo-Replication information for the Secondary Storage Service.
+
+    All required parameters must be populated in order to send to the server.
+
+    :ivar status: The status of the secondary location. Required. Known values are: "live",
+     "bootstrap", and "unavailable".
+    :vartype status: str or ~azure.storage.blob.models.GeoReplicationStatusType
+    :ivar last_sync_time: A GMT date/time value, to the second. All primary writes preceding this
+     value are guaranteed to be available for read operations at the secondary. Primary writes after
+     this point in time may or may not be available for reads. Required.
+    :vartype last_sync_time: ~datetime.datetime
+    """
+
+    _validation = {
+        "status": {"required": True},
+        "last_sync_time": {"required": True},
+    }
+
+    _attribute_map = {
+        "status": {"key": "Status", "type": "str"},
+        "last_sync_time": {"key": "LastSyncTime", "type": "rfc-1123"},
+    }
+
+    def __init__(
+        self,
+        *,
+        status: Union[str, "_models.GeoReplicationStatusType"],
+        last_sync_time: datetime.datetime,
+        **kwargs: Any
+    ) -> None:
+        """
+        :keyword status: The status of the secondary location. Required. Known values are: "live",
+         "bootstrap", and "unavailable".
+        :paramtype status: str or ~azure.storage.blob.models.GeoReplicationStatusType
+        :keyword last_sync_time: A GMT date/time value, to the second. All primary writes preceding
+         this value are guaranteed to be available for read operations at the secondary. Primary writes
+         after this point in time may or may not be available for reads. Required.
+        :paramtype last_sync_time: ~datetime.datetime
+        """
+        super().__init__(**kwargs)
+        self.status = status
+        self.last_sync_time = last_sync_time
+
+
+class JsonTextConfiguration(_serialization.Model):
+    """json text configuration.
+
+    :ivar record_separator: The string used to separate records.
+    :vartype record_separator: str
+    """
+
+    _attribute_map = {
+        "record_separator": {"key": "RecordSeparator", "type": "str", "xml": {"name": "RecordSeparator"}},
+    }
+    _xml_map = {"name": "JsonTextConfiguration"}
+
+    def __init__(self, *, record_separator: Optional[str] = None, **kwargs: Any) -> None:
+        """
+        :keyword record_separator: The string used to separate records.
+        :paramtype record_separator: str
+        """
+        super().__init__(**kwargs)
+        self.record_separator = record_separator
+
+
+class KeyInfo(_serialization.Model):
+    """Key information.
+
+    All required parameters must be populated in order to send to the server.
+
+    :ivar start: The date-time the key is active in ISO 8601 UTC time. Required.
+    :vartype start: str
+    :ivar expiry: The date-time the key expires in ISO 8601 UTC time. Required.
+    :vartype expiry: str
+    """
+
+    _validation = {
+        "start": {"required": True},
+        "expiry": {"required": True},
+    }
+
+    _attribute_map = {
+        "start": {"key": "Start", "type": "str"},
+        "expiry": {"key": "Expiry", "type": "str"},
+    }
+
+    def __init__(self, *, start: str, expiry: str, **kwargs: Any) -> None:
+        """
+        :keyword start: The date-time the key is active in ISO 8601 UTC time. Required.
+        :paramtype start: str
+        :keyword expiry: The date-time the key expires in ISO 8601 UTC time. Required.
+        :paramtype expiry: str
+        """
+        super().__init__(**kwargs)
+        self.start = start
+        self.expiry = expiry
+
+
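+# --- Editorial sketch, not part of the generated code: KeyInfo expects its
+# validity window as ISO 8601 UTC strings rather than datetime objects. The
+# timestamps below are illustrative assumptions.
+def _example_key_info() -> KeyInfo:
+    return KeyInfo(
+        start="2025-01-01T00:00:00Z",  # when the key becomes active
+        expiry="2025-01-02T00:00:00Z",  # when the key expires
+    )
+
+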
+class LeaseAccessConditions(_serialization.Model):
+    """Parameter group.
+
+    :ivar lease_id: If specified, the operation only succeeds if the resource's lease is active and
+     matches this ID.
+    :vartype lease_id: str
+    """
+
+    _attribute_map = {
+        "lease_id": {"key": "leaseId", "type": "str"},
+    }
+
+    def __init__(self, *, lease_id: Optional[str] = None, **kwargs: Any) -> None:
+        """
+        :keyword lease_id: If specified, the operation only succeeds if the resource's lease is active
+         and matches this ID.
+        :paramtype lease_id: str
+        """
+        super().__init__(**kwargs)
+        self.lease_id = lease_id
+
+
+class ListBlobsFlatSegmentResponse(_serialization.Model):
+    """An enumeration of blobs.
+
+    All required parameters must be populated in order to send to the server.
+
+    :ivar service_endpoint: Required.
+    :vartype service_endpoint: str
+    :ivar container_name: Required.
+    :vartype container_name: str
+    :ivar prefix:
+    :vartype prefix: str
+    :ivar marker:
+    :vartype marker: str
+    :ivar max_results:
+    :vartype max_results: int
+    :ivar segment: Required.
+    :vartype segment: ~azure.storage.blob.models.BlobFlatListSegment
+    :ivar next_marker:
+    :vartype next_marker: str
+    """
+
+    _validation = {
+        "service_endpoint": {"required": True},
+        "container_name": {"required": True},
+        "segment": {"required": True},
+    }
+
+    _attribute_map = {
+        "service_endpoint": {"key": "ServiceEndpoint", "type": "str", "xml": {"attr": True}},
+        "container_name": {"key": "ContainerName", "type": "str", "xml": {"attr": True}},
+        "prefix": {"key": "Prefix", "type": "str"},
+        "marker": {"key": "Marker", "type": "str"},
+        "max_results": {"key": "MaxResults", "type": "int"},
+        "segment": {"key": "Segment", "type": "BlobFlatListSegment"},
+        "next_marker": {"key": "NextMarker", "type": "str"},
+    }
+    _xml_map = {"name": "EnumerationResults"}
+
+    def __init__(
+        self,
+        *,
+        service_endpoint: str,
+        container_name: str,
+        segment: "_models.BlobFlatListSegment",
+        prefix: Optional[str] = None,
+        marker: Optional[str] = None,
+        max_results: Optional[int] = None,
+        next_marker: Optional[str] = None,
+        **kwargs: Any
+    ) -> None:
+        """
+        :keyword service_endpoint: Required.
+        :paramtype service_endpoint: str
+        :keyword container_name: Required.
+        :paramtype container_name: str
+        :keyword prefix:
+        :paramtype prefix: str
+        :keyword marker:
+        :paramtype marker: str
+        :keyword max_results:
+        :paramtype max_results: int
+        :keyword segment: Required.
+        :paramtype segment: ~azure.storage.blob.models.BlobFlatListSegment
+        :keyword next_marker:
+        :paramtype next_marker: str
+        """
+        super().__init__(**kwargs)
+        self.service_endpoint = service_endpoint
+        self.container_name = container_name
+        self.prefix = prefix
+        self.marker = marker
+        self.max_results = max_results
+        self.segment = segment
+        self.next_marker = next_marker
+
+
+class ListBlobsHierarchySegmentResponse(_serialization.Model):
+    """An enumeration of blobs.
+
+    All required parameters must be populated in order to send to the server.
+
+    :ivar service_endpoint: Required.
+    :vartype service_endpoint: str
+    :ivar container_name: Required.
+    :vartype container_name: str
+    :ivar prefix:
+    :vartype prefix: str
+    :ivar marker:
+    :vartype marker: str
+    :ivar max_results:
+    :vartype max_results: int
+    :ivar delimiter:
+    :vartype delimiter: str
+    :ivar segment: Required.
+    :vartype segment: ~azure.storage.blob.models.BlobHierarchyListSegment
+    :ivar next_marker:
+    :vartype next_marker: str
+    """
+
+    _validation = {
+        "service_endpoint": {"required": True},
+        "container_name": {"required": True},
+        "segment": {"required": True},
+    }
+
+    _attribute_map = {
+        "service_endpoint": {"key": "ServiceEndpoint", "type": "str", "xml": {"attr": True}},
+        "container_name": {"key": "ContainerName", "type": "str", "xml": {"attr": True}},
+        "prefix": {"key": "Prefix", "type": "str"},
+        "marker": {"key": "Marker", "type": "str"},
+        "max_results": {"key": "MaxResults", "type": "int"},
+        "delimiter": {"key": "Delimiter", "type": "str"},
+        "segment": {"key": "Segment", "type": "BlobHierarchyListSegment"},
+        "next_marker": {"key": "NextMarker", "type": "str"},
+    }
+    _xml_map = {"name": "EnumerationResults"}
+
+    def __init__(
+        self,
+        *,
+        service_endpoint: str,
+        container_name: str,
+        segment: "_models.BlobHierarchyListSegment",
+        prefix: Optional[str] = None,
+        marker: Optional[str] = None,
+        max_results: Optional[int] = None,
+        delimiter: Optional[str] = None,
+        next_marker: Optional[str] = None,
+        **kwargs: Any
+    ) -> None:
+        """
+        :keyword service_endpoint: Required.
+        :paramtype service_endpoint: str
+        :keyword container_name: Required.
+        :paramtype container_name: str
+        :keyword prefix:
+        :paramtype prefix: str
+        :keyword marker:
+        :paramtype marker: str
+        :keyword max_results:
+        :paramtype max_results: int
+        :keyword delimiter:
+        :paramtype delimiter: str
+        :keyword segment: Required.
+        :paramtype segment: ~azure.storage.blob.models.BlobHierarchyListSegment
+        :keyword next_marker:
+        :paramtype next_marker: str
+        """
+        super().__init__(**kwargs)
+        self.service_endpoint = service_endpoint
+        self.container_name = container_name
+        self.prefix = prefix
+        self.marker = marker
+        self.max_results = max_results
+        self.delimiter = delimiter
+        self.segment = segment
+        self.next_marker = next_marker
+
+
+class ListContainersSegmentResponse(_serialization.Model):
+    """An enumeration of containers.
+
+    All required parameters must be populated in order to send to the server.
+
+    :ivar service_endpoint: Required.
+    :vartype service_endpoint: str
+    :ivar prefix:
+    :vartype prefix: str
+    :ivar marker:
+    :vartype marker: str
+    :ivar max_results:
+    :vartype max_results: int
+    :ivar container_items: Required.
+    :vartype container_items: list[~azure.storage.blob.models.ContainerItem]
+    :ivar next_marker:
+    :vartype next_marker: str
+    """
+
+    _validation = {
+        "service_endpoint": {"required": True},
+        "container_items": {"required": True},
+    }
+
+    _attribute_map = {
+        "service_endpoint": {"key": "ServiceEndpoint", "type": "str", "xml": {"attr": True}},
+        "prefix": {"key": "Prefix", "type": "str"},
+        "marker": {"key": "Marker", "type": "str"},
+        "max_results": {"key": "MaxResults", "type": "int"},
+        "container_items": {
+            "key": "ContainerItems",
+            "type": "[ContainerItem]",
+            "xml": {"name": "Containers", "wrapped": True, "itemsName": "Container"},
+        },
+        "next_marker": {"key": "NextMarker", "type": "str"},
+    }
+    _xml_map = {"name": "EnumerationResults"}
+
+    def __init__(
+        self,
+        *,
+        service_endpoint: str,
+        container_items: List["_models.ContainerItem"],
+        prefix: Optional[str] = None,
+        marker: Optional[str] = None,
+        max_results: Optional[int] = None,
+        next_marker: Optional[str] = None,
+        **kwargs: Any
+    ) -> None:
+        """
+        :keyword service_endpoint: Required.
+        :paramtype service_endpoint: str
+        :keyword prefix:
+        :paramtype prefix: str
+        :keyword marker:
+        :paramtype marker: str
+        :keyword max_results:
+        :paramtype max_results: int
+        :keyword container_items: Required.
+        :paramtype container_items: list[~azure.storage.blob.models.ContainerItem]
+        :keyword next_marker:
+        :paramtype next_marker: str
+        """
+        super().__init__(**kwargs)
+        self.service_endpoint = service_endpoint
+        self.prefix = prefix
+        self.marker = marker
+        self.max_results = max_results
+        self.container_items = container_items
+        self.next_marker = next_marker
+
+
+class Logging(_serialization.Model):
+    """Azure Analytics Logging settings.
+
+    All required parameters must be populated in order to send to the server.
+
+    :ivar version: The version of Storage Analytics to configure. Required.
+    :vartype version: str
+    :ivar delete: Indicates whether all delete requests should be logged. Required.
+    :vartype delete: bool
+    :ivar read: Indicates whether all read requests should be logged. Required.
+    :vartype read: bool
+    :ivar write: Indicates whether all write requests should be logged. Required.
+    :vartype write: bool
+    :ivar retention_policy: The retention policy that determines how long the associated data
+     should persist. Required.
+    :vartype retention_policy: ~azure.storage.blob.models.RetentionPolicy
+    """
+
+    _validation = {
+        "version": {"required": True},
+        "delete": {"required": True},
+        "read": {"required": True},
+        "write": {"required": True},
+        "retention_policy": {"required": True},
+    }
+
+    _attribute_map = {
+        "version": {"key": "Version", "type": "str"},
+        "delete": {"key": "Delete", "type": "bool"},
+        "read": {"key": "Read", "type": "bool"},
+        "write": {"key": "Write", "type": "bool"},
+        "retention_policy": {"key": "RetentionPolicy", "type": "RetentionPolicy"},
+    }
+
+    def __init__(
+        self,
+        *,
+        version: str,
+        delete: bool,
+        read: bool,
+        write: bool,
+        retention_policy: "_models.RetentionPolicy",
+        **kwargs: Any
+    ) -> None:
+        """
+        :keyword version: The version of Storage Analytics to configure. Required.
+        :paramtype version: str
+        :keyword delete: Indicates whether all delete requests should be logged. Required.
+        :paramtype delete: bool
+        :keyword read: Indicates whether all read requests should be logged. Required.
+        :paramtype read: bool
+        :keyword write: Indicates whether all write requests should be logged. Required.
+        :paramtype write: bool
+        :keyword retention_policy: The retention policy that determines how long the associated data
+         should persist. Required.
+        :paramtype retention_policy: ~azure.storage.blob.models.RetentionPolicy
+        """
+        super().__init__(**kwargs)
+        self.version = version
+        self.delete = delete
+        self.read = read
+        self.write = write
+        self.retention_policy = retention_policy
+
+
+class Metrics(_serialization.Model):
+    """a summary of request statistics grouped by API in hour or minute aggregates for blobs.
+
+    All required parameters must be populated in order to send to the server.
+
+    :ivar version: The version of Storage Analytics to configure.
+    :vartype version: str
+    :ivar enabled: Indicates whether metrics are enabled for the Blob service. Required.
+    :vartype enabled: bool
+    :ivar include_apis: Indicates whether metrics should generate summary statistics for called API
+     operations.
+    :vartype include_apis: bool
+    :ivar retention_policy: The retention policy that determines how long the associated data
+     should persist.
+    :vartype retention_policy: ~azure.storage.blob.models.RetentionPolicy
+    """
+
+    _validation = {
+        "enabled": {"required": True},
+    }
+
+    _attribute_map = {
+        "version": {"key": "Version", "type": "str"},
+        "enabled": {"key": "Enabled", "type": "bool"},
+        "include_apis": {"key": "IncludeAPIs", "type": "bool"},
+        "retention_policy": {"key": "RetentionPolicy", "type": "RetentionPolicy"},
+    }
+
+    def __init__(
+        self,
+        *,
+        enabled: bool,
+        version: Optional[str] = None,
+        include_apis: Optional[bool] = None,
+        retention_policy: Optional["_models.RetentionPolicy"] = None,
+        **kwargs: Any
+    ) -> None:
+        """
+        :keyword version: The version of Storage Analytics to configure.
+        :paramtype version: str
+        :keyword enabled: Indicates whether metrics are enabled for the Blob service. Required.
+        :paramtype enabled: bool
+        :keyword include_apis: Indicates whether metrics should generate summary statistics for called
+         API operations.
+        :paramtype include_apis: bool
+        :keyword retention_policy: The retention policy that determines how long the associated data
+         should persist.
+        :paramtype retention_policy: ~azure.storage.blob.models.RetentionPolicy
+        """
+        super().__init__(**kwargs)
+        self.version = version
+        self.enabled = enabled
+        self.include_apis = include_apis
+        self.retention_policy = retention_policy
+
+
+class ModifiedAccessConditions(_serialization.Model):
+    """Parameter group.
+
+    :ivar if_modified_since: Specify this header value to operate only on a blob if it has been
+     modified since the specified date/time.
+    :vartype if_modified_since: ~datetime.datetime
+    :ivar if_unmodified_since: Specify this header value to operate only on a blob if it has not
+     been modified since the specified date/time.
+    :vartype if_unmodified_since: ~datetime.datetime
+    :ivar if_match: Specify an ETag value to operate only on blobs with a matching value.
+    :vartype if_match: str
+    :ivar if_none_match: Specify an ETag value to operate only on blobs without a matching value.
+    :vartype if_none_match: str
+    :ivar if_tags: Specify a SQL where clause on blob tags to operate only on blobs with a matching
+     value.
+    :vartype if_tags: str
+    """
+
+    _attribute_map = {
+        "if_modified_since": {"key": "ifModifiedSince", "type": "rfc-1123"},
+        "if_unmodified_since": {"key": "ifUnmodifiedSince", "type": "rfc-1123"},
+        "if_match": {"key": "ifMatch", "type": "str"},
+        "if_none_match": {"key": "ifNoneMatch", "type": "str"},
+        "if_tags": {"key": "ifTags", "type": "str"},
+    }
+
+    def __init__(
+        self,
+        *,
+        if_modified_since: Optional[datetime.datetime] = None,
+        if_unmodified_since: Optional[datetime.datetime] = None,
+        if_match: Optional[str] = None,
+        if_none_match: Optional[str] = None,
+        if_tags: Optional[str] = None,
+        **kwargs: Any
+    ) -> None:
+        """
+        :keyword if_modified_since: Specify this header value to operate only on a blob if it has been
+         modified since the specified date/time.
+        :paramtype if_modified_since: ~datetime.datetime
+        :keyword if_unmodified_since: Specify this header value to operate only on a blob if it has not
+         been modified since the specified date/time.
+        :paramtype if_unmodified_since: ~datetime.datetime
+        :keyword if_match: Specify an ETag value to operate only on blobs with a matching value.
+        :paramtype if_match: str
+        :keyword if_none_match: Specify an ETag value to operate only on blobs without a matching
+         value.
+        :paramtype if_none_match: str
+        :keyword if_tags: Specify a SQL where clause on blob tags to operate only on blobs with a
+         matching value.
+        :paramtype if_tags: str
+        """
+        super().__init__(**kwargs)
+        self.if_modified_since = if_modified_since
+        self.if_unmodified_since = if_unmodified_since
+        self.if_match = if_match
+        self.if_none_match = if_none_match
+        self.if_tags = if_tags
+
+
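+# --- Editorial sketch, not part of the generated code: a conditional-access
+# parameter group that lets an operation proceed only while the blob's ETag
+# still matches a previously observed value. The ETag is an illustrative
+# placeholder.
+def _example_modified_access_conditions() -> ModifiedAccessConditions:
+    return ModifiedAccessConditions(
+        if_match='"0x8D4BCC2E4835CD0"',  # ETag values are quoted strings
+    )
+
+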
+class PageList(_serialization.Model):
+    """the list of pages.
+
+    :ivar page_range:
+    :vartype page_range: list[~azure.storage.blob.models.PageRange]
+    :ivar clear_range:
+    :vartype clear_range: list[~azure.storage.blob.models.ClearRange]
+    :ivar next_marker:
+    :vartype next_marker: str
+    """
+
+    _attribute_map = {
+        "page_range": {"key": "PageRange", "type": "[PageRange]", "xml": {"itemsName": "PageRange"}},
+        "clear_range": {"key": "ClearRange", "type": "[ClearRange]", "xml": {"itemsName": "ClearRange"}},
+        "next_marker": {"key": "NextMarker", "type": "str"},
+    }
+
+    def __init__(
+        self,
+        *,
+        page_range: Optional[List["_models.PageRange"]] = None,
+        clear_range: Optional[List["_models.ClearRange"]] = None,
+        next_marker: Optional[str] = None,
+        **kwargs: Any
+    ) -> None:
+        """
+        :keyword page_range:
+        :paramtype page_range: list[~azure.storage.blob.models.PageRange]
+        :keyword clear_range:
+        :paramtype clear_range: list[~azure.storage.blob.models.ClearRange]
+        :keyword next_marker:
+        :paramtype next_marker: str
+        """
+        super().__init__(**kwargs)
+        self.page_range = page_range
+        self.clear_range = clear_range
+        self.next_marker = next_marker
+
+
+class PageRange(_serialization.Model):
+    """PageRange.
+
+    All required parameters must be populated in order to send to the server.
+
+    :ivar start: Required.
+    :vartype start: int
+    :ivar end: Required.
+    :vartype end: int
+    """
+
+    _validation = {
+        "start": {"required": True},
+        "end": {"required": True},
+    }
+
+    _attribute_map = {
+        "start": {"key": "Start", "type": "int", "xml": {"name": "Start"}},
+        "end": {"key": "End", "type": "int", "xml": {"name": "End"}},
+    }
+    _xml_map = {"name": "PageRange"}
+
+    def __init__(self, *, start: int, end: int, **kwargs: Any) -> None:
+        """
+        :keyword start: Required.
+        :paramtype start: int
+        :keyword end: Required.
+        :paramtype end: int
+        """
+        super().__init__(**kwargs)
+        self.start = start
+        self.end = end
+
+
+class QueryFormat(_serialization.Model):
+    """QueryFormat.
+
+    All required parameters must be populated in order to send to the server.
+
+    :ivar type: The quick query format type. Required. Known values are: "delimited", "json",
+     "arrow", and "parquet".
+    :vartype type: str or ~azure.storage.blob.models.QueryFormatType
+    :ivar delimited_text_configuration: Groups the settings used for interpreting the blob data if
+     the blob is delimited text formatted.
+    :vartype delimited_text_configuration: ~azure.storage.blob.models.DelimitedTextConfiguration
+    :ivar json_text_configuration: JSON text configuration.
+    :vartype json_text_configuration: ~azure.storage.blob.models.JsonTextConfiguration
+    :ivar arrow_configuration: Groups the settings used for formatting the response if the response
+     should be Arrow formatted.
+    :vartype arrow_configuration: ~azure.storage.blob.models.ArrowConfiguration
+    :ivar parquet_text_configuration: Parquet configuration.
+    :vartype parquet_text_configuration: JSON
+    """
+
+    _validation = {
+        "type": {"required": True},
+    }
+
+    _attribute_map = {
+        "type": {"key": "Type", "type": "str", "xml": {"name": "Type"}},
+        "delimited_text_configuration": {"key": "DelimitedTextConfiguration", "type": "DelimitedTextConfiguration"},
+        "json_text_configuration": {"key": "JsonTextConfiguration", "type": "JsonTextConfiguration"},
+        "arrow_configuration": {"key": "ArrowConfiguration", "type": "ArrowConfiguration"},
+        "parquet_text_configuration": {"key": "ParquetTextConfiguration", "type": "object"},
+    }
+
+    def __init__(
+        self,
+        *,
+        type: Union[str, "_models.QueryFormatType"],
+        delimited_text_configuration: Optional["_models.DelimitedTextConfiguration"] = None,
+        json_text_configuration: Optional["_models.JsonTextConfiguration"] = None,
+        arrow_configuration: Optional["_models.ArrowConfiguration"] = None,
+        parquet_text_configuration: Optional[JSON] = None,
+        **kwargs: Any
+    ) -> None:
+        """
+        :keyword type: The quick query format type. Required. Known values are: "delimited", "json",
+         "arrow", and "parquet".
+        :paramtype type: str or ~azure.storage.blob.models.QueryFormatType
+        :keyword delimited_text_configuration: Groups the settings used for interpreting the blob data
+         if the blob is delimited text formatted.
+        :paramtype delimited_text_configuration: ~azure.storage.blob.models.DelimitedTextConfiguration
+        :keyword json_text_configuration: JSON text configuration.
+        :paramtype json_text_configuration: ~azure.storage.blob.models.JsonTextConfiguration
+        :keyword arrow_configuration: Groups the settings used for formatting the response if the
+         response should be Arrow formatted.
+        :paramtype arrow_configuration: ~azure.storage.blob.models.ArrowConfiguration
+        :keyword parquet_text_configuration: Parquet configuration.
+        :paramtype parquet_text_configuration: JSON
+        """
+        super().__init__(**kwargs)
+        self.type = type
+        self.delimited_text_configuration = delimited_text_configuration
+        self.json_text_configuration = json_text_configuration
+        self.arrow_configuration = arrow_configuration
+        self.parquet_text_configuration = parquet_text_configuration
+
+
+class QueryRequest(_serialization.Model):
+    """Groups the set of query request settings.
+
+    Variables are only populated by the server, and will be ignored when sending a request.
+
+    All required parameters must be populated in order to send to the server.
+
+    :ivar query_type: The type of the provided query expression. Required. Default value
+     is "SQL".
+    :vartype query_type: str
+    :ivar expression: The query expression in SQL. The maximum size of the query expression is
+     256KiB. Required.
+    :vartype expression: str
+    :ivar input_serialization:
+    :vartype input_serialization: ~azure.storage.blob.models.QuerySerialization
+    :ivar output_serialization:
+    :vartype output_serialization: ~azure.storage.blob.models.QuerySerialization
+    """
+
+    _validation = {
+        "query_type": {"required": True, "constant": True},
+        "expression": {"required": True},
+    }
+
+    _attribute_map = {
+        "query_type": {"key": "QueryType", "type": "str", "xml": {"name": "QueryType"}},
+        "expression": {"key": "Expression", "type": "str", "xml": {"name": "Expression"}},
+        "input_serialization": {"key": "InputSerialization", "type": "QuerySerialization"},
+        "output_serialization": {"key": "OutputSerialization", "type": "QuerySerialization"},
+    }
+    _xml_map = {"name": "QueryRequest"}
+
+    query_type = "SQL"
+
+    def __init__(
+        self,
+        *,
+        expression: str,
+        input_serialization: Optional["_models.QuerySerialization"] = None,
+        output_serialization: Optional["_models.QuerySerialization"] = None,
+        **kwargs: Any
+    ) -> None:
+        """
+        :keyword expression: The query expression in SQL. The maximum size of the query expression is
+         256KiB. Required.
+        :paramtype expression: str
+        :keyword input_serialization:
+        :paramtype input_serialization: ~azure.storage.blob.models.QuerySerialization
+        :keyword output_serialization:
+        :paramtype output_serialization: ~azure.storage.blob.models.QuerySerialization
+        """
+        super().__init__(**kwargs)
+        self.expression = expression
+        self.input_serialization = input_serialization
+        self.output_serialization = output_serialization
+
+
+class QuerySerialization(_serialization.Model):
+    """QuerySerialization.
+
+    All required parameters must be populated in order to send to the server.
+
+    :ivar format: Required.
+    :vartype format: ~azure.storage.blob.models.QueryFormat
+    """
+
+    _validation = {
+        "format": {"required": True},
+    }
+
+    _attribute_map = {
+        "format": {"key": "Format", "type": "QueryFormat"},
+    }
+
+    def __init__(self, *, format: "_models.QueryFormat", **kwargs: Any) -> None:
+        """
+        :keyword format: Required.
+        :paramtype format: ~azure.storage.blob.models.QueryFormat
+        """
+        super().__init__(**kwargs)
+        self.format = format
+
+
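+# --- Editorial sketch, not part of the generated code: how the query models
+# above compose. A QueryRequest pairs a SQL expression with optional input and
+# output serializations, each of which wraps a QueryFormat. Reading CSV and
+# emitting line-delimited JSON is an illustrative choice.
+def _example_query_request() -> QueryRequest:
+    csv_input = QuerySerialization(
+        format=QueryFormat(
+            type="delimited",
+            delimited_text_configuration=DelimitedTextConfiguration(
+                column_separator=",", headers_present=True
+            ),
+        )
+    )
+    json_output = QuerySerialization(
+        format=QueryFormat(
+            type="json",
+            json_text_configuration=JsonTextConfiguration(record_separator="\n"),
+        )
+    )
+    # query_type is a class-level constant ("SQL"), so only the expression and
+    # the serializations are passed here.
+    return QueryRequest(
+        expression="SELECT * FROM BlobStorage",
+        input_serialization=csv_input,
+        output_serialization=json_output,
+    )
+
+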
+class RetentionPolicy(_serialization.Model):
+    """the retention policy which determines how long the associated data should persist.
+
+    All required parameters must be populated in order to send to the server.
+
+    :ivar enabled: Indicates whether a retention policy is enabled for the storage service.
+     Required.
+    :vartype enabled: bool
+    :ivar days: Indicates the number of days that metrics, logging, or soft-deleted data should be
+     retained. All data older than this value will be deleted.
+    :vartype days: int
+    :ivar allow_permanent_delete: Indicates whether permanent delete is allowed on this storage
+     account.
+    :vartype allow_permanent_delete: bool
+    """
+
+    _validation = {
+        "enabled": {"required": True},
+        "days": {"minimum": 1},
+    }
+
+    _attribute_map = {
+        "enabled": {"key": "Enabled", "type": "bool"},
+        "days": {"key": "Days", "type": "int"},
+        "allow_permanent_delete": {"key": "AllowPermanentDelete", "type": "bool"},
+    }
+
+    def __init__(
+        self, *, enabled: bool, days: Optional[int] = None, allow_permanent_delete: Optional[bool] = None, **kwargs: Any
+    ) -> None:
+        """
+        :keyword enabled: Indicates whether a retention policy is enabled for the storage service.
+         Required.
+        :paramtype enabled: bool
+        :keyword days: Indicates the number of days that metrics, logging, or soft-deleted data should
+         be retained. All data older than this value will be deleted.
+        :paramtype days: int
+        :keyword allow_permanent_delete: Indicates whether permanent delete is allowed on this storage
+         account.
+        :paramtype allow_permanent_delete: bool
+        """
+        super().__init__(**kwargs)
+        self.enabled = enabled
+        self.days = days
+        self.allow_permanent_delete = allow_permanent_delete
+
+
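+# --- Editorial sketch, not part of the generated code: a policy that retains
+# the associated data for seven days. Per _validation above, days must be at
+# least 1 when provided; seven is an illustrative choice.
+def _example_retention_policy() -> RetentionPolicy:
+    return RetentionPolicy(enabled=True, days=7)
+
+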
+class SequenceNumberAccessConditions(_serialization.Model):
+    """Parameter group.
+
+    :ivar if_sequence_number_less_than_or_equal_to: Specify this header value to operate only on a
+     blob if it has a sequence number less than or equal to the specified value.
+    :vartype if_sequence_number_less_than_or_equal_to: int
+    :ivar if_sequence_number_less_than: Specify this header value to operate only on a blob if it
+     has a sequence number less than the specified value.
+    :vartype if_sequence_number_less_than: int
+    :ivar if_sequence_number_equal_to: Specify this header value to operate only on a blob if it
+     has the specified sequence number.
+    :vartype if_sequence_number_equal_to: int
+    """
+
+    _attribute_map = {
+        "if_sequence_number_less_than_or_equal_to": {"key": "ifSequenceNumberLessThanOrEqualTo", "type": "int"},
+        "if_sequence_number_less_than": {"key": "ifSequenceNumberLessThan", "type": "int"},
+        "if_sequence_number_equal_to": {"key": "ifSequenceNumberEqualTo", "type": "int"},
+    }
+
+    def __init__(
+        self,
+        *,
+        if_sequence_number_less_than_or_equal_to: Optional[int] = None,
+        if_sequence_number_less_than: Optional[int] = None,
+        if_sequence_number_equal_to: Optional[int] = None,
+        **kwargs: Any
+    ) -> None:
+        """
+        :keyword if_sequence_number_less_than_or_equal_to: Specify this header value to operate only on
+         a blob if it has a sequence number less than or equal to the specified value.
+        :paramtype if_sequence_number_less_than_or_equal_to: int
+        :keyword if_sequence_number_less_than: Specify this header value to operate only on a blob if
+         it has a sequence number less than the specified value.
+        :paramtype if_sequence_number_less_than: int
+        :keyword if_sequence_number_equal_to: Specify this header value to operate only on a blob if it
+         has the specified sequence number.
+        :paramtype if_sequence_number_equal_to: int
+        """
+        super().__init__(**kwargs)
+        self.if_sequence_number_less_than_or_equal_to = if_sequence_number_less_than_or_equal_to
+        self.if_sequence_number_less_than = if_sequence_number_less_than
+        self.if_sequence_number_equal_to = if_sequence_number_equal_to
+
+
+class SignedIdentifier(_serialization.Model):
+    """signed identifier.
+
+    All required parameters must be populated in order to send to the server.
+
+    :ivar id: A unique id. Required.
+    :vartype id: str
+    :ivar access_policy: An access policy.
+    :vartype access_policy: ~azure.storage.blob.models.AccessPolicy
+    """
+
+    _validation = {
+        "id": {"required": True},
+    }
+
+    _attribute_map = {
+        "id": {"key": "Id", "type": "str"},
+        "access_policy": {"key": "AccessPolicy", "type": "AccessPolicy"},
+    }
+    _xml_map = {"name": "SignedIdentifier"}
+
+    def __init__(
+        self,
+        *,
+        id: str,  # pylint: disable=redefined-builtin
+        access_policy: Optional["_models.AccessPolicy"] = None,
+        **kwargs: Any
+    ) -> None:
+        """
+        :keyword id: A unique id. Required.
+        :paramtype id: str
+        :keyword access_policy: An access policy.
+        :paramtype access_policy: ~azure.storage.blob.models.AccessPolicy
+        """
+        super().__init__(**kwargs)
+        self.id = id
+        self.access_policy = access_policy
+
+
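+# --- Editorial sketch, not part of the generated code: a signed identifier
+# names a stored access policy on a container. The id is an illustrative
+# placeholder; an optional AccessPolicy (defined earlier in this module) may
+# be attached via the access_policy keyword.
+def _example_signed_identifier() -> SignedIdentifier:
+    return SignedIdentifier(id="read-only-policy")
+
+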
+class SourceModifiedAccessConditions(_serialization.Model):
+    """Parameter group.
+
+    :ivar source_if_modified_since: Specify this header value to operate only on a blob if it has
+     been modified since the specified date/time.
+    :vartype source_if_modified_since: ~datetime.datetime
+    :ivar source_if_unmodified_since: Specify this header value to operate only on a blob if it has
+     not been modified since the specified date/time.
+    :vartype source_if_unmodified_since: ~datetime.datetime
+    :ivar source_if_match: Specify an ETag value to operate only on blobs with a matching value.
+    :vartype source_if_match: str
+    :ivar source_if_none_match: Specify an ETag value to operate only on blobs without a matching
+     value.
+    :vartype source_if_none_match: str
+    :ivar source_if_tags: Specify a SQL where clause on blob tags to operate only on blobs with a
+     matching value.
+    :vartype source_if_tags: str
+    """
+
+    _attribute_map = {
+        "source_if_modified_since": {"key": "sourceIfModifiedSince", "type": "rfc-1123"},
+        "source_if_unmodified_since": {"key": "sourceIfUnmodifiedSince", "type": "rfc-1123"},
+        "source_if_match": {"key": "sourceIfMatch", "type": "str"},
+        "source_if_none_match": {"key": "sourceIfNoneMatch", "type": "str"},
+        "source_if_tags": {"key": "sourceIfTags", "type": "str"},
+    }
+
+    def __init__(
+        self,
+        *,
+        source_if_modified_since: Optional[datetime.datetime] = None,
+        source_if_unmodified_since: Optional[datetime.datetime] = None,
+        source_if_match: Optional[str] = None,
+        source_if_none_match: Optional[str] = None,
+        source_if_tags: Optional[str] = None,
+        **kwargs: Any
+    ) -> None:
+        """
+        :keyword source_if_modified_since: Specify this header value to operate only on a blob if it
+         has been modified since the specified date/time.
+        :paramtype source_if_modified_since: ~datetime.datetime
+        :keyword source_if_unmodified_since: Specify this header value to operate only on a blob if it
+         has not been modified since the specified date/time.
+        :paramtype source_if_unmodified_since: ~datetime.datetime
+        :keyword source_if_match: Specify an ETag value to operate only on blobs with a matching value.
+        :paramtype source_if_match: str
+        :keyword source_if_none_match: Specify an ETag value to operate only on blobs without a
+         matching value.
+        :paramtype source_if_none_match: str
+        :keyword source_if_tags: Specify a SQL where clause on blob tags to operate only on blobs with
+         a matching value.
+        :paramtype source_if_tags: str
+        """
+        super().__init__(**kwargs)
+        self.source_if_modified_since = source_if_modified_since
+        self.source_if_unmodified_since = source_if_unmodified_since
+        self.source_if_match = source_if_match
+        self.source_if_none_match = source_if_none_match
+        self.source_if_tags = source_if_tags
+
+
+class StaticWebsite(_serialization.Model):
+    """The properties that enable an account to host a static website.
+
+    All required parameters must be populated in order to send to the server.
+
+    :ivar enabled: Indicates whether this account is hosting a static website. Required.
+    :vartype enabled: bool
+    :ivar index_document: The default name of the index page under each directory.
+    :vartype index_document: str
+    :ivar error_document404_path: The absolute path of the custom 404 page.
+    :vartype error_document404_path: str
+    :ivar default_index_document_path: Absolute path of the default index page.
+    :vartype default_index_document_path: str
+    """
+
+    _validation = {
+        "enabled": {"required": True},
+    }
+
+    _attribute_map = {
+        "enabled": {"key": "Enabled", "type": "bool"},
+        "index_document": {"key": "IndexDocument", "type": "str"},
+        "error_document404_path": {"key": "ErrorDocument404Path", "type": "str"},
+        "default_index_document_path": {"key": "DefaultIndexDocumentPath", "type": "str"},
+    }
+
+    def __init__(
+        self,
+        *,
+        enabled: bool,
+        index_document: Optional[str] = None,
+        error_document404_path: Optional[str] = None,
+        default_index_document_path: Optional[str] = None,
+        **kwargs: Any
+    ) -> None:
+        """
+        :keyword enabled: Indicates whether this account is hosting a static website. Required.
+        :paramtype enabled: bool
+        :keyword index_document: The default name of the index page under each directory.
+        :paramtype index_document: str
+        :keyword error_document404_path: The absolute path of the custom 404 page.
+        :paramtype error_document404_path: str
+        :keyword default_index_document_path: Absolute path of the default index page.
+        :paramtype default_index_document_path: str
+        """
+        super().__init__(**kwargs)
+        self.enabled = enabled
+        self.index_document = index_document
+        self.error_document404_path = error_document404_path
+        self.default_index_document_path = default_index_document_path
+
+
+class StorageError(_serialization.Model):
+    """StorageError.
+
+    :ivar message:
+    :vartype message: str
+    """
+
+    _attribute_map = {
+        "message": {"key": "Message", "type": "str"},
+    }
+
+    def __init__(self, *, message: Optional[str] = None, **kwargs: Any) -> None:
+        """
+        :keyword message:
+        :paramtype message: str
+        """
+        super().__init__(**kwargs)
+        self.message = message
+
+
+class StorageServiceProperties(_serialization.Model):
+    """Storage Service Properties.
+
+    :ivar logging: Azure Analytics Logging settings.
+    :vartype logging: ~azure.storage.blob.models.Logging
+    :ivar hour_metrics: A summary of request statistics grouped by API in hour or minute aggregates
+     for blobs.
+    :vartype hour_metrics: ~azure.storage.blob.models.Metrics
+    :ivar minute_metrics: A summary of request statistics grouped by API in hour or minute
+     aggregates for blobs.
+    :vartype minute_metrics: ~azure.storage.blob.models.Metrics
+    :ivar cors: The set of CORS rules.
+    :vartype cors: list[~azure.storage.blob.models.CorsRule]
+    :ivar default_service_version: The default version to use for requests to the Blob service if
+     an incoming request's version is not specified. Possible values include version 2008-10-27 and
+     all more recent versions.
+    :vartype default_service_version: str
+    :ivar delete_retention_policy: The retention policy that determines how long the associated
+     data should persist.
+    :vartype delete_retention_policy: ~azure.storage.blob.models.RetentionPolicy
+    :ivar static_website: The properties that enable an account to host a static website.
+    :vartype static_website: ~azure.storage.blob.models.StaticWebsite
+    """
+
+    _attribute_map = {
+        "logging": {"key": "Logging", "type": "Logging"},
+        "hour_metrics": {"key": "HourMetrics", "type": "Metrics"},
+        "minute_metrics": {"key": "MinuteMetrics", "type": "Metrics"},
+        "cors": {"key": "Cors", "type": "[CorsRule]", "xml": {"wrapped": True}},
+        "default_service_version": {"key": "DefaultServiceVersion", "type": "str"},
+        "delete_retention_policy": {"key": "DeleteRetentionPolicy", "type": "RetentionPolicy"},
+        "static_website": {"key": "StaticWebsite", "type": "StaticWebsite"},
+    }
+
+    def __init__(
+        self,
+        *,
+        logging: Optional["_models.Logging"] = None,
+        hour_metrics: Optional["_models.Metrics"] = None,
+        minute_metrics: Optional["_models.Metrics"] = None,
+        cors: Optional[List["_models.CorsRule"]] = None,
+        default_service_version: Optional[str] = None,
+        delete_retention_policy: Optional["_models.RetentionPolicy"] = None,
+        static_website: Optional["_models.StaticWebsite"] = None,
+        **kwargs: Any
+    ) -> None:
+        """
+        :keyword logging: Azure Analytics Logging settings.
+        :paramtype logging: ~azure.storage.blob.models.Logging
+        :keyword hour_metrics: A summary of request statistics grouped by API in hour or minute
+         aggregates for blobs.
+        :paramtype hour_metrics: ~azure.storage.blob.models.Metrics
+        :keyword minute_metrics: A summary of request statistics grouped by API in hour or minute
+         aggregates for blobs.
+        :paramtype minute_metrics: ~azure.storage.blob.models.Metrics
+        :keyword cors: The set of CORS rules.
+        :paramtype cors: list[~azure.storage.blob.models.CorsRule]
+        :keyword default_service_version: The default version to use for requests to the Blob service
+         if an incoming request's version is not specified. Possible values include version 2008-10-27
+         and all more recent versions.
+        :paramtype default_service_version: str
+        :keyword delete_retention_policy: The retention policy that determines how long the associated
+         data should persist.
+        :paramtype delete_retention_policy: ~azure.storage.blob.models.RetentionPolicy
+        :keyword static_website: The properties that enable an account to host a static website.
+        :paramtype static_website: ~azure.storage.blob.models.StaticWebsite
+        """
+        super().__init__(**kwargs)
+        self.logging = logging
+        self.hour_metrics = hour_metrics
+        self.minute_metrics = minute_metrics
+        self.cors = cors
+        self.default_service_version = default_service_version
+        self.delete_retention_policy = delete_retention_policy
+        self.static_website = static_website
+
+
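+# --- Editorial sketch, not part of the generated code: assembling service
+# properties from the models above. Logging every operation and keeping both
+# logs and hourly metrics for seven days is an illustrative configuration,
+# not a service default.
+def _example_storage_service_properties() -> StorageServiceProperties:
+    week = RetentionPolicy(enabled=True, days=7)
+    return StorageServiceProperties(
+        logging=Logging(
+            version="1.0", delete=True, read=True, write=True, retention_policy=week
+        ),
+        hour_metrics=Metrics(enabled=True, include_apis=True, retention_policy=week),
+        delete_retention_policy=week,
+    )
+
+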
+class StorageServiceStats(_serialization.Model):
+    """Stats for the storage service.
+
+    :ivar geo_replication: Geo-Replication information for the Secondary Storage Service.
+    :vartype geo_replication: ~azure.storage.blob.models.GeoReplication
+    """
+
+    _attribute_map = {
+        "geo_replication": {"key": "GeoReplication", "type": "GeoReplication"},
+    }
+
+    def __init__(self, *, geo_replication: Optional["_models.GeoReplication"] = None, **kwargs: Any) -> None:
+        """
+        :keyword geo_replication: Geo-Replication information for the Secondary Storage Service.
+        :paramtype geo_replication: ~azure.storage.blob.models.GeoReplication
+        """
+        super().__init__(**kwargs)
+        self.geo_replication = geo_replication
+
+
+class UserDelegationKey(_serialization.Model):
+    """A user delegation key.
+
+    All required parameters must be populated in order to send to the server.
+
+    :ivar signed_oid: The Azure Active Directory object ID in GUID format. Required.
+    :vartype signed_oid: str
+    :ivar signed_tid: The Azure Active Directory tenant ID in GUID format. Required.
+    :vartype signed_tid: str
+    :ivar signed_start: The date-time the key is active. Required.
+    :vartype signed_start: ~datetime.datetime
+    :ivar signed_expiry: The date-time the key expires. Required.
+    :vartype signed_expiry: ~datetime.datetime
+    :ivar signed_service: Abbreviation of the Azure Storage service that accepts the key. Required.
+    :vartype signed_service: str
+    :ivar signed_version: The service version that created the key. Required.
+    :vartype signed_version: str
+    :ivar value: The key as a base64 string. Required.
+    :vartype value: str
+    """
+
+    _validation = {
+        "signed_oid": {"required": True},
+        "signed_tid": {"required": True},
+        "signed_start": {"required": True},
+        "signed_expiry": {"required": True},
+        "signed_service": {"required": True},
+        "signed_version": {"required": True},
+        "value": {"required": True},
+    }
+
+    _attribute_map = {
+        "signed_oid": {"key": "SignedOid", "type": "str"},
+        "signed_tid": {"key": "SignedTid", "type": "str"},
+        "signed_start": {"key": "SignedStart", "type": "iso-8601"},
+        "signed_expiry": {"key": "SignedExpiry", "type": "iso-8601"},
+        "signed_service": {"key": "SignedService", "type": "str"},
+        "signed_version": {"key": "SignedVersion", "type": "str"},
+        "value": {"key": "Value", "type": "str"},
+    }
+
+    def __init__(
+        self,
+        *,
+        signed_oid: str,
+        signed_tid: str,
+        signed_start: datetime.datetime,
+        signed_expiry: datetime.datetime,
+        signed_service: str,
+        signed_version: str,
+        value: str,
+        **kwargs: Any
+    ) -> None:
+        """
+        :keyword signed_oid: The Azure Active Directory object ID in GUID format. Required.
+        :paramtype signed_oid: str
+        :keyword signed_tid: The Azure Active Directory tenant ID in GUID format. Required.
+        :paramtype signed_tid: str
+        :keyword signed_start: The date-time the key is active. Required.
+        :paramtype signed_start: ~datetime.datetime
+        :keyword signed_expiry: The date-time the key expires. Required.
+        :paramtype signed_expiry: ~datetime.datetime
+        :keyword signed_service: Abbreviation of the Azure Storage service that accepts the key.
+         Required.
+        :paramtype signed_service: str
+        :keyword signed_version: The service version that created the key. Required.
+        :paramtype signed_version: str
+        :keyword value: The key as a base64 string. Required.
+        :paramtype value: str
+        """
+        super().__init__(**kwargs)
+        self.signed_oid = signed_oid
+        self.signed_tid = signed_tid
+        self.signed_start = signed_start
+        self.signed_expiry = signed_expiry
+        self.signed_service = signed_service
+        self.signed_version = signed_version
+        self.value = value
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/blob/_generated/models/_patch.py b/.venv/lib/python3.12/site-packages/azure/storage/blob/_generated/models/_patch.py
new file mode 100644
index 00000000..f7dd3251
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/blob/_generated/models/_patch.py
@@ -0,0 +1,20 @@
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------
+"""Customize generated code here.
+
+Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize
+"""
+from typing import List
+
+__all__: List[str] = []  # Add all objects you want publicly available to users at this package level
+
+
+def patch_sdk():
+    """Do not remove from this file.
+
+    `patch_sdk` is a last resort escape hatch that allows you to do customizations
+    you can't accomplish using the techniques described in
+    https://aka.ms/azsdk/python/dpcodegen/python/customize
+    """
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/blob/_generated/operations/__init__.py b/.venv/lib/python3.12/site-packages/azure/storage/blob/_generated/operations/__init__.py
new file mode 100644
index 00000000..4a5bb832
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/blob/_generated/operations/__init__.py
@@ -0,0 +1,35 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+# pylint: disable=wrong-import-position
+
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from ._patch import *  # pylint: disable=unused-wildcard-import
+
+from ._service_operations import ServiceOperations  # type: ignore
+from ._container_operations import ContainerOperations  # type: ignore
+from ._blob_operations import BlobOperations  # type: ignore
+from ._page_blob_operations import PageBlobOperations  # type: ignore
+from ._append_blob_operations import AppendBlobOperations  # type: ignore
+from ._block_blob_operations import BlockBlobOperations  # type: ignore
+
+from ._patch import __all__ as _patch_all
+from ._patch import *
+from ._patch import patch_sdk as _patch_sdk
+
+__all__ = [
+    "ServiceOperations",
+    "ContainerOperations",
+    "BlobOperations",
+    "PageBlobOperations",
+    "AppendBlobOperations",
+    "BlockBlobOperations",
+]
+__all__.extend([p for p in _patch_all if p not in __all__])  # pyright: ignore
+_patch_sdk()
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/blob/_generated/operations/_append_blob_operations.py b/.venv/lib/python3.12/site-packages/azure/storage/blob/_generated/operations/_append_blob_operations.py
new file mode 100644
index 00000000..a99691a1
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/blob/_generated/operations/_append_blob_operations.py
@@ -0,0 +1,1118 @@
+# pylint: disable=too-many-lines
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+import datetime
+import sys
+from typing import Any, Callable, Dict, IO, Literal, Optional, TypeVar, Union
+
+from azure.core import PipelineClient
+from azure.core.exceptions import (
+    ClientAuthenticationError,
+    HttpResponseError,
+    ResourceExistsError,
+    ResourceNotFoundError,
+    ResourceNotModifiedError,
+    map_error,
+)
+from azure.core.pipeline import PipelineResponse
+from azure.core.rest import HttpRequest, HttpResponse
+from azure.core.tracing.decorator import distributed_trace
+from azure.core.utils import case_insensitive_dict
+
+from .. import models as _models
+from .._configuration import AzureBlobStorageConfiguration
+from .._serialization import Deserializer, Serializer
+
+if sys.version_info >= (3, 9):
+    from collections.abc import MutableMapping
+else:
+    from typing import MutableMapping  # type: ignore
+T = TypeVar("T")
+ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
+
+_SERIALIZER = Serializer()
+_SERIALIZER.client_side_validation = False
+
+
+def build_create_request(
+    url: str,
+    *,
+    content_length: int,
+    timeout: Optional[int] = None,
+    blob_content_type: Optional[str] = None,
+    blob_content_encoding: Optional[str] = None,
+    blob_content_language: Optional[str] = None,
+    blob_content_md5: Optional[bytes] = None,
+    blob_cache_control: Optional[str] = None,
+    metadata: Optional[Dict[str, str]] = None,
+    lease_id: Optional[str] = None,
+    blob_content_disposition: Optional[str] = None,
+    encryption_key: Optional[str] = None,
+    encryption_key_sha256: Optional[str] = None,
+    encryption_algorithm: Optional[Union[str, _models.EncryptionAlgorithmType]] = None,
+    encryption_scope: Optional[str] = None,
+    if_modified_since: Optional[datetime.datetime] = None,
+    if_unmodified_since: Optional[datetime.datetime] = None,
+    if_match: Optional[str] = None,
+    if_none_match: Optional[str] = None,
+    if_tags: Optional[str] = None,
+    request_id_parameter: Optional[str] = None,
+    blob_tags_string: Optional[str] = None,
+    immutability_policy_expiry: Optional[datetime.datetime] = None,
+    immutability_policy_mode: Optional[Union[str, _models.BlobImmutabilityPolicyMode]] = None,
+    legal_hold: Optional[bool] = None,
+    **kwargs: Any
+) -> HttpRequest:
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    blob_type: Literal["AppendBlob"] = kwargs.pop("blob_type", _headers.pop("x-ms-blob-type", "AppendBlob"))
+    version: Literal["2025-01-05"] = kwargs.pop("version", _headers.pop("x-ms-version", "2025-01-05"))
+    accept = _headers.pop("Accept", "application/xml")
+
+    # Construct URL
+    _url = kwargs.pop("template_url", "{url}")
+    path_format_arguments = {
+        "url": _SERIALIZER.url("url", url, "str", skip_quote=True),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    if timeout is not None:
+        _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0)
+
+    # Construct headers
+    _headers["x-ms-blob-type"] = _SERIALIZER.header("blob_type", blob_type, "str")
+    _headers["Content-Length"] = _SERIALIZER.header("content_length", content_length, "int")
+    if blob_content_type is not None:
+        _headers["x-ms-blob-content-type"] = _SERIALIZER.header("blob_content_type", blob_content_type, "str")
+    if blob_content_encoding is not None:
+        _headers["x-ms-blob-content-encoding"] = _SERIALIZER.header(
+            "blob_content_encoding", blob_content_encoding, "str"
+        )
+    if blob_content_language is not None:
+        _headers["x-ms-blob-content-language"] = _SERIALIZER.header(
+            "blob_content_language", blob_content_language, "str"
+        )
+    if blob_content_md5 is not None:
+        _headers["x-ms-blob-content-md5"] = _SERIALIZER.header("blob_content_md5", blob_content_md5, "bytearray")
+    if blob_cache_control is not None:
+        _headers["x-ms-blob-cache-control"] = _SERIALIZER.header("blob_cache_control", blob_cache_control, "str")
+    if metadata is not None:
+        _headers["x-ms-meta"] = _SERIALIZER.header("metadata", metadata, "{str}")
+    if lease_id is not None:
+        _headers["x-ms-lease-id"] = _SERIALIZER.header("lease_id", lease_id, "str")
+    if blob_content_disposition is not None:
+        _headers["x-ms-blob-content-disposition"] = _SERIALIZER.header(
+            "blob_content_disposition", blob_content_disposition, "str"
+        )
+    if encryption_key is not None:
+        _headers["x-ms-encryption-key"] = _SERIALIZER.header("encryption_key", encryption_key, "str")
+    if encryption_key_sha256 is not None:
+        _headers["x-ms-encryption-key-sha256"] = _SERIALIZER.header(
+            "encryption_key_sha256", encryption_key_sha256, "str"
+        )
+    if encryption_algorithm is not None:
+        _headers["x-ms-encryption-algorithm"] = _SERIALIZER.header("encryption_algorithm", encryption_algorithm, "str")
+    if encryption_scope is not None:
+        _headers["x-ms-encryption-scope"] = _SERIALIZER.header("encryption_scope", encryption_scope, "str")
+    if if_modified_since is not None:
+        _headers["If-Modified-Since"] = _SERIALIZER.header("if_modified_since", if_modified_since, "rfc-1123")
+    if if_unmodified_since is not None:
+        _headers["If-Unmodified-Since"] = _SERIALIZER.header("if_unmodified_since", if_unmodified_since, "rfc-1123")
+    if if_match is not None:
+        _headers["If-Match"] = _SERIALIZER.header("if_match", if_match, "str")
+    if if_none_match is not None:
+        _headers["If-None-Match"] = _SERIALIZER.header("if_none_match", if_none_match, "str")
+    if if_tags is not None:
+        _headers["x-ms-if-tags"] = _SERIALIZER.header("if_tags", if_tags, "str")
+    _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str")
+    if request_id_parameter is not None:
+        _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str")
+    if blob_tags_string is not None:
+        _headers["x-ms-tags"] = _SERIALIZER.header("blob_tags_string", blob_tags_string, "str")
+    if immutability_policy_expiry is not None:
+        _headers["x-ms-immutability-policy-until-date"] = _SERIALIZER.header(
+            "immutability_policy_expiry", immutability_policy_expiry, "rfc-1123"
+        )
+    if immutability_policy_mode is not None:
+        _headers["x-ms-immutability-policy-mode"] = _SERIALIZER.header(
+            "immutability_policy_mode", immutability_policy_mode, "str"
+        )
+    if legal_hold is not None:
+        _headers["x-ms-legal-hold"] = _SERIALIZER.header("legal_hold", legal_hold, "bool")
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_append_block_request(
+    url: str,
+    *,
+    content_length: int,
+    content: IO[bytes],
+    timeout: Optional[int] = None,
+    transactional_content_md5: Optional[bytes] = None,
+    transactional_content_crc64: Optional[bytes] = None,
+    lease_id: Optional[str] = None,
+    max_size: Optional[int] = None,
+    append_position: Optional[int] = None,
+    encryption_key: Optional[str] = None,
+    encryption_key_sha256: Optional[str] = None,
+    encryption_algorithm: Optional[Union[str, _models.EncryptionAlgorithmType]] = None,
+    encryption_scope: Optional[str] = None,
+    if_modified_since: Optional[datetime.datetime] = None,
+    if_unmodified_since: Optional[datetime.datetime] = None,
+    if_match: Optional[str] = None,
+    if_none_match: Optional[str] = None,
+    if_tags: Optional[str] = None,
+    request_id_parameter: Optional[str] = None,
+    structured_body_type: Optional[str] = None,
+    structured_content_length: Optional[int] = None,
+    **kwargs: Any
+) -> HttpRequest:
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    comp: Literal["appendblock"] = kwargs.pop("comp", _params.pop("comp", "appendblock"))
+    content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+    version: Literal["2025-01-05"] = kwargs.pop("version", _headers.pop("x-ms-version", "2025-01-05"))
+    accept = _headers.pop("Accept", "application/xml")
+
+    # Construct URL
+    _url = kwargs.pop("template_url", "{url}")
+    path_format_arguments = {
+        "url": _SERIALIZER.url("url", url, "str", skip_quote=True),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    _params["comp"] = _SERIALIZER.query("comp", comp, "str")
+    if timeout is not None:
+        _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0)
+
+    # Construct headers
+    _headers["Content-Length"] = _SERIALIZER.header("content_length", content_length, "int")
+    if transactional_content_md5 is not None:
+        _headers["Content-MD5"] = _SERIALIZER.header(
+            "transactional_content_md5", transactional_content_md5, "bytearray"
+        )
+    if transactional_content_crc64 is not None:
+        _headers["x-ms-content-crc64"] = _SERIALIZER.header(
+            "transactional_content_crc64", transactional_content_crc64, "bytearray"
+        )
+    if lease_id is not None:
+        _headers["x-ms-lease-id"] = _SERIALIZER.header("lease_id", lease_id, "str")
+    if max_size is not None:
+        _headers["x-ms-blob-condition-maxsize"] = _SERIALIZER.header("max_size", max_size, "int")
+    if append_position is not None:
+        _headers["x-ms-blob-condition-appendpos"] = _SERIALIZER.header("append_position", append_position, "int")
+    if encryption_key is not None:
+        _headers["x-ms-encryption-key"] = _SERIALIZER.header("encryption_key", encryption_key, "str")
+    if encryption_key_sha256 is not None:
+        _headers["x-ms-encryption-key-sha256"] = _SERIALIZER.header(
+            "encryption_key_sha256", encryption_key_sha256, "str"
+        )
+    if encryption_algorithm is not None:
+        _headers["x-ms-encryption-algorithm"] = _SERIALIZER.header("encryption_algorithm", encryption_algorithm, "str")
+    if encryption_scope is not None:
+        _headers["x-ms-encryption-scope"] = _SERIALIZER.header("encryption_scope", encryption_scope, "str")
+    if if_modified_since is not None:
+        _headers["If-Modified-Since"] = _SERIALIZER.header("if_modified_since", if_modified_since, "rfc-1123")
+    if if_unmodified_since is not None:
+        _headers["If-Unmodified-Since"] = _SERIALIZER.header("if_unmodified_since", if_unmodified_since, "rfc-1123")
+    if if_match is not None:
+        _headers["If-Match"] = _SERIALIZER.header("if_match", if_match, "str")
+    if if_none_match is not None:
+        _headers["If-None-Match"] = _SERIALIZER.header("if_none_match", if_none_match, "str")
+    if if_tags is not None:
+        _headers["x-ms-if-tags"] = _SERIALIZER.header("if_tags", if_tags, "str")
+    _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str")
+    if request_id_parameter is not None:
+        _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str")
+    if structured_body_type is not None:
+        _headers["x-ms-structured-body"] = _SERIALIZER.header("structured_body_type", structured_body_type, "str")
+    if structured_content_length is not None:
+        _headers["x-ms-structured-content-length"] = _SERIALIZER.header(
+            "structured_content_length", structured_content_length, "int"
+        )
+    if content_type is not None:
+        _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, content=content, **kwargs)
+
+
+def build_append_block_from_url_request(
+    url: str,
+    *,
+    source_url: str,
+    content_length: int,
+    source_range: Optional[str] = None,
+    source_content_md5: Optional[bytes] = None,
+    source_contentcrc64: Optional[bytes] = None,
+    timeout: Optional[int] = None,
+    transactional_content_md5: Optional[bytes] = None,
+    encryption_key: Optional[str] = None,
+    encryption_key_sha256: Optional[str] = None,
+    encryption_algorithm: Optional[Union[str, _models.EncryptionAlgorithmType]] = None,
+    encryption_scope: Optional[str] = None,
+    lease_id: Optional[str] = None,
+    max_size: Optional[int] = None,
+    append_position: Optional[int] = None,
+    if_modified_since: Optional[datetime.datetime] = None,
+    if_unmodified_since: Optional[datetime.datetime] = None,
+    if_match: Optional[str] = None,
+    if_none_match: Optional[str] = None,
+    if_tags: Optional[str] = None,
+    source_if_modified_since: Optional[datetime.datetime] = None,
+    source_if_unmodified_since: Optional[datetime.datetime] = None,
+    source_if_match: Optional[str] = None,
+    source_if_none_match: Optional[str] = None,
+    request_id_parameter: Optional[str] = None,
+    copy_source_authorization: Optional[str] = None,
+    **kwargs: Any
+) -> HttpRequest:
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    comp: Literal["appendblock"] = kwargs.pop("comp", _params.pop("comp", "appendblock"))
+    version: Literal["2025-01-05"] = kwargs.pop("version", _headers.pop("x-ms-version", "2025-01-05"))
+    accept = _headers.pop("Accept", "application/xml")
+
+    # Construct URL
+    _url = kwargs.pop("template_url", "{url}")
+    path_format_arguments = {
+        "url": _SERIALIZER.url("url", url, "str", skip_quote=True),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    _params["comp"] = _SERIALIZER.query("comp", comp, "str")
+    if timeout is not None:
+        _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0)
+
+    # Construct headers
+    _headers["x-ms-copy-source"] = _SERIALIZER.header("source_url", source_url, "str")
+    if source_range is not None:
+        _headers["x-ms-source-range"] = _SERIALIZER.header("source_range", source_range, "str")
+    if source_content_md5 is not None:
+        _headers["x-ms-source-content-md5"] = _SERIALIZER.header("source_content_md5", source_content_md5, "bytearray")
+    if source_contentcrc64 is not None:
+        _headers["x-ms-source-content-crc64"] = _SERIALIZER.header(
+            "source_contentcrc64", source_contentcrc64, "bytearray"
+        )
+    _headers["Content-Length"] = _SERIALIZER.header("content_length", content_length, "int")
+    if transactional_content_md5 is not None:
+        _headers["Content-MD5"] = _SERIALIZER.header(
+            "transactional_content_md5", transactional_content_md5, "bytearray"
+        )
+    if encryption_key is not None:
+        _headers["x-ms-encryption-key"] = _SERIALIZER.header("encryption_key", encryption_key, "str")
+    if encryption_key_sha256 is not None:
+        _headers["x-ms-encryption-key-sha256"] = _SERIALIZER.header(
+            "encryption_key_sha256", encryption_key_sha256, "str"
+        )
+    if encryption_algorithm is not None:
+        _headers["x-ms-encryption-algorithm"] = _SERIALIZER.header("encryption_algorithm", encryption_algorithm, "str")
+    if encryption_scope is not None:
+        _headers["x-ms-encryption-scope"] = _SERIALIZER.header("encryption_scope", encryption_scope, "str")
+    if lease_id is not None:
+        _headers["x-ms-lease-id"] = _SERIALIZER.header("lease_id", lease_id, "str")
+    if max_size is not None:
+        _headers["x-ms-blob-condition-maxsize"] = _SERIALIZER.header("max_size", max_size, "int")
+    if append_position is not None:
+        _headers["x-ms-blob-condition-appendpos"] = _SERIALIZER.header("append_position", append_position, "int")
+    if if_modified_since is not None:
+        _headers["If-Modified-Since"] = _SERIALIZER.header("if_modified_since", if_modified_since, "rfc-1123")
+    if if_unmodified_since is not None:
+        _headers["If-Unmodified-Since"] = _SERIALIZER.header("if_unmodified_since", if_unmodified_since, "rfc-1123")
+    if if_match is not None:
+        _headers["If-Match"] = _SERIALIZER.header("if_match", if_match, "str")
+    if if_none_match is not None:
+        _headers["If-None-Match"] = _SERIALIZER.header("if_none_match", if_none_match, "str")
+    if if_tags is not None:
+        _headers["x-ms-if-tags"] = _SERIALIZER.header("if_tags", if_tags, "str")
+    if source_if_modified_since is not None:
+        _headers["x-ms-source-if-modified-since"] = _SERIALIZER.header(
+            "source_if_modified_since", source_if_modified_since, "rfc-1123"
+        )
+    if source_if_unmodified_since is not None:
+        _headers["x-ms-source-if-unmodified-since"] = _SERIALIZER.header(
+            "source_if_unmodified_since", source_if_unmodified_since, "rfc-1123"
+        )
+    if source_if_match is not None:
+        _headers["x-ms-source-if-match"] = _SERIALIZER.header("source_if_match", source_if_match, "str")
+    if source_if_none_match is not None:
+        _headers["x-ms-source-if-none-match"] = _SERIALIZER.header("source_if_none_match", source_if_none_match, "str")
+    _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str")
+    if request_id_parameter is not None:
+        _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str")
+    if copy_source_authorization is not None:
+        _headers["x-ms-copy-source-authorization"] = _SERIALIZER.header(
+            "copy_source_authorization", copy_source_authorization, "str"
+        )
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_seal_request(
+    url: str,
+    *,
+    timeout: Optional[int] = None,
+    request_id_parameter: Optional[str] = None,
+    lease_id: Optional[str] = None,
+    if_modified_since: Optional[datetime.datetime] = None,
+    if_unmodified_since: Optional[datetime.datetime] = None,
+    if_match: Optional[str] = None,
+    if_none_match: Optional[str] = None,
+    append_position: Optional[int] = None,
+    **kwargs: Any
+) -> HttpRequest:
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    comp: Literal["seal"] = kwargs.pop("comp", _params.pop("comp", "seal"))
+    version: Literal["2025-01-05"] = kwargs.pop("version", _headers.pop("x-ms-version", "2025-01-05"))
+    accept = _headers.pop("Accept", "application/xml")
+
+    # Construct URL
+    _url = kwargs.pop("template_url", "{url}")
+    path_format_arguments = {
+        "url": _SERIALIZER.url("url", url, "str", skip_quote=True),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    _params["comp"] = _SERIALIZER.query("comp", comp, "str")
+    if timeout is not None:
+        _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0)
+
+    # Construct headers
+    _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str")
+    if request_id_parameter is not None:
+        _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str")
+    if lease_id is not None:
+        _headers["x-ms-lease-id"] = _SERIALIZER.header("lease_id", lease_id, "str")
+    if if_modified_since is not None:
+        _headers["If-Modified-Since"] = _SERIALIZER.header("if_modified_since", if_modified_since, "rfc-1123")
+    if if_unmodified_since is not None:
+        _headers["If-Unmodified-Since"] = _SERIALIZER.header("if_unmodified_since", if_unmodified_since, "rfc-1123")
+    if if_match is not None:
+        _headers["If-Match"] = _SERIALIZER.header("if_match", if_match, "str")
+    if if_none_match is not None:
+        _headers["If-None-Match"] = _SERIALIZER.header("if_none_match", if_none_match, "str")
+    if append_position is not None:
+        _headers["x-ms-blob-condition-appendpos"] = _SERIALIZER.header("append_position", append_position, "int")
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+class AppendBlobOperations:
+    """
+    .. warning::
+        **DO NOT** instantiate this class directly.
+
+        Instead, you should access the following operations through
+        :class:`~azure.storage.blob.AzureBlobStorage`'s
+        :attr:`append_blob` attribute.
+    """
+
+    models = _models
+
+    def __init__(self, *args, **kwargs):
+        input_args = list(args)
+        self._client: PipelineClient = input_args.pop(0) if input_args else kwargs.pop("client")
+        self._config: AzureBlobStorageConfiguration = input_args.pop(0) if input_args else kwargs.pop("config")
+        self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer")
+        self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer")
+
+    @distributed_trace
+    def create(  # pylint: disable=inconsistent-return-statements
+        self,
+        content_length: int,
+        timeout: Optional[int] = None,
+        metadata: Optional[Dict[str, str]] = None,
+        request_id_parameter: Optional[str] = None,
+        blob_tags_string: Optional[str] = None,
+        immutability_policy_expiry: Optional[datetime.datetime] = None,
+        immutability_policy_mode: Optional[Union[str, _models.BlobImmutabilityPolicyMode]] = None,
+        legal_hold: Optional[bool] = None,
+        blob_http_headers: Optional[_models.BlobHTTPHeaders] = None,
+        lease_access_conditions: Optional[_models.LeaseAccessConditions] = None,
+        cpk_info: Optional[_models.CpkInfo] = None,
+        cpk_scope_info: Optional[_models.CpkScopeInfo] = None,
+        modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """The Create Append Blob operation creates a new append blob.
+
+        :param content_length: The length of the request. Required.
+        :type content_length: int
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param metadata: Optional. Specifies a user-defined name-value pair associated with the blob.
+         The new append blob is created with the specified metadata. Note that beginning with version
+         2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and
+         Referencing Containers, Blobs, and Metadata for more information. Default value is None.
+        :type metadata: dict[str, str]
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param blob_tags_string: Optional. Used to set blob tags in various blob operations. Default
+         value is None.
+        :type blob_tags_string: str
+        :param immutability_policy_expiry: Specifies the date and time when the blob's immutability
+         policy is set to expire. Default value is None.
+        :type immutability_policy_expiry: ~datetime.datetime
+        :param immutability_policy_mode: Specifies the immutability policy mode to set on the blob.
+         Known values are: "Mutable", "Unlocked", and "Locked". Default value is None.
+        :type immutability_policy_mode: str or ~azure.storage.blob.models.BlobImmutabilityPolicyMode
+        :param legal_hold: Specified if a legal hold should be set on the blob. Default value is None.
+        :type legal_hold: bool
+        :param blob_http_headers: Parameter group. Default value is None.
+        :type blob_http_headers: ~azure.storage.blob.models.BlobHTTPHeaders
+        :param lease_access_conditions: Parameter group. Default value is None.
+        :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions
+        :param cpk_info: Parameter group. Default value is None.
+        :type cpk_info: ~azure.storage.blob.models.CpkInfo
+        :param cpk_scope_info: Parameter group. Default value is None.
+        :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo
+        :param modified_access_conditions: Parameter group. Default value is None.
+        :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+        _params = kwargs.pop("params", {}) or {}
+
+        blob_type: Literal["AppendBlob"] = kwargs.pop("blob_type", _headers.pop("x-ms-blob-type", "AppendBlob"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _blob_content_type = None
+        _blob_content_encoding = None
+        _blob_content_language = None
+        _blob_content_md5 = None
+        _blob_cache_control = None
+        _lease_id = None
+        _blob_content_disposition = None
+        _encryption_key = None
+        _encryption_key_sha256 = None
+        _encryption_algorithm = None
+        _encryption_scope = None
+        _if_modified_since = None
+        _if_unmodified_since = None
+        _if_match = None
+        _if_none_match = None
+        _if_tags = None
+        if blob_http_headers is not None:
+            _blob_cache_control = blob_http_headers.blob_cache_control
+            _blob_content_disposition = blob_http_headers.blob_content_disposition
+            _blob_content_encoding = blob_http_headers.blob_content_encoding
+            _blob_content_language = blob_http_headers.blob_content_language
+            _blob_content_md5 = blob_http_headers.blob_content_md5
+            _blob_content_type = blob_http_headers.blob_content_type
+        if lease_access_conditions is not None:
+            _lease_id = lease_access_conditions.lease_id
+        if cpk_info is not None:
+            _encryption_algorithm = cpk_info.encryption_algorithm
+            _encryption_key = cpk_info.encryption_key
+            _encryption_key_sha256 = cpk_info.encryption_key_sha256
+        if cpk_scope_info is not None:
+            _encryption_scope = cpk_scope_info.encryption_scope
+        if modified_access_conditions is not None:
+            _if_match = modified_access_conditions.if_match
+            _if_modified_since = modified_access_conditions.if_modified_since
+            _if_none_match = modified_access_conditions.if_none_match
+            _if_tags = modified_access_conditions.if_tags
+            _if_unmodified_since = modified_access_conditions.if_unmodified_since
+
+        _request = build_create_request(
+            url=self._config.url,
+            content_length=content_length,
+            timeout=timeout,
+            blob_content_type=_blob_content_type,
+            blob_content_encoding=_blob_content_encoding,
+            blob_content_language=_blob_content_language,
+            blob_content_md5=_blob_content_md5,
+            blob_cache_control=_blob_cache_control,
+            metadata=metadata,
+            lease_id=_lease_id,
+            blob_content_disposition=_blob_content_disposition,
+            encryption_key=_encryption_key,
+            encryption_key_sha256=_encryption_key_sha256,
+            encryption_algorithm=_encryption_algorithm,
+            encryption_scope=_encryption_scope,
+            if_modified_since=_if_modified_since,
+            if_unmodified_since=_if_unmodified_since,
+            if_match=_if_match,
+            if_none_match=_if_none_match,
+            if_tags=_if_tags,
+            request_id_parameter=request_id_parameter,
+            blob_tags_string=blob_tags_string,
+            immutability_policy_expiry=immutability_policy_expiry,
+            immutability_policy_mode=immutability_policy_mode,
+            legal_hold=legal_hold,
+            blob_type=blob_type,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [201]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["Content-MD5"] = self._deserialize("bytearray", response.headers.get("Content-MD5"))
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["x-ms-version-id"] = self._deserialize("str", response.headers.get("x-ms-version-id"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+        response_headers["x-ms-request-server-encrypted"] = self._deserialize(
+            "bool", response.headers.get("x-ms-request-server-encrypted")
+        )
+        response_headers["x-ms-encryption-key-sha256"] = self._deserialize(
+            "str", response.headers.get("x-ms-encryption-key-sha256")
+        )
+        response_headers["x-ms-encryption-scope"] = self._deserialize(
+            "str", response.headers.get("x-ms-encryption-scope")
+        )
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
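+    # Illustrative call path (assumption): the hand-written
+    # ``BlobClient.create_append_blob`` convenience method in the public
+    # package delegates to this generated operation, roughly:
+    #
+    #     blob_client._client.append_blob.create(content_length=0)  # hypothetical wiring
+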
+    @distributed_trace
+    def append_block(  # pylint: disable=inconsistent-return-statements
+        self,
+        content_length: int,
+        body: IO[bytes],
+        timeout: Optional[int] = None,
+        transactional_content_md5: Optional[bytes] = None,
+        transactional_content_crc64: Optional[bytes] = None,
+        request_id_parameter: Optional[str] = None,
+        structured_body_type: Optional[str] = None,
+        structured_content_length: Optional[int] = None,
+        lease_access_conditions: Optional[_models.LeaseAccessConditions] = None,
+        append_position_access_conditions: Optional[_models.AppendPositionAccessConditions] = None,
+        cpk_info: Optional[_models.CpkInfo] = None,
+        cpk_scope_info: Optional[_models.CpkScopeInfo] = None,
+        modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """The Append Block operation commits a new block of data to the end of an existing append blob.
+        The Append Block operation is permitted only if the blob was created with x-ms-blob-type set to
+        AppendBlob. Append Block is supported only on version 2015-02-21 or later.
+
+        :param content_length: The length of the request. Required.
+        :type content_length: int
+        :param body: The block of data to append. Required.
+        :type body: IO[bytes]
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param transactional_content_md5: Specify the transactional md5 for the body, to be validated
+         by the service. Default value is None.
+        :type transactional_content_md5: bytes
+        :param transactional_content_crc64: Specify the transactional crc64 for the body, to be
+         validated by the service. Default value is None.
+        :type transactional_content_crc64: bytes
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param structured_body_type: Required if the request body is a structured message. Specifies
+         the message schema version and properties. Default value is None.
+        :type structured_body_type: str
+        :param structured_content_length: Required if the request body is a structured message.
+         Specifies the length of the blob/file content inside the message body. Will always be smaller
+         than Content-Length. Default value is None.
+        :type structured_content_length: int
+        :param lease_access_conditions: Parameter group. Default value is None.
+        :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions
+        :param append_position_access_conditions: Parameter group. Default value is None.
+        :type append_position_access_conditions:
+         ~azure.storage.blob.models.AppendPositionAccessConditions
+        :param cpk_info: Parameter group. Default value is None.
+        :type cpk_info: ~azure.storage.blob.models.CpkInfo
+        :param cpk_scope_info: Parameter group. Default value is None.
+        :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo
+        :param modified_access_conditions: Parameter group. Default value is None.
+        :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        comp: Literal["appendblock"] = kwargs.pop("comp", _params.pop("comp", "appendblock"))
+        content_type: str = kwargs.pop("content_type", _headers.pop("Content-Type", "application/octet-stream"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _lease_id = None
+        _max_size = None
+        _append_position = None
+        _encryption_key = None
+        _encryption_key_sha256 = None
+        _encryption_algorithm = None
+        _encryption_scope = None
+        _if_modified_since = None
+        _if_unmodified_since = None
+        _if_match = None
+        _if_none_match = None
+        _if_tags = None
+        if lease_access_conditions is not None:
+            _lease_id = lease_access_conditions.lease_id
+        if append_position_access_conditions is not None:
+            _append_position = append_position_access_conditions.append_position
+            _max_size = append_position_access_conditions.max_size
+        if cpk_info is not None:
+            _encryption_algorithm = cpk_info.encryption_algorithm
+            _encryption_key = cpk_info.encryption_key
+            _encryption_key_sha256 = cpk_info.encryption_key_sha256
+        if cpk_scope_info is not None:
+            _encryption_scope = cpk_scope_info.encryption_scope
+        if modified_access_conditions is not None:
+            _if_match = modified_access_conditions.if_match
+            _if_modified_since = modified_access_conditions.if_modified_since
+            _if_none_match = modified_access_conditions.if_none_match
+            _if_tags = modified_access_conditions.if_tags
+            _if_unmodified_since = modified_access_conditions.if_unmodified_since
+        _content = body
+
+        _request = build_append_block_request(
+            url=self._config.url,
+            content_length=content_length,
+            timeout=timeout,
+            transactional_content_md5=transactional_content_md5,
+            transactional_content_crc64=transactional_content_crc64,
+            lease_id=_lease_id,
+            max_size=_max_size,
+            append_position=_append_position,
+            encryption_key=_encryption_key,
+            encryption_key_sha256=_encryption_key_sha256,
+            encryption_algorithm=_encryption_algorithm,
+            encryption_scope=_encryption_scope,
+            if_modified_since=_if_modified_since,
+            if_unmodified_since=_if_unmodified_since,
+            if_match=_if_match,
+            if_none_match=_if_none_match,
+            if_tags=_if_tags,
+            request_id_parameter=request_id_parameter,
+            structured_body_type=structured_body_type,
+            structured_content_length=structured_content_length,
+            comp=comp,
+            content_type=content_type,
+            version=self._config.version,
+            content=_content,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [201]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["Content-MD5"] = self._deserialize("bytearray", response.headers.get("Content-MD5"))
+        response_headers["x-ms-content-crc64"] = self._deserialize(
+            "bytearray", response.headers.get("x-ms-content-crc64")
+        )
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+        response_headers["x-ms-blob-append-offset"] = self._deserialize(
+            "str", response.headers.get("x-ms-blob-append-offset")
+        )
+        response_headers["x-ms-blob-committed-block-count"] = self._deserialize(
+            "int", response.headers.get("x-ms-blob-committed-block-count")
+        )
+        response_headers["x-ms-request-server-encrypted"] = self._deserialize(
+            "bool", response.headers.get("x-ms-request-server-encrypted")
+        )
+        response_headers["x-ms-encryption-key-sha256"] = self._deserialize(
+            "str", response.headers.get("x-ms-encryption-key-sha256")
+        )
+        response_headers["x-ms-encryption-scope"] = self._deserialize(
+            "str", response.headers.get("x-ms-encryption-scope")
+        )
+        response_headers["x-ms-structured-body"] = self._deserialize(
+            "str", response.headers.get("x-ms-structured-body")
+        )
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
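+    # Illustrative sketch (assumption): an optimistic-concurrency append that
+    # pins the expected write offset, so a concurrent writer triggers an
+    # HttpResponseError instead of interleaving data:
+    #
+    #     import io
+    #     # ``ops`` is an AppendBlobOperations instance (hypothetical handle)
+    #     conditions = _models.AppendPositionAccessConditions(append_position=0)
+    #     ops.append_block(
+    #         content_length=5,
+    #         body=io.BytesIO(b"hello"),
+    #         append_position_access_conditions=conditions,
+    #     )
+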
+    @distributed_trace
+    def append_block_from_url(  # pylint: disable=inconsistent-return-statements
+        self,
+        source_url: str,
+        content_length: int,
+        source_range: Optional[str] = None,
+        source_content_md5: Optional[bytes] = None,
+        source_contentcrc64: Optional[bytes] = None,
+        timeout: Optional[int] = None,
+        transactional_content_md5: Optional[bytes] = None,
+        request_id_parameter: Optional[str] = None,
+        copy_source_authorization: Optional[str] = None,
+        cpk_info: Optional[_models.CpkInfo] = None,
+        cpk_scope_info: Optional[_models.CpkScopeInfo] = None,
+        lease_access_conditions: Optional[_models.LeaseAccessConditions] = None,
+        append_position_access_conditions: Optional[_models.AppendPositionAccessConditions] = None,
+        modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None,
+        source_modified_access_conditions: Optional[_models.SourceModifiedAccessConditions] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """The Append Block operation commits a new block of data to the end of an existing append blob
+        where the contents are read from a source url. The Append Block operation is permitted only if
+        the blob was created with x-ms-blob-type set to AppendBlob. Append Block is supported only on
+        version 2015-02-21 or later.
+
+        :param source_url: Specify a URL to the copy source. Required.
+        :type source_url: str
+        :param content_length: The length of the request. Required.
+        :type content_length: int
+        :param source_range: Bytes of source data in the specified range. Default value is None.
+        :type source_range: str
+        :param source_content_md5: Specify the md5 calculated for the range of bytes that must be read
+         from the copy source. Default value is None.
+        :type source_content_md5: bytes
+        :param source_contentcrc64: Specify the crc64 calculated for the range of bytes that must be
+         read from the copy source. Default value is None.
+        :type source_contentcrc64: bytes
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param transactional_content_md5: Specify the transactional md5 for the body, to be validated
+         by the service. Default value is None.
+        :type transactional_content_md5: bytes
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param copy_source_authorization: Only Bearer type is supported. Credentials should be a valid
+         OAuth access token to copy source. Default value is None.
+        :type copy_source_authorization: str
+        :param cpk_info: Parameter group. Default value is None.
+        :type cpk_info: ~azure.storage.blob.models.CpkInfo
+        :param cpk_scope_info: Parameter group. Default value is None.
+        :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo
+        :param lease_access_conditions: Parameter group. Default value is None.
+        :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions
+        :param append_position_access_conditions: Parameter group. Default value is None.
+        :type append_position_access_conditions:
+         ~azure.storage.blob.models.AppendPositionAccessConditions
+        :param modified_access_conditions: Parameter group. Default value is None.
+        :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions
+        :param source_modified_access_conditions: Parameter group. Default value is None.
+        :type source_modified_access_conditions:
+         ~azure.storage.blob.models.SourceModifiedAccessConditions
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        comp: Literal["appendblock"] = kwargs.pop("comp", _params.pop("comp", "appendblock"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _encryption_key = None
+        _encryption_key_sha256 = None
+        _encryption_algorithm = None
+        _encryption_scope = None
+        _lease_id = None
+        _max_size = None
+        _append_position = None
+        _if_modified_since = None
+        _if_unmodified_since = None
+        _if_match = None
+        _if_none_match = None
+        _if_tags = None
+        _source_if_modified_since = None
+        _source_if_unmodified_since = None
+        _source_if_match = None
+        _source_if_none_match = None
+        if cpk_info is not None:
+            _encryption_algorithm = cpk_info.encryption_algorithm
+            _encryption_key = cpk_info.encryption_key
+            _encryption_key_sha256 = cpk_info.encryption_key_sha256
+        if cpk_scope_info is not None:
+            _encryption_scope = cpk_scope_info.encryption_scope
+        if lease_access_conditions is not None:
+            _lease_id = lease_access_conditions.lease_id
+        if append_position_access_conditions is not None:
+            _append_position = append_position_access_conditions.append_position
+            _max_size = append_position_access_conditions.max_size
+        if modified_access_conditions is not None:
+            _if_match = modified_access_conditions.if_match
+            _if_modified_since = modified_access_conditions.if_modified_since
+            _if_none_match = modified_access_conditions.if_none_match
+            _if_tags = modified_access_conditions.if_tags
+            _if_unmodified_since = modified_access_conditions.if_unmodified_since
+        if source_modified_access_conditions is not None:
+            _source_if_match = source_modified_access_conditions.source_if_match
+            _source_if_modified_since = source_modified_access_conditions.source_if_modified_since
+            _source_if_none_match = source_modified_access_conditions.source_if_none_match
+            _source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since
+
+        _request = build_append_block_from_url_request(
+            url=self._config.url,
+            source_url=source_url,
+            content_length=content_length,
+            source_range=source_range,
+            source_content_md5=source_content_md5,
+            source_contentcrc64=source_contentcrc64,
+            timeout=timeout,
+            transactional_content_md5=transactional_content_md5,
+            encryption_key=_encryption_key,
+            encryption_key_sha256=_encryption_key_sha256,
+            encryption_algorithm=_encryption_algorithm,
+            encryption_scope=_encryption_scope,
+            lease_id=_lease_id,
+            max_size=_max_size,
+            append_position=_append_position,
+            if_modified_since=_if_modified_since,
+            if_unmodified_since=_if_unmodified_since,
+            if_match=_if_match,
+            if_none_match=_if_none_match,
+            if_tags=_if_tags,
+            source_if_modified_since=_source_if_modified_since,
+            source_if_unmodified_since=_source_if_unmodified_since,
+            source_if_match=_source_if_match,
+            source_if_none_match=_source_if_none_match,
+            request_id_parameter=request_id_parameter,
+            copy_source_authorization=copy_source_authorization,
+            comp=comp,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [201]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["Content-MD5"] = self._deserialize("bytearray", response.headers.get("Content-MD5"))
+        response_headers["x-ms-content-crc64"] = self._deserialize(
+            "bytearray", response.headers.get("x-ms-content-crc64")
+        )
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+        response_headers["x-ms-blob-append-offset"] = self._deserialize(
+            "str", response.headers.get("x-ms-blob-append-offset")
+        )
+        response_headers["x-ms-blob-committed-block-count"] = self._deserialize(
+            "int", response.headers.get("x-ms-blob-committed-block-count")
+        )
+        response_headers["x-ms-encryption-key-sha256"] = self._deserialize(
+            "str", response.headers.get("x-ms-encryption-key-sha256")
+        )
+        response_headers["x-ms-encryption-scope"] = self._deserialize(
+            "str", response.headers.get("x-ms-encryption-scope")
+        )
+        response_headers["x-ms-request-server-encrypted"] = self._deserialize(
+            "bool", response.headers.get("x-ms-request-server-encrypted")
+        )
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
+    @distributed_trace
+    def seal(  # pylint: disable=inconsistent-return-statements
+        self,
+        timeout: Optional[int] = None,
+        request_id_parameter: Optional[str] = None,
+        lease_access_conditions: Optional[_models.LeaseAccessConditions] = None,
+        modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None,
+        append_position_access_conditions: Optional[_models.AppendPositionAccessConditions] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """The Seal operation seals the Append Blob to make it read-only. Seal is supported only on
+        version 2019-12-12 or later.
+
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param lease_access_conditions: Parameter group. Default value is None.
+        :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions
+        :param modified_access_conditions: Parameter group. Default value is None.
+        :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions
+        :param append_position_access_conditions: Parameter group. Default value is None.
+        :type append_position_access_conditions:
+         ~azure.storage.blob.models.AppendPositionAccessConditions
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        comp: Literal["seal"] = kwargs.pop("comp", _params.pop("comp", "seal"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _lease_id = None
+        _if_modified_since = None
+        _if_unmodified_since = None
+        _if_match = None
+        _if_none_match = None
+        _append_position = None
+        if lease_access_conditions is not None:
+            _lease_id = lease_access_conditions.lease_id
+        if modified_access_conditions is not None:
+            _if_match = modified_access_conditions.if_match
+            _if_modified_since = modified_access_conditions.if_modified_since
+            _if_none_match = modified_access_conditions.if_none_match
+            _if_unmodified_since = modified_access_conditions.if_unmodified_since
+        if append_position_access_conditions is not None:
+            _append_position = append_position_access_conditions.append_position
+
+        _request = build_seal_request(
+            url=self._config.url,
+            timeout=timeout,
+            request_id_parameter=request_id_parameter,
+            lease_id=_lease_id,
+            if_modified_since=_if_modified_since,
+            if_unmodified_since=_if_unmodified_since,
+            if_match=_if_match,
+            if_none_match=_if_none_match,
+            append_position=_append_position,
+            comp=comp,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+        response_headers["x-ms-blob-sealed"] = self._deserialize("bool", response.headers.get("x-ms-blob-sealed"))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
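+
+# Illustrative sketch of how this seal operation is reached from the public
+# client surface (the wrapper below belongs to azure-storage-blob's hand-written
+# layer, not to this generated module; connection values are placeholders):
+#
+#     from azure.storage.blob import BlobClient
+#
+#     blob = BlobClient.from_connection_string(conn_str, "logs", "app.log")
+#     blob.seal_append_blob()  # sealed blobs reject further Append Block calls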
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/blob/_generated/operations/_blob_operations.py b/.venv/lib/python3.12/site-packages/azure/storage/blob/_generated/operations/_blob_operations.py
new file mode 100644
index 00000000..89d32d27
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/blob/_generated/operations/_blob_operations.py
@@ -0,0 +1,4642 @@
+# pylint: disable=too-many-lines
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+import datetime
+import sys
+from typing import Any, Callable, Dict, Iterator, Literal, Optional, TypeVar, Union
+
+from azure.core import PipelineClient
+from azure.core.exceptions import (
+    ClientAuthenticationError,
+    HttpResponseError,
+    ResourceExistsError,
+    ResourceNotFoundError,
+    ResourceNotModifiedError,
+    StreamClosedError,
+    StreamConsumedError,
+    map_error,
+)
+from azure.core.pipeline import PipelineResponse
+from azure.core.rest import HttpRequest, HttpResponse
+from azure.core.tracing.decorator import distributed_trace
+from azure.core.utils import case_insensitive_dict
+
+from .. import models as _models
+from .._configuration import AzureBlobStorageConfiguration
+from .._serialization import Deserializer, Serializer
+
+if sys.version_info >= (3, 9):
+    from collections.abc import MutableMapping
+else:
+    from typing import MutableMapping  # type: ignore
+T = TypeVar("T")
+ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
+
+_SERIALIZER = Serializer()
+_SERIALIZER.client_side_validation = False
+
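+# The `cls` keyword accepted by each operation in this module is an optional
+# callback: when provided, it is invoked as cls(pipeline_response, deserialized,
+# headers) and its return value becomes the operation's result. Illustrative
+# sketch (assumes an already-constructed operations object; the name is a
+# placeholder):
+#
+#     op.undelete(cls=lambda resp, body, headers: headers)  # return raw headers
+#
+# Note that client_side_validation is disabled above, so constraints expressed
+# to the serializer (e.g. timeout minimum=0) are not enforced locally.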
+
+def build_download_request(
+    url: str,
+    *,
+    snapshot: Optional[str] = None,
+    version_id: Optional[str] = None,
+    timeout: Optional[int] = None,
+    range: Optional[str] = None,
+    lease_id: Optional[str] = None,
+    range_get_content_md5: Optional[bool] = None,
+    range_get_content_crc64: Optional[bool] = None,
+    structured_body_type: Optional[str] = None,
+    encryption_key: Optional[str] = None,
+    encryption_key_sha256: Optional[str] = None,
+    encryption_algorithm: Optional[Union[str, _models.EncryptionAlgorithmType]] = None,
+    if_modified_since: Optional[datetime.datetime] = None,
+    if_unmodified_since: Optional[datetime.datetime] = None,
+    if_match: Optional[str] = None,
+    if_none_match: Optional[str] = None,
+    if_tags: Optional[str] = None,
+    request_id_parameter: Optional[str] = None,
+    **kwargs: Any
+) -> HttpRequest:
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    version: Literal["2025-01-05"] = kwargs.pop("version", _headers.pop("x-ms-version", "2025-01-05"))
+    accept = _headers.pop("Accept", "application/xml")
+
+    # Construct URL
+    _url = kwargs.pop("template_url", "{url}")
+    path_format_arguments = {
+        "url": _SERIALIZER.url("url", url, "str", skip_quote=True),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    if snapshot is not None:
+        _params["snapshot"] = _SERIALIZER.query("snapshot", snapshot, "str")
+    if version_id is not None:
+        _params["versionid"] = _SERIALIZER.query("version_id", version_id, "str")
+    if timeout is not None:
+        _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0)
+
+    # Construct headers
+    if range is not None:
+        _headers["x-ms-range"] = _SERIALIZER.header("range", range, "str")
+    if lease_id is not None:
+        _headers["x-ms-lease-id"] = _SERIALIZER.header("lease_id", lease_id, "str")
+    if range_get_content_md5 is not None:
+        _headers["x-ms-range-get-content-md5"] = _SERIALIZER.header(
+            "range_get_content_md5", range_get_content_md5, "bool"
+        )
+    if range_get_content_crc64 is not None:
+        _headers["x-ms-range-get-content-crc64"] = _SERIALIZER.header(
+            "range_get_content_crc64", range_get_content_crc64, "bool"
+        )
+    if structured_body_type is not None:
+        _headers["x-ms-structured-body"] = _SERIALIZER.header("structured_body_type", structured_body_type, "str")
+    if encryption_key is not None:
+        _headers["x-ms-encryption-key"] = _SERIALIZER.header("encryption_key", encryption_key, "str")
+    if encryption_key_sha256 is not None:
+        _headers["x-ms-encryption-key-sha256"] = _SERIALIZER.header(
+            "encryption_key_sha256", encryption_key_sha256, "str"
+        )
+    if encryption_algorithm is not None:
+        _headers["x-ms-encryption-algorithm"] = _SERIALIZER.header("encryption_algorithm", encryption_algorithm, "str")
+    if if_modified_since is not None:
+        _headers["If-Modified-Since"] = _SERIALIZER.header("if_modified_since", if_modified_since, "rfc-1123")
+    if if_unmodified_since is not None:
+        _headers["If-Unmodified-Since"] = _SERIALIZER.header("if_unmodified_since", if_unmodified_since, "rfc-1123")
+    if if_match is not None:
+        _headers["If-Match"] = _SERIALIZER.header("if_match", if_match, "str")
+    if if_none_match is not None:
+        _headers["If-None-Match"] = _SERIALIZER.header("if_none_match", if_none_match, "str")
+    if if_tags is not None:
+        _headers["x-ms-if-tags"] = _SERIALIZER.header("if_tags", if_tags, "str")
+    _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str")
+    if request_id_parameter is not None:
+        _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str")
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
+
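+# Illustrative use of the builder above (placeholder URL; in normal use the
+# generated download operation calls this and supplies values from the client
+# configuration):
+#
+#     request = build_download_request(
+#         url="https://myaccount.blob.core.windows.net/mycontainer/myblob",
+#         range="bytes=0-1023",        # x-ms-range: first KiB only
+#         range_get_content_md5=True,  # service returns Content-MD5 for the range
+#     )
+#     assert request.method == "GET"  # conditional headers appear only when set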
+
+def build_get_properties_request(
+    url: str,
+    *,
+    snapshot: Optional[str] = None,
+    version_id: Optional[str] = None,
+    timeout: Optional[int] = None,
+    lease_id: Optional[str] = None,
+    encryption_key: Optional[str] = None,
+    encryption_key_sha256: Optional[str] = None,
+    encryption_algorithm: Optional[Union[str, _models.EncryptionAlgorithmType]] = None,
+    if_modified_since: Optional[datetime.datetime] = None,
+    if_unmodified_since: Optional[datetime.datetime] = None,
+    if_match: Optional[str] = None,
+    if_none_match: Optional[str] = None,
+    if_tags: Optional[str] = None,
+    request_id_parameter: Optional[str] = None,
+    **kwargs: Any
+) -> HttpRequest:
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    version: Literal["2025-01-05"] = kwargs.pop("version", _headers.pop("x-ms-version", "2025-01-05"))
+    accept = _headers.pop("Accept", "application/xml")
+
+    # Construct URL
+    _url = kwargs.pop("template_url", "{url}")
+    path_format_arguments = {
+        "url": _SERIALIZER.url("url", url, "str", skip_quote=True),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    if snapshot is not None:
+        _params["snapshot"] = _SERIALIZER.query("snapshot", snapshot, "str")
+    if version_id is not None:
+        _params["versionid"] = _SERIALIZER.query("version_id", version_id, "str")
+    if timeout is not None:
+        _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0)
+
+    # Construct headers
+    if lease_id is not None:
+        _headers["x-ms-lease-id"] = _SERIALIZER.header("lease_id", lease_id, "str")
+    if encryption_key is not None:
+        _headers["x-ms-encryption-key"] = _SERIALIZER.header("encryption_key", encryption_key, "str")
+    if encryption_key_sha256 is not None:
+        _headers["x-ms-encryption-key-sha256"] = _SERIALIZER.header(
+            "encryption_key_sha256", encryption_key_sha256, "str"
+        )
+    if encryption_algorithm is not None:
+        _headers["x-ms-encryption-algorithm"] = _SERIALIZER.header("encryption_algorithm", encryption_algorithm, "str")
+    if if_modified_since is not None:
+        _headers["If-Modified-Since"] = _SERIALIZER.header("if_modified_since", if_modified_since, "rfc-1123")
+    if if_unmodified_since is not None:
+        _headers["If-Unmodified-Since"] = _SERIALIZER.header("if_unmodified_since", if_unmodified_since, "rfc-1123")
+    if if_match is not None:
+        _headers["If-Match"] = _SERIALIZER.header("if_match", if_match, "str")
+    if if_none_match is not None:
+        _headers["If-None-Match"] = _SERIALIZER.header("if_none_match", if_none_match, "str")
+    if if_tags is not None:
+        _headers["x-ms-if-tags"] = _SERIALIZER.header("if_tags", if_tags, "str")
+    _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str")
+    if request_id_parameter is not None:
+        _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str")
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="HEAD", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_delete_request(
+    url: str,
+    *,
+    snapshot: Optional[str] = None,
+    version_id: Optional[str] = None,
+    timeout: Optional[int] = None,
+    lease_id: Optional[str] = None,
+    delete_snapshots: Optional[Union[str, _models.DeleteSnapshotsOptionType]] = None,
+    if_modified_since: Optional[datetime.datetime] = None,
+    if_unmodified_since: Optional[datetime.datetime] = None,
+    if_match: Optional[str] = None,
+    if_none_match: Optional[str] = None,
+    if_tags: Optional[str] = None,
+    request_id_parameter: Optional[str] = None,
+    blob_delete_type: Literal["Permanent"] = "Permanent",
+    **kwargs: Any
+) -> HttpRequest:
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    version: Literal["2025-01-05"] = kwargs.pop("version", _headers.pop("x-ms-version", "2025-01-05"))
+    accept = _headers.pop("Accept", "application/xml")
+
+    # Construct URL
+    _url = kwargs.pop("template_url", "{url}")
+    path_format_arguments = {
+        "url": _SERIALIZER.url("url", url, "str", skip_quote=True),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    if snapshot is not None:
+        _params["snapshot"] = _SERIALIZER.query("snapshot", snapshot, "str")
+    if version_id is not None:
+        _params["versionid"] = _SERIALIZER.query("version_id", version_id, "str")
+    if timeout is not None:
+        _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0)
+    if blob_delete_type is not None:
+        _params["deletetype"] = _SERIALIZER.query("blob_delete_type", blob_delete_type, "str")
+
+    # Construct headers
+    if lease_id is not None:
+        _headers["x-ms-lease-id"] = _SERIALIZER.header("lease_id", lease_id, "str")
+    if delete_snapshots is not None:
+        _headers["x-ms-delete-snapshots"] = _SERIALIZER.header("delete_snapshots", delete_snapshots, "str")
+    if if_modified_since is not None:
+        _headers["If-Modified-Since"] = _SERIALIZER.header("if_modified_since", if_modified_since, "rfc-1123")
+    if if_unmodified_since is not None:
+        _headers["If-Unmodified-Since"] = _SERIALIZER.header("if_unmodified_since", if_unmodified_since, "rfc-1123")
+    if if_match is not None:
+        _headers["If-Match"] = _SERIALIZER.header("if_match", if_match, "str")
+    if if_none_match is not None:
+        _headers["If-None-Match"] = _SERIALIZER.header("if_none_match", if_none_match, "str")
+    if if_tags is not None:
+        _headers["x-ms-if-tags"] = _SERIALIZER.header("if_tags", if_tags, "str")
+    _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str")
+    if request_id_parameter is not None:
+        _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str")
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="DELETE", url=_url, params=_params, headers=_headers, **kwargs)
+
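+# Note on the builder above: `blob_delete_type` defaults to "Permanent" and,
+# being non-None, is always emitted as the `deletetype` query parameter, while
+# `delete_snapshots` ("include" or "only") selects what happens to snapshots
+# when the base blob is deleted.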
+
+def build_undelete_request(
+    url: str, *, timeout: Optional[int] = None, request_id_parameter: Optional[str] = None, **kwargs: Any
+) -> HttpRequest:
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    comp: Literal["undelete"] = kwargs.pop("comp", _params.pop("comp", "undelete"))
+    version: Literal["2025-01-05"] = kwargs.pop("version", _headers.pop("x-ms-version", "2025-01-05"))
+    accept = _headers.pop("Accept", "application/xml")
+
+    # Construct URL
+    _url = kwargs.pop("template_url", "{url}")
+    path_format_arguments = {
+        "url": _SERIALIZER.url("url", url, "str", skip_quote=True),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    _params["comp"] = _SERIALIZER.query("comp", comp, "str")
+    if timeout is not None:
+        _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0)
+
+    # Construct headers
+    _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str")
+    if request_id_parameter is not None:
+        _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str")
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_set_expiry_request(
+    url: str,
+    *,
+    expiry_options: Union[str, _models.BlobExpiryOptions],
+    timeout: Optional[int] = None,
+    request_id_parameter: Optional[str] = None,
+    expires_on: Optional[str] = None,
+    **kwargs: Any
+) -> HttpRequest:
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    comp: Literal["expiry"] = kwargs.pop("comp", _params.pop("comp", "expiry"))
+    version: Literal["2025-01-05"] = kwargs.pop("version", _headers.pop("x-ms-version", "2025-01-05"))
+    accept = _headers.pop("Accept", "application/xml")
+
+    # Construct URL
+    _url = kwargs.pop("template_url", "{url}")
+    path_format_arguments = {
+        "url": _SERIALIZER.url("url", url, "str", skip_quote=True),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    _params["comp"] = _SERIALIZER.query("comp", comp, "str")
+    if timeout is not None:
+        _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0)
+
+    # Construct headers
+    _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str")
+    if request_id_parameter is not None:
+        _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str")
+    _headers["x-ms-expiry-option"] = _SERIALIZER.header("expiry_options", expiry_options, "str")
+    if expires_on is not None:
+        _headers["x-ms-expiry-time"] = _SERIALIZER.header("expires_on", expires_on, "str")
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs)
+
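+# Illustrative sketch for the expiry builder (placeholder values; the generated
+# BlobExpiryOptions enum includes values such as "Absolute" and "NeverExpire"):
+#
+#     request = build_set_expiry_request(
+#         url="https://myaccount.blob.core.windows.net/mycontainer/myblob",
+#         expiry_options="Absolute",
+#         expires_on="Fri, 04 Apr 2031 00:00:00 GMT",  # x-ms-expiry-time
+#     )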
+
+def build_set_http_headers_request(
+    url: str,
+    *,
+    timeout: Optional[int] = None,
+    blob_cache_control: Optional[str] = None,
+    blob_content_type: Optional[str] = None,
+    blob_content_md5: Optional[bytes] = None,
+    blob_content_encoding: Optional[str] = None,
+    blob_content_language: Optional[str] = None,
+    lease_id: Optional[str] = None,
+    if_modified_since: Optional[datetime.datetime] = None,
+    if_unmodified_since: Optional[datetime.datetime] = None,
+    if_match: Optional[str] = None,
+    if_none_match: Optional[str] = None,
+    if_tags: Optional[str] = None,
+    blob_content_disposition: Optional[str] = None,
+    request_id_parameter: Optional[str] = None,
+    **kwargs: Any
+) -> HttpRequest:
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    comp: Literal["properties"] = kwargs.pop("comp", _params.pop("comp", "properties"))
+    version: Literal["2025-01-05"] = kwargs.pop("version", _headers.pop("x-ms-version", "2025-01-05"))
+    accept = _headers.pop("Accept", "application/xml")
+
+    # Construct URL
+    _url = kwargs.pop("template_url", "{url}")
+    path_format_arguments = {
+        "url": _SERIALIZER.url("url", url, "str", skip_quote=True),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    _params["comp"] = _SERIALIZER.query("comp", comp, "str")
+    if timeout is not None:
+        _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0)
+
+    # Construct headers
+    if blob_cache_control is not None:
+        _headers["x-ms-blob-cache-control"] = _SERIALIZER.header("blob_cache_control", blob_cache_control, "str")
+    if blob_content_type is not None:
+        _headers["x-ms-blob-content-type"] = _SERIALIZER.header("blob_content_type", blob_content_type, "str")
+    if blob_content_md5 is not None:
+        _headers["x-ms-blob-content-md5"] = _SERIALIZER.header("blob_content_md5", blob_content_md5, "bytearray")
+    if blob_content_encoding is not None:
+        _headers["x-ms-blob-content-encoding"] = _SERIALIZER.header(
+            "blob_content_encoding", blob_content_encoding, "str"
+        )
+    if blob_content_language is not None:
+        _headers["x-ms-blob-content-language"] = _SERIALIZER.header(
+            "blob_content_language", blob_content_language, "str"
+        )
+    if lease_id is not None:
+        _headers["x-ms-lease-id"] = _SERIALIZER.header("lease_id", lease_id, "str")
+    if if_modified_since is not None:
+        _headers["If-Modified-Since"] = _SERIALIZER.header("if_modified_since", if_modified_since, "rfc-1123")
+    if if_unmodified_since is not None:
+        _headers["If-Unmodified-Since"] = _SERIALIZER.header("if_unmodified_since", if_unmodified_since, "rfc-1123")
+    if if_match is not None:
+        _headers["If-Match"] = _SERIALIZER.header("if_match", if_match, "str")
+    if if_none_match is not None:
+        _headers["If-None-Match"] = _SERIALIZER.header("if_none_match", if_none_match, "str")
+    if if_tags is not None:
+        _headers["x-ms-if-tags"] = _SERIALIZER.header("if_tags", if_tags, "str")
+    if blob_content_disposition is not None:
+        _headers["x-ms-blob-content-disposition"] = _SERIALIZER.header(
+            "blob_content_disposition", blob_content_disposition, "str"
+        )
+    _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str")
+    if request_id_parameter is not None:
+        _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str")
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_set_immutability_policy_request(
+    url: str,
+    *,
+    timeout: Optional[int] = None,
+    request_id_parameter: Optional[str] = None,
+    if_unmodified_since: Optional[datetime.datetime] = None,
+    immutability_policy_expiry: Optional[datetime.datetime] = None,
+    immutability_policy_mode: Optional[Union[str, _models.BlobImmutabilityPolicyMode]] = None,
+    snapshot: Optional[str] = None,
+    version_id: Optional[str] = None,
+    **kwargs: Any
+) -> HttpRequest:
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    comp: Literal["immutabilityPolicies"] = kwargs.pop("comp", _params.pop("comp", "immutabilityPolicies"))
+    version: Literal["2025-01-05"] = kwargs.pop("version", _headers.pop("x-ms-version", "2025-01-05"))
+    accept = _headers.pop("Accept", "application/xml")
+
+    # Construct URL
+    _url = kwargs.pop("template_url", "{url}")
+    path_format_arguments = {
+        "url": _SERIALIZER.url("url", url, "str", skip_quote=True),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    _params["comp"] = _SERIALIZER.query("comp", comp, "str")
+    if timeout is not None:
+        _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0)
+    if snapshot is not None:
+        _params["snapshot"] = _SERIALIZER.query("snapshot", snapshot, "str")
+    if version_id is not None:
+        _params["versionid"] = _SERIALIZER.query("version_id", version_id, "str")
+
+    # Construct headers
+    _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str")
+    if request_id_parameter is not None:
+        _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str")
+    if if_unmodified_since is not None:
+        _headers["If-Unmodified-Since"] = _SERIALIZER.header("if_unmodified_since", if_unmodified_since, "rfc-1123")
+    if immutability_policy_expiry is not None:
+        _headers["x-ms-immutability-policy-until-date"] = _SERIALIZER.header(
+            "immutability_policy_expiry", immutability_policy_expiry, "rfc-1123"
+        )
+    if immutability_policy_mode is not None:
+        _headers["x-ms-immutability-policy-mode"] = _SERIALIZER.header(
+            "immutability_policy_mode", immutability_policy_mode, "str"
+        )
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs)
+
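+# The builder above sends `immutability_policy_expiry` as the RFC 1123
+# x-ms-immutability-policy-until-date header and `immutability_policy_mode`
+# (e.g. "Unlocked" or "Locked") as x-ms-immutability-policy-mode; both are
+# optional and omitted when None.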
+
+def build_delete_immutability_policy_request(
+    url: str,
+    *,
+    timeout: Optional[int] = None,
+    request_id_parameter: Optional[str] = None,
+    snapshot: Optional[str] = None,
+    version_id: Optional[str] = None,
+    **kwargs: Any
+) -> HttpRequest:
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    comp: Literal["immutabilityPolicies"] = kwargs.pop("comp", _params.pop("comp", "immutabilityPolicies"))
+    version: Literal["2025-01-05"] = kwargs.pop("version", _headers.pop("x-ms-version", "2025-01-05"))
+    accept = _headers.pop("Accept", "application/xml")
+
+    # Construct URL
+    _url = kwargs.pop("template_url", "{url}")
+    path_format_arguments = {
+        "url": _SERIALIZER.url("url", url, "str", skip_quote=True),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    _params["comp"] = _SERIALIZER.query("comp", comp, "str")
+    if timeout is not None:
+        _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0)
+    if snapshot is not None:
+        _params["snapshot"] = _SERIALIZER.query("snapshot", snapshot, "str")
+    if version_id is not None:
+        _params["versionid"] = _SERIALIZER.query("version_id", version_id, "str")
+
+    # Construct headers
+    _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str")
+    if request_id_parameter is not None:
+        _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str")
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="DELETE", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_set_legal_hold_request(
+    url: str,
+    *,
+    legal_hold: bool,
+    timeout: Optional[int] = None,
+    request_id_parameter: Optional[str] = None,
+    snapshot: Optional[str] = None,
+    version_id: Optional[str] = None,
+    **kwargs: Any
+) -> HttpRequest:
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    comp: Literal["legalhold"] = kwargs.pop("comp", _params.pop("comp", "legalhold"))
+    version: Literal["2025-01-05"] = kwargs.pop("version", _headers.pop("x-ms-version", "2025-01-05"))
+    accept = _headers.pop("Accept", "application/xml")
+
+    # Construct URL
+    _url = kwargs.pop("template_url", "{url}")
+    path_format_arguments = {
+        "url": _SERIALIZER.url("url", url, "str", skip_quote=True),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    _params["comp"] = _SERIALIZER.query("comp", comp, "str")
+    if timeout is not None:
+        _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0)
+    if snapshot is not None:
+        _params["snapshot"] = _SERIALIZER.query("snapshot", snapshot, "str")
+    if version_id is not None:
+        _params["versionid"] = _SERIALIZER.query("version_id", version_id, "str")
+
+    # Construct headers
+    _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str")
+    if request_id_parameter is not None:
+        _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str")
+    _headers["x-ms-legal-hold"] = _SERIALIZER.header("legal_hold", legal_hold, "bool")
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_set_metadata_request(
+    url: str,
+    *,
+    timeout: Optional[int] = None,
+    metadata: Optional[Dict[str, str]] = None,
+    lease_id: Optional[str] = None,
+    encryption_key: Optional[str] = None,
+    encryption_key_sha256: Optional[str] = None,
+    encryption_algorithm: Optional[Union[str, _models.EncryptionAlgorithmType]] = None,
+    encryption_scope: Optional[str] = None,
+    if_modified_since: Optional[datetime.datetime] = None,
+    if_unmodified_since: Optional[datetime.datetime] = None,
+    if_match: Optional[str] = None,
+    if_none_match: Optional[str] = None,
+    if_tags: Optional[str] = None,
+    request_id_parameter: Optional[str] = None,
+    **kwargs: Any
+) -> HttpRequest:
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    comp: Literal["metadata"] = kwargs.pop("comp", _params.pop("comp", "metadata"))
+    version: Literal["2025-01-05"] = kwargs.pop("version", _headers.pop("x-ms-version", "2025-01-05"))
+    accept = _headers.pop("Accept", "application/xml")
+
+    # Construct URL
+    _url = kwargs.pop("template_url", "{url}")
+    path_format_arguments = {
+        "url": _SERIALIZER.url("url", url, "str", skip_quote=True),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    _params["comp"] = _SERIALIZER.query("comp", comp, "str")
+    if timeout is not None:
+        _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0)
+
+    # Construct headers
+    if metadata is not None:
+        _headers["x-ms-meta"] = _SERIALIZER.header("metadata", metadata, "{str}")
+    if lease_id is not None:
+        _headers["x-ms-lease-id"] = _SERIALIZER.header("lease_id", lease_id, "str")
+    if encryption_key is not None:
+        _headers["x-ms-encryption-key"] = _SERIALIZER.header("encryption_key", encryption_key, "str")
+    if encryption_key_sha256 is not None:
+        _headers["x-ms-encryption-key-sha256"] = _SERIALIZER.header(
+            "encryption_key_sha256", encryption_key_sha256, "str"
+        )
+    if encryption_algorithm is not None:
+        _headers["x-ms-encryption-algorithm"] = _SERIALIZER.header("encryption_algorithm", encryption_algorithm, "str")
+    if encryption_scope is not None:
+        _headers["x-ms-encryption-scope"] = _SERIALIZER.header("encryption_scope", encryption_scope, "str")
+    if if_modified_since is not None:
+        _headers["If-Modified-Since"] = _SERIALIZER.header("if_modified_since", if_modified_since, "rfc-1123")
+    if if_unmodified_since is not None:
+        _headers["If-Unmodified-Since"] = _SERIALIZER.header("if_unmodified_since", if_unmodified_since, "rfc-1123")
+    if if_match is not None:
+        _headers["If-Match"] = _SERIALIZER.header("if_match", if_match, "str")
+    if if_none_match is not None:
+        _headers["If-None-Match"] = _SERIALIZER.header("if_none_match", if_none_match, "str")
+    if if_tags is not None:
+        _headers["x-ms-if-tags"] = _SERIALIZER.header("if_tags", if_tags, "str")
+    _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str")
+    if request_id_parameter is not None:
+        _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str")
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs)
+
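+# Metadata is serialized with the "{str}" dict format under the single
+# "x-ms-meta" key here; each entry ultimately reaches the service as an
+# individual x-ms-meta-<name> header. Illustrative call (placeholder values):
+#
+#     request = build_set_metadata_request(
+#         url="https://myaccount.blob.core.windows.net/mycontainer/myblob",
+#         metadata={"project": "demo"},  # -> x-ms-meta-project: demo
+#     )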
+
+def build_acquire_lease_request(
+    url: str,
+    *,
+    timeout: Optional[int] = None,
+    duration: Optional[int] = None,
+    proposed_lease_id: Optional[str] = None,
+    if_modified_since: Optional[datetime.datetime] = None,
+    if_unmodified_since: Optional[datetime.datetime] = None,
+    if_match: Optional[str] = None,
+    if_none_match: Optional[str] = None,
+    if_tags: Optional[str] = None,
+    request_id_parameter: Optional[str] = None,
+    **kwargs: Any
+) -> HttpRequest:
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    comp: Literal["lease"] = kwargs.pop("comp", _params.pop("comp", "lease"))
+    action: Literal["acquire"] = kwargs.pop("action", _headers.pop("x-ms-lease-action", "acquire"))
+    version: Literal["2025-01-05"] = kwargs.pop("version", _headers.pop("x-ms-version", "2025-01-05"))
+    accept = _headers.pop("Accept", "application/xml")
+
+    # Construct URL
+    _url = kwargs.pop("template_url", "{url}")
+    path_format_arguments = {
+        "url": _SERIALIZER.url("url", url, "str", skip_quote=True),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    _params["comp"] = _SERIALIZER.query("comp", comp, "str")
+    if timeout is not None:
+        _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0)
+
+    # Construct headers
+    _headers["x-ms-lease-action"] = _SERIALIZER.header("action", action, "str")
+    if duration is not None:
+        _headers["x-ms-lease-duration"] = _SERIALIZER.header("duration", duration, "int")
+    if proposed_lease_id is not None:
+        _headers["x-ms-proposed-lease-id"] = _SERIALIZER.header("proposed_lease_id", proposed_lease_id, "str")
+    if if_modified_since is not None:
+        _headers["If-Modified-Since"] = _SERIALIZER.header("if_modified_since", if_modified_since, "rfc-1123")
+    if if_unmodified_since is not None:
+        _headers["If-Unmodified-Since"] = _SERIALIZER.header("if_unmodified_since", if_unmodified_since, "rfc-1123")
+    if if_match is not None:
+        _headers["If-Match"] = _SERIALIZER.header("if_match", if_match, "str")
+    if if_none_match is not None:
+        _headers["If-None-Match"] = _SERIALIZER.header("if_none_match", if_none_match, "str")
+    if if_tags is not None:
+        _headers["x-ms-if-tags"] = _SERIALIZER.header("if_tags", if_tags, "str")
+    _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str")
+    if request_id_parameter is not None:
+        _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str")
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs)
+
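+# The lease builders in this module share comp=lease and differ only in the
+# constant x-ms-lease-action header. For acquire, the service accepts a
+# `duration` of -1 (infinite) or 15-60 seconds. Illustrative call
+# (placeholder URL and lease id):
+#
+#     request = build_acquire_lease_request(
+#         url="https://myaccount.blob.core.windows.net/mycontainer/myblob",
+#         duration=-1,  # infinite lease
+#         proposed_lease_id="f81d4fae-7dec-11d0-a765-00a0c91e6bf6",
+#     )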
+
+def build_release_lease_request(
+    url: str,
+    *,
+    lease_id: str,
+    timeout: Optional[int] = None,
+    if_modified_since: Optional[datetime.datetime] = None,
+    if_unmodified_since: Optional[datetime.datetime] = None,
+    if_match: Optional[str] = None,
+    if_none_match: Optional[str] = None,
+    if_tags: Optional[str] = None,
+    request_id_parameter: Optional[str] = None,
+    **kwargs: Any
+) -> HttpRequest:
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    comp: Literal["lease"] = kwargs.pop("comp", _params.pop("comp", "lease"))
+    action: Literal["release"] = kwargs.pop("action", _headers.pop("x-ms-lease-action", "release"))
+    version: Literal["2025-01-05"] = kwargs.pop("version", _headers.pop("x-ms-version", "2025-01-05"))
+    accept = _headers.pop("Accept", "application/xml")
+
+    # Construct URL
+    _url = kwargs.pop("template_url", "{url}")
+    path_format_arguments = {
+        "url": _SERIALIZER.url("url", url, "str", skip_quote=True),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    _params["comp"] = _SERIALIZER.query("comp", comp, "str")
+    if timeout is not None:
+        _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0)
+
+    # Construct headers
+    _headers["x-ms-lease-action"] = _SERIALIZER.header("action", action, "str")
+    _headers["x-ms-lease-id"] = _SERIALIZER.header("lease_id", lease_id, "str")
+    if if_modified_since is not None:
+        _headers["If-Modified-Since"] = _SERIALIZER.header("if_modified_since", if_modified_since, "rfc-1123")
+    if if_unmodified_since is not None:
+        _headers["If-Unmodified-Since"] = _SERIALIZER.header("if_unmodified_since", if_unmodified_since, "rfc-1123")
+    if if_match is not None:
+        _headers["If-Match"] = _SERIALIZER.header("if_match", if_match, "str")
+    if if_none_match is not None:
+        _headers["If-None-Match"] = _SERIALIZER.header("if_none_match", if_none_match, "str")
+    if if_tags is not None:
+        _headers["x-ms-if-tags"] = _SERIALIZER.header("if_tags", if_tags, "str")
+    _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str")
+    if request_id_parameter is not None:
+        _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str")
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_renew_lease_request(
+    url: str,
+    *,
+    lease_id: str,
+    timeout: Optional[int] = None,
+    if_modified_since: Optional[datetime.datetime] = None,
+    if_unmodified_since: Optional[datetime.datetime] = None,
+    if_match: Optional[str] = None,
+    if_none_match: Optional[str] = None,
+    if_tags: Optional[str] = None,
+    request_id_parameter: Optional[str] = None,
+    **kwargs: Any
+) -> HttpRequest:
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    comp: Literal["lease"] = kwargs.pop("comp", _params.pop("comp", "lease"))
+    action: Literal["renew"] = kwargs.pop("action", _headers.pop("x-ms-lease-action", "renew"))
+    version: Literal["2025-01-05"] = kwargs.pop("version", _headers.pop("x-ms-version", "2025-01-05"))
+    accept = _headers.pop("Accept", "application/xml")
+
+    # Construct URL
+    _url = kwargs.pop("template_url", "{url}")
+    path_format_arguments = {
+        "url": _SERIALIZER.url("url", url, "str", skip_quote=True),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    _params["comp"] = _SERIALIZER.query("comp", comp, "str")
+    if timeout is not None:
+        _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0)
+
+    # Construct headers
+    _headers["x-ms-lease-action"] = _SERIALIZER.header("action", action, "str")
+    _headers["x-ms-lease-id"] = _SERIALIZER.header("lease_id", lease_id, "str")
+    if if_modified_since is not None:
+        _headers["If-Modified-Since"] = _SERIALIZER.header("if_modified_since", if_modified_since, "rfc-1123")
+    if if_unmodified_since is not None:
+        _headers["If-Unmodified-Since"] = _SERIALIZER.header("if_unmodified_since", if_unmodified_since, "rfc-1123")
+    if if_match is not None:
+        _headers["If-Match"] = _SERIALIZER.header("if_match", if_match, "str")
+    if if_none_match is not None:
+        _headers["If-None-Match"] = _SERIALIZER.header("if_none_match", if_none_match, "str")
+    if if_tags is not None:
+        _headers["x-ms-if-tags"] = _SERIALIZER.header("if_tags", if_tags, "str")
+    _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str")
+    if request_id_parameter is not None:
+        _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str")
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_change_lease_request(
+    url: str,
+    *,
+    lease_id: str,
+    proposed_lease_id: str,
+    timeout: Optional[int] = None,
+    if_modified_since: Optional[datetime.datetime] = None,
+    if_unmodified_since: Optional[datetime.datetime] = None,
+    if_match: Optional[str] = None,
+    if_none_match: Optional[str] = None,
+    if_tags: Optional[str] = None,
+    request_id_parameter: Optional[str] = None,
+    **kwargs: Any
+) -> HttpRequest:
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    comp: Literal["lease"] = kwargs.pop("comp", _params.pop("comp", "lease"))
+    action: Literal["change"] = kwargs.pop("action", _headers.pop("x-ms-lease-action", "change"))
+    version: Literal["2025-01-05"] = kwargs.pop("version", _headers.pop("x-ms-version", "2025-01-05"))
+    accept = _headers.pop("Accept", "application/xml")
+
+    # Construct URL
+    _url = kwargs.pop("template_url", "{url}")
+    path_format_arguments = {
+        "url": _SERIALIZER.url("url", url, "str", skip_quote=True),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    _params["comp"] = _SERIALIZER.query("comp", comp, "str")
+    if timeout is not None:
+        _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0)
+
+    # Construct headers
+    _headers["x-ms-lease-action"] = _SERIALIZER.header("action", action, "str")
+    _headers["x-ms-lease-id"] = _SERIALIZER.header("lease_id", lease_id, "str")
+    _headers["x-ms-proposed-lease-id"] = _SERIALIZER.header("proposed_lease_id", proposed_lease_id, "str")
+    if if_modified_since is not None:
+        _headers["If-Modified-Since"] = _SERIALIZER.header("if_modified_since", if_modified_since, "rfc-1123")
+    if if_unmodified_since is not None:
+        _headers["If-Unmodified-Since"] = _SERIALIZER.header("if_unmodified_since", if_unmodified_since, "rfc-1123")
+    if if_match is not None:
+        _headers["If-Match"] = _SERIALIZER.header("if_match", if_match, "str")
+    if if_none_match is not None:
+        _headers["If-None-Match"] = _SERIALIZER.header("if_none_match", if_none_match, "str")
+    if if_tags is not None:
+        _headers["x-ms-if-tags"] = _SERIALIZER.header("if_tags", if_tags, "str")
+    _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str")
+    if request_id_parameter is not None:
+        _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str")
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_break_lease_request(
+    url: str,
+    *,
+    timeout: Optional[int] = None,
+    break_period: Optional[int] = None,
+    if_modified_since: Optional[datetime.datetime] = None,
+    if_unmodified_since: Optional[datetime.datetime] = None,
+    if_match: Optional[str] = None,
+    if_none_match: Optional[str] = None,
+    if_tags: Optional[str] = None,
+    request_id_parameter: Optional[str] = None,
+    **kwargs: Any
+) -> HttpRequest:
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    comp: Literal["lease"] = kwargs.pop("comp", _params.pop("comp", "lease"))
+    action: Literal["break"] = kwargs.pop("action", _headers.pop("x-ms-lease-action", "break"))
+    version: Literal["2025-01-05"] = kwargs.pop("version", _headers.pop("x-ms-version", "2025-01-05"))
+    accept = _headers.pop("Accept", "application/xml")
+
+    # Construct URL
+    _url = kwargs.pop("template_url", "{url}")
+    path_format_arguments = {
+        "url": _SERIALIZER.url("url", url, "str", skip_quote=True),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    _params["comp"] = _SERIALIZER.query("comp", comp, "str")
+    if timeout is not None:
+        _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0)
+
+    # Construct headers
+    _headers["x-ms-lease-action"] = _SERIALIZER.header("action", action, "str")
+    if break_period is not None:
+        _headers["x-ms-lease-break-period"] = _SERIALIZER.header("break_period", break_period, "int")
+    if if_modified_since is not None:
+        _headers["If-Modified-Since"] = _SERIALIZER.header("if_modified_since", if_modified_since, "rfc-1123")
+    if if_unmodified_since is not None:
+        _headers["If-Unmodified-Since"] = _SERIALIZER.header("if_unmodified_since", if_unmodified_since, "rfc-1123")
+    if if_match is not None:
+        _headers["If-Match"] = _SERIALIZER.header("if_match", if_match, "str")
+    if if_none_match is not None:
+        _headers["If-None-Match"] = _SERIALIZER.header("if_none_match", if_none_match, "str")
+    if if_tags is not None:
+        _headers["x-ms-if-tags"] = _SERIALIZER.header("if_tags", if_tags, "str")
+    _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str")
+    if request_id_parameter is not None:
+        _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str")
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_create_snapshot_request(
+    url: str,
+    *,
+    timeout: Optional[int] = None,
+    metadata: Optional[Dict[str, str]] = None,
+    encryption_key: Optional[str] = None,
+    encryption_key_sha256: Optional[str] = None,
+    encryption_algorithm: Optional[Union[str, _models.EncryptionAlgorithmType]] = None,
+    encryption_scope: Optional[str] = None,
+    if_modified_since: Optional[datetime.datetime] = None,
+    if_unmodified_since: Optional[datetime.datetime] = None,
+    if_match: Optional[str] = None,
+    if_none_match: Optional[str] = None,
+    if_tags: Optional[str] = None,
+    lease_id: Optional[str] = None,
+    request_id_parameter: Optional[str] = None,
+    **kwargs: Any
+) -> HttpRequest:
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    comp: Literal["snapshot"] = kwargs.pop("comp", _params.pop("comp", "snapshot"))
+    version: Literal["2025-01-05"] = kwargs.pop("version", _headers.pop("x-ms-version", "2025-01-05"))
+    accept = _headers.pop("Accept", "application/xml")
+
+    # Construct URL
+    _url = kwargs.pop("template_url", "{url}")
+    path_format_arguments = {
+        "url": _SERIALIZER.url("url", url, "str", skip_quote=True),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    _params["comp"] = _SERIALIZER.query("comp", comp, "str")
+    if timeout is not None:
+        _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0)
+
+    # Construct headers
+    if metadata is not None:
+        _headers["x-ms-meta"] = _SERIALIZER.header("metadata", metadata, "{str}")
+    if encryption_key is not None:
+        _headers["x-ms-encryption-key"] = _SERIALIZER.header("encryption_key", encryption_key, "str")
+    if encryption_key_sha256 is not None:
+        _headers["x-ms-encryption-key-sha256"] = _SERIALIZER.header(
+            "encryption_key_sha256", encryption_key_sha256, "str"
+        )
+    if encryption_algorithm is not None:
+        _headers["x-ms-encryption-algorithm"] = _SERIALIZER.header("encryption_algorithm", encryption_algorithm, "str")
+    if encryption_scope is not None:
+        _headers["x-ms-encryption-scope"] = _SERIALIZER.header("encryption_scope", encryption_scope, "str")
+    if if_modified_since is not None:
+        _headers["If-Modified-Since"] = _SERIALIZER.header("if_modified_since", if_modified_since, "rfc-1123")
+    if if_unmodified_since is not None:
+        _headers["If-Unmodified-Since"] = _SERIALIZER.header("if_unmodified_since", if_unmodified_since, "rfc-1123")
+    if if_match is not None:
+        _headers["If-Match"] = _SERIALIZER.header("if_match", if_match, "str")
+    if if_none_match is not None:
+        _headers["If-None-Match"] = _SERIALIZER.header("if_none_match", if_none_match, "str")
+    if if_tags is not None:
+        _headers["x-ms-if-tags"] = _SERIALIZER.header("if_tags", if_tags, "str")
+    if lease_id is not None:
+        _headers["x-ms-lease-id"] = _SERIALIZER.header("lease_id", lease_id, "str")
+    _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str")
+    if request_id_parameter is not None:
+        _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str")
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_start_copy_from_url_request(
+    url: str,
+    *,
+    copy_source: str,
+    timeout: Optional[int] = None,
+    metadata: Optional[Dict[str, str]] = None,
+    tier: Optional[Union[str, _models.AccessTierOptional]] = None,
+    rehydrate_priority: Optional[Union[str, _models.RehydratePriority]] = None,
+    source_if_modified_since: Optional[datetime.datetime] = None,
+    source_if_unmodified_since: Optional[datetime.datetime] = None,
+    source_if_match: Optional[str] = None,
+    source_if_none_match: Optional[str] = None,
+    source_if_tags: Optional[str] = None,
+    if_modified_since: Optional[datetime.datetime] = None,
+    if_unmodified_since: Optional[datetime.datetime] = None,
+    if_match: Optional[str] = None,
+    if_none_match: Optional[str] = None,
+    if_tags: Optional[str] = None,
+    lease_id: Optional[str] = None,
+    request_id_parameter: Optional[str] = None,
+    blob_tags_string: Optional[str] = None,
+    seal_blob: Optional[bool] = None,
+    immutability_policy_expiry: Optional[datetime.datetime] = None,
+    immutability_policy_mode: Optional[Union[str, _models.BlobImmutabilityPolicyMode]] = None,
+    legal_hold: Optional[bool] = None,
+    **kwargs: Any
+) -> HttpRequest:
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    version: Literal["2025-01-05"] = kwargs.pop("version", _headers.pop("x-ms-version", "2025-01-05"))
+    accept = _headers.pop("Accept", "application/xml")
+
+    # Construct URL
+    _url = kwargs.pop("template_url", "{url}")
+    path_format_arguments = {
+        "url": _SERIALIZER.url("url", url, "str", skip_quote=True),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    if timeout is not None:
+        _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0)
+
+    # Construct headers
+    if metadata is not None:
+        _headers["x-ms-meta"] = _SERIALIZER.header("metadata", metadata, "{str}")
+    if tier is not None:
+        _headers["x-ms-access-tier"] = _SERIALIZER.header("tier", tier, "str")
+    if rehydrate_priority is not None:
+        _headers["x-ms-rehydrate-priority"] = _SERIALIZER.header("rehydrate_priority", rehydrate_priority, "str")
+    if source_if_modified_since is not None:
+        _headers["x-ms-source-if-modified-since"] = _SERIALIZER.header(
+            "source_if_modified_since", source_if_modified_since, "rfc-1123"
+        )
+    if source_if_unmodified_since is not None:
+        _headers["x-ms-source-if-unmodified-since"] = _SERIALIZER.header(
+            "source_if_unmodified_since", source_if_unmodified_since, "rfc-1123"
+        )
+    if source_if_match is not None:
+        _headers["x-ms-source-if-match"] = _SERIALIZER.header("source_if_match", source_if_match, "str")
+    if source_if_none_match is not None:
+        _headers["x-ms-source-if-none-match"] = _SERIALIZER.header("source_if_none_match", source_if_none_match, "str")
+    if source_if_tags is not None:
+        _headers["x-ms-source-if-tags"] = _SERIALIZER.header("source_if_tags", source_if_tags, "str")
+    if if_modified_since is not None:
+        _headers["If-Modified-Since"] = _SERIALIZER.header("if_modified_since", if_modified_since, "rfc-1123")
+    if if_unmodified_since is not None:
+        _headers["If-Unmodified-Since"] = _SERIALIZER.header("if_unmodified_since", if_unmodified_since, "rfc-1123")
+    if if_match is not None:
+        _headers["If-Match"] = _SERIALIZER.header("if_match", if_match, "str")
+    if if_none_match is not None:
+        _headers["If-None-Match"] = _SERIALIZER.header("if_none_match", if_none_match, "str")
+    if if_tags is not None:
+        _headers["x-ms-if-tags"] = _SERIALIZER.header("if_tags", if_tags, "str")
+    _headers["x-ms-copy-source"] = _SERIALIZER.header("copy_source", copy_source, "str")
+    if lease_id is not None:
+        _headers["x-ms-lease-id"] = _SERIALIZER.header("lease_id", lease_id, "str")
+    _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str")
+    if request_id_parameter is not None:
+        _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str")
+    if blob_tags_string is not None:
+        _headers["x-ms-tags"] = _SERIALIZER.header("blob_tags_string", blob_tags_string, "str")
+    if seal_blob is not None:
+        _headers["x-ms-seal-blob"] = _SERIALIZER.header("seal_blob", seal_blob, "bool")
+    if immutability_policy_expiry is not None:
+        _headers["x-ms-immutability-policy-until-date"] = _SERIALIZER.header(
+            "immutability_policy_expiry", immutability_policy_expiry, "rfc-1123"
+        )
+    if immutability_policy_mode is not None:
+        _headers["x-ms-immutability-policy-mode"] = _SERIALIZER.header(
+            "immutability_policy_mode", immutability_policy_mode, "str"
+        )
+    if legal_hold is not None:
+        _headers["x-ms-legal-hold"] = _SERIALIZER.header("legal_hold", legal_hold, "bool")
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs)
+
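+# The builder above starts an asynchronous, service-side copy: the PUT returns
+# once the copy is scheduled, with an x-ms-copy-id response header, and that id
+# is what build_abort_copy_from_url_request (below) takes as `copy_id`. The
+# x-ms-source-if-* headers gate the source; the plain If-* headers gate the
+# destination.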
+
+def build_copy_from_url_request(
+    url: str,
+    *,
+    copy_source: str,
+    timeout: Optional[int] = None,
+    metadata: Optional[Dict[str, str]] = None,
+    tier: Optional[Union[str, _models.AccessTierOptional]] = None,
+    source_if_modified_since: Optional[datetime.datetime] = None,
+    source_if_unmodified_since: Optional[datetime.datetime] = None,
+    source_if_match: Optional[str] = None,
+    source_if_none_match: Optional[str] = None,
+    if_modified_since: Optional[datetime.datetime] = None,
+    if_unmodified_since: Optional[datetime.datetime] = None,
+    if_match: Optional[str] = None,
+    if_none_match: Optional[str] = None,
+    if_tags: Optional[str] = None,
+    lease_id: Optional[str] = None,
+    request_id_parameter: Optional[str] = None,
+    source_content_md5: Optional[bytes] = None,
+    blob_tags_string: Optional[str] = None,
+    immutability_policy_expiry: Optional[datetime.datetime] = None,
+    immutability_policy_mode: Optional[Union[str, _models.BlobImmutabilityPolicyMode]] = None,
+    legal_hold: Optional[bool] = None,
+    copy_source_authorization: Optional[str] = None,
+    encryption_scope: Optional[str] = None,
+    copy_source_tags: Optional[Union[str, _models.BlobCopySourceTags]] = None,
+    **kwargs: Any
+) -> HttpRequest:
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    x_ms_requires_sync: Literal["true"] = kwargs.pop("x_ms_requires_sync", _headers.pop("x-ms-requires-sync", "true"))
+    version: Literal["2025-01-05"] = kwargs.pop("version", _headers.pop("x-ms-version", "2025-01-05"))
+    accept = _headers.pop("Accept", "application/xml")
+
+    # Construct URL
+    _url = kwargs.pop("template_url", "{url}")
+    path_format_arguments = {
+        "url": _SERIALIZER.url("url", url, "str", skip_quote=True),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    if timeout is not None:
+        _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0)
+
+    # Construct headers
+    _headers["x-ms-requires-sync"] = _SERIALIZER.header("x_ms_requires_sync", x_ms_requires_sync, "str")
+    if metadata is not None:
+        _headers["x-ms-meta"] = _SERIALIZER.header("metadata", metadata, "{str}")
+    if tier is not None:
+        _headers["x-ms-access-tier"] = _SERIALIZER.header("tier", tier, "str")
+    if source_if_modified_since is not None:
+        _headers["x-ms-source-if-modified-since"] = _SERIALIZER.header(
+            "source_if_modified_since", source_if_modified_since, "rfc-1123"
+        )
+    if source_if_unmodified_since is not None:
+        _headers["x-ms-source-if-unmodified-since"] = _SERIALIZER.header(
+            "source_if_unmodified_since", source_if_unmodified_since, "rfc-1123"
+        )
+    if source_if_match is not None:
+        _headers["x-ms-source-if-match"] = _SERIALIZER.header("source_if_match", source_if_match, "str")
+    if source_if_none_match is not None:
+        _headers["x-ms-source-if-none-match"] = _SERIALIZER.header("source_if_none_match", source_if_none_match, "str")
+    if if_modified_since is not None:
+        _headers["If-Modified-Since"] = _SERIALIZER.header("if_modified_since", if_modified_since, "rfc-1123")
+    if if_unmodified_since is not None:
+        _headers["If-Unmodified-Since"] = _SERIALIZER.header("if_unmodified_since", if_unmodified_since, "rfc-1123")
+    if if_match is not None:
+        _headers["If-Match"] = _SERIALIZER.header("if_match", if_match, "str")
+    if if_none_match is not None:
+        _headers["If-None-Match"] = _SERIALIZER.header("if_none_match", if_none_match, "str")
+    if if_tags is not None:
+        _headers["x-ms-if-tags"] = _SERIALIZER.header("if_tags", if_tags, "str")
+    _headers["x-ms-copy-source"] = _SERIALIZER.header("copy_source", copy_source, "str")
+    if lease_id is not None:
+        _headers["x-ms-lease-id"] = _SERIALIZER.header("lease_id", lease_id, "str")
+    _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str")
+    if request_id_parameter is not None:
+        _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str")
+    if source_content_md5 is not None:
+        _headers["x-ms-source-content-md5"] = _SERIALIZER.header("source_content_md5", source_content_md5, "bytearray")
+    if blob_tags_string is not None:
+        _headers["x-ms-tags"] = _SERIALIZER.header("blob_tags_string", blob_tags_string, "str")
+    if immutability_policy_expiry is not None:
+        _headers["x-ms-immutability-policy-until-date"] = _SERIALIZER.header(
+            "immutability_policy_expiry", immutability_policy_expiry, "rfc-1123"
+        )
+    if immutability_policy_mode is not None:
+        _headers["x-ms-immutability-policy-mode"] = _SERIALIZER.header(
+            "immutability_policy_mode", immutability_policy_mode, "str"
+        )
+    if legal_hold is not None:
+        _headers["x-ms-legal-hold"] = _SERIALIZER.header("legal_hold", legal_hold, "bool")
+    if copy_source_authorization is not None:
+        _headers["x-ms-copy-source-authorization"] = _SERIALIZER.header(
+            "copy_source_authorization", copy_source_authorization, "str"
+        )
+    if encryption_scope is not None:
+        _headers["x-ms-encryption-scope"] = _SERIALIZER.header("encryption_scope", encryption_scope, "str")
+    if copy_source_tags is not None:
+        _headers["x-ms-copy-source-tag-option"] = _SERIALIZER.header("copy_source_tags", copy_source_tags, "str")
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs)
+
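+# Illustrative only, not part of the generated client: a minimal sketch showing
+# how the copy-from-URL builder above could be exercised. The account, container,
+# and blob names are hypothetical placeholders; the builder pins
+# x-ms-requires-sync to "true", so the resulting PUT asks for a synchronous copy.
+def _example_build_copy_from_url() -> HttpRequest:
+    return build_copy_from_url_request(
+        url="https://myaccount.blob.core.windows.net/mycontainer/dest-blob",
+        copy_source="https://myaccount.blob.core.windows.net/mycontainer/src-blob",
+        timeout=30,
+    )
+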
+
+def build_abort_copy_from_url_request(
+    url: str,
+    *,
+    copy_id: str,
+    timeout: Optional[int] = None,
+    lease_id: Optional[str] = None,
+    request_id_parameter: Optional[str] = None,
+    **kwargs: Any
+) -> HttpRequest:
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    comp: Literal["copy"] = kwargs.pop("comp", _params.pop("comp", "copy"))
+    copy_action_abort_constant: Literal["abort"] = kwargs.pop(
+        "copy_action_abort_constant", _headers.pop("x-ms-copy-action", "abort")
+    )
+    version: Literal["2025-01-05"] = kwargs.pop("version", _headers.pop("x-ms-version", "2025-01-05"))
+    accept = _headers.pop("Accept", "application/xml")
+
+    # Construct URL
+    _url = kwargs.pop("template_url", "{url}")
+    path_format_arguments = {
+        "url": _SERIALIZER.url("url", url, "str", skip_quote=True),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    _params["comp"] = _SERIALIZER.query("comp", comp, "str")
+    _params["copyid"] = _SERIALIZER.query("copy_id", copy_id, "str")
+    if timeout is not None:
+        _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0)
+
+    # Construct headers
+    _headers["x-ms-copy-action"] = _SERIALIZER.header("copy_action_abort_constant", copy_action_abort_constant, "str")
+    if lease_id is not None:
+        _headers["x-ms-lease-id"] = _SERIALIZER.header("lease_id", lease_id, "str")
+    _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str")
+    if request_id_parameter is not None:
+        _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str")
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs)
+
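+# Illustrative only, not generated code: a sketch of building an abort for a
+# pending copy. The copy_id below is a hypothetical placeholder; a real id is
+# returned in the x-ms-copy-id header of the response that started the copy.
+def _example_build_abort_copy() -> HttpRequest:
+    return build_abort_copy_from_url_request(
+        url="https://myaccount.blob.core.windows.net/mycontainer/dest-blob",
+        copy_id="00000000-0000-0000-0000-000000000000",
+    )
+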
+
+def build_set_tier_request(
+    url: str,
+    *,
+    tier: Union[str, _models.AccessTierRequired],
+    snapshot: Optional[str] = None,
+    version_id: Optional[str] = None,
+    timeout: Optional[int] = None,
+    rehydrate_priority: Optional[Union[str, _models.RehydratePriority]] = None,
+    request_id_parameter: Optional[str] = None,
+    lease_id: Optional[str] = None,
+    if_tags: Optional[str] = None,
+    **kwargs: Any
+) -> HttpRequest:
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    comp: Literal["tier"] = kwargs.pop("comp", _params.pop("comp", "tier"))
+    version: Literal["2025-01-05"] = kwargs.pop("version", _headers.pop("x-ms-version", "2025-01-05"))
+    accept = _headers.pop("Accept", "application/xml")
+
+    # Construct URL
+    _url = kwargs.pop("template_url", "{url}")
+    path_format_arguments = {
+        "url": _SERIALIZER.url("url", url, "str", skip_quote=True),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    _params["comp"] = _SERIALIZER.query("comp", comp, "str")
+    if snapshot is not None:
+        _params["snapshot"] = _SERIALIZER.query("snapshot", snapshot, "str")
+    if version_id is not None:
+        _params["versionid"] = _SERIALIZER.query("version_id", version_id, "str")
+    if timeout is not None:
+        _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0)
+
+    # Construct headers
+    _headers["x-ms-access-tier"] = _SERIALIZER.header("tier", tier, "str")
+    if rehydrate_priority is not None:
+        _headers["x-ms-rehydrate-priority"] = _SERIALIZER.header("rehydrate_priority", rehydrate_priority, "str")
+    _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str")
+    if request_id_parameter is not None:
+        _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str")
+    if lease_id is not None:
+        _headers["x-ms-lease-id"] = _SERIALIZER.header("lease_id", lease_id, "str")
+    if if_tags is not None:
+        _headers["x-ms-if-tags"] = _SERIALIZER.header("if_tags", if_tags, "str")
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs)
+
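+# Illustrative only: a sketch of a Set Tier request that moves a blob to the
+# Archive tier with a standard rehydrate priority. The tier and priority
+# strings mirror the service's documented enum values; the URL is a placeholder.
+def _example_build_set_tier() -> HttpRequest:
+    return build_set_tier_request(
+        url="https://myaccount.blob.core.windows.net/mycontainer/my-blob",
+        tier="Archive",
+        rehydrate_priority="Standard",
+    )
+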
+
+def build_get_account_info_request(
+    url: str, *, timeout: Optional[int] = None, request_id_parameter: Optional[str] = None, **kwargs: Any
+) -> HttpRequest:
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    restype: Literal["account"] = kwargs.pop("restype", _params.pop("restype", "account"))
+    comp: Literal["properties"] = kwargs.pop("comp", _params.pop("comp", "properties"))
+    version: Literal["2025-01-05"] = kwargs.pop("version", _headers.pop("x-ms-version", "2025-01-05"))
+    accept = _headers.pop("Accept", "application/xml")
+
+    # Construct URL
+    _url = kwargs.pop("template_url", "{url}")
+    path_format_arguments = {
+        "url": _SERIALIZER.url("url", url, "str", skip_quote=True),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    _params["restype"] = _SERIALIZER.query("restype", restype, "str")
+    _params["comp"] = _SERIALIZER.query("comp", comp, "str")
+    if timeout is not None:
+        _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0)
+
+    # Construct headers
+    _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str")
+    if request_id_parameter is not None:
+        _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str")
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
+
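+# Illustrative only: the account-info builder needs nothing beyond the account
+# URL, since restype=account and comp=properties are pinned by the builder.
+def _example_build_get_account_info() -> HttpRequest:
+    return build_get_account_info_request(url="https://myaccount.blob.core.windows.net")
+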
+
+def build_query_request(
+    url: str,
+    *,
+    snapshot: Optional[str] = None,
+    timeout: Optional[int] = None,
+    lease_id: Optional[str] = None,
+    encryption_key: Optional[str] = None,
+    encryption_key_sha256: Optional[str] = None,
+    encryption_algorithm: Optional[Union[str, _models.EncryptionAlgorithmType]] = None,
+    if_modified_since: Optional[datetime.datetime] = None,
+    if_unmodified_since: Optional[datetime.datetime] = None,
+    if_match: Optional[str] = None,
+    if_none_match: Optional[str] = None,
+    if_tags: Optional[str] = None,
+    request_id_parameter: Optional[str] = None,
+    content: Any = None,
+    **kwargs: Any
+) -> HttpRequest:
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    comp: Literal["query"] = kwargs.pop("comp", _params.pop("comp", "query"))
+    content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+    version: Literal["2025-01-05"] = kwargs.pop("version", _headers.pop("x-ms-version", "2025-01-05"))
+    accept = _headers.pop("Accept", "application/xml")
+
+    # Construct URL
+    _url = kwargs.pop("template_url", "{url}")
+    path_format_arguments = {
+        "url": _SERIALIZER.url("url", url, "str", skip_quote=True),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    _params["comp"] = _SERIALIZER.query("comp", comp, "str")
+    if snapshot is not None:
+        _params["snapshot"] = _SERIALIZER.query("snapshot", snapshot, "str")
+    if timeout is not None:
+        _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0)
+
+    # Construct headers
+    if lease_id is not None:
+        _headers["x-ms-lease-id"] = _SERIALIZER.header("lease_id", lease_id, "str")
+    if encryption_key is not None:
+        _headers["x-ms-encryption-key"] = _SERIALIZER.header("encryption_key", encryption_key, "str")
+    if encryption_key_sha256 is not None:
+        _headers["x-ms-encryption-key-sha256"] = _SERIALIZER.header(
+            "encryption_key_sha256", encryption_key_sha256, "str"
+        )
+    if encryption_algorithm is not None:
+        _headers["x-ms-encryption-algorithm"] = _SERIALIZER.header("encryption_algorithm", encryption_algorithm, "str")
+    if if_modified_since is not None:
+        _headers["If-Modified-Since"] = _SERIALIZER.header("if_modified_since", if_modified_since, "rfc-1123")
+    if if_unmodified_since is not None:
+        _headers["If-Unmodified-Since"] = _SERIALIZER.header("if_unmodified_since", if_unmodified_since, "rfc-1123")
+    if if_match is not None:
+        _headers["If-Match"] = _SERIALIZER.header("if_match", if_match, "str")
+    if if_none_match is not None:
+        _headers["If-None-Match"] = _SERIALIZER.header("if_none_match", if_none_match, "str")
+    if if_tags is not None:
+        _headers["x-ms-if-tags"] = _SERIALIZER.header("if_tags", if_tags, "str")
+    _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str")
+    if request_id_parameter is not None:
+        _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str")
+    if content_type is not None:
+        _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, content=content, **kwargs)
+
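+# Illustrative only: a sketch of a Query Blob Contents request. The body is
+# normally a serialized _models.QueryRequest; a pre-serialized XML payload is
+# assumed here and passed through as-is.
+def _example_build_query(serialized_query_request: bytes) -> HttpRequest:
+    return build_query_request(
+        url="https://myaccount.blob.core.windows.net/mycontainer/data.csv",
+        content=serialized_query_request,
+        content_type="application/xml",
+    )
+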
+
+def build_get_tags_request(
+    url: str,
+    *,
+    timeout: Optional[int] = None,
+    request_id_parameter: Optional[str] = None,
+    snapshot: Optional[str] = None,
+    version_id: Optional[str] = None,
+    if_tags: Optional[str] = None,
+    lease_id: Optional[str] = None,
+    **kwargs: Any
+) -> HttpRequest:
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    comp: Literal["tags"] = kwargs.pop("comp", _params.pop("comp", "tags"))
+    version: Literal["2025-01-05"] = kwargs.pop("version", _headers.pop("x-ms-version", "2025-01-05"))
+    accept = _headers.pop("Accept", "application/xml")
+
+    # Construct URL
+    _url = kwargs.pop("template_url", "{url}")
+    path_format_arguments = {
+        "url": _SERIALIZER.url("url", url, "str", skip_quote=True),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    _params["comp"] = _SERIALIZER.query("comp", comp, "str")
+    if timeout is not None:
+        _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0)
+    if snapshot is not None:
+        _params["snapshot"] = _SERIALIZER.query("snapshot", snapshot, "str")
+    if version_id is not None:
+        _params["versionid"] = _SERIALIZER.query("version_id", version_id, "str")
+
+    # Construct headers
+    _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str")
+    if request_id_parameter is not None:
+        _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str")
+    if if_tags is not None:
+        _headers["x-ms-if-tags"] = _SERIALIZER.header("if_tags", if_tags, "str")
+    if lease_id is not None:
+        _headers["x-ms-lease-id"] = _SERIALIZER.header("lease_id", lease_id, "str")
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
+
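+# Illustrative only: tags can be read from the base blob, a snapshot, or a
+# specific version; a hypothetical version id is passed through here.
+def _example_build_get_tags() -> HttpRequest:
+    return build_get_tags_request(
+        url="https://myaccount.blob.core.windows.net/mycontainer/my-blob",
+        version_id="2025-01-01T00:00:00.0000000Z",
+    )
+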
+
+def build_set_tags_request(
+    url: str,
+    *,
+    timeout: Optional[int] = None,
+    version_id: Optional[str] = None,
+    transactional_content_md5: Optional[bytes] = None,
+    transactional_content_crc64: Optional[bytes] = None,
+    request_id_parameter: Optional[str] = None,
+    if_tags: Optional[str] = None,
+    lease_id: Optional[str] = None,
+    content: Any = None,
+    **kwargs: Any
+) -> HttpRequest:
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    comp: Literal["tags"] = kwargs.pop("comp", _params.pop("comp", "tags"))
+    content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+    version: Literal["2025-01-05"] = kwargs.pop("version", _headers.pop("x-ms-version", "2025-01-05"))
+    accept = _headers.pop("Accept", "application/xml")
+
+    # Construct URL
+    _url = kwargs.pop("template_url", "{url}")
+    path_format_arguments = {
+        "url": _SERIALIZER.url("url", url, "str", skip_quote=True),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    _params["comp"] = _SERIALIZER.query("comp", comp, "str")
+    if timeout is not None:
+        _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0)
+    if version_id is not None:
+        _params["versionid"] = _SERIALIZER.query("version_id", version_id, "str")
+
+    # Construct headers
+    _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str")
+    if transactional_content_md5 is not None:
+        _headers["Content-MD5"] = _SERIALIZER.header(
+            "transactional_content_md5", transactional_content_md5, "bytearray"
+        )
+    if transactional_content_crc64 is not None:
+        _headers["x-ms-content-crc64"] = _SERIALIZER.header(
+            "transactional_content_crc64", transactional_content_crc64, "bytearray"
+        )
+    if request_id_parameter is not None:
+        _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str")
+    if if_tags is not None:
+        _headers["x-ms-if-tags"] = _SERIALIZER.header("if_tags", if_tags, "str")
+    if lease_id is not None:
+        _headers["x-ms-lease-id"] = _SERIALIZER.header("lease_id", lease_id, "str")
+    if content_type is not None:
+        _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, content=content, **kwargs)
+
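+# Illustrative only: Set Tags carries a serialized _models.BlobTags XML body,
+# assumed pre-serialized here. The x-ms-if-tags expression makes the update
+# conditional on the blob's current tags; its exact syntax is an assumption
+# drawn from the service's SQL-like tag filter grammar.
+def _example_build_set_tags(serialized_blob_tags: bytes) -> HttpRequest:
+    return build_set_tags_request(
+        url="https://myaccount.blob.core.windows.net/mycontainer/my-blob",
+        content=serialized_blob_tags,
+        content_type="application/xml",
+        if_tags="\"project\" = 'alpha'",
+    )
+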
+
+class BlobOperations:  # pylint: disable=too-many-public-methods
+    """
+    .. warning::
+        **DO NOT** instantiate this class directly.
+
+        Instead, you should access the following operations through
+        :class:`~azure.storage.blob.AzureBlobStorage`'s
+        :attr:`blob` attribute.
+    """
+
+    models = _models
+
+    def __init__(self, *args, **kwargs):
+        input_args = list(args)
+        self._client: PipelineClient = input_args.pop(0) if input_args else kwargs.pop("client")
+        self._config: AzureBlobStorageConfiguration = input_args.pop(0) if input_args else kwargs.pop("config")
+        self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer")
+        self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer")
+
+    @distributed_trace
+    def download(
+        self,
+        snapshot: Optional[str] = None,
+        version_id: Optional[str] = None,
+        timeout: Optional[int] = None,
+        range: Optional[str] = None,
+        range_get_content_md5: Optional[bool] = None,
+        range_get_content_crc64: Optional[bool] = None,
+        structured_body_type: Optional[str] = None,
+        request_id_parameter: Optional[str] = None,
+        lease_access_conditions: Optional[_models.LeaseAccessConditions] = None,
+        cpk_info: Optional[_models.CpkInfo] = None,
+        modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None,
+        **kwargs: Any
+    ) -> Iterator[bytes]:
+        # pylint: disable=line-too-long
+        """The Download operation reads or downloads a blob from the system, including its metadata and
+        properties. You can also call Download to read a snapshot.
+
+        :param snapshot: The snapshot parameter is an opaque DateTime value that, when present,
+         specifies the blob snapshot to retrieve. For more information on working with blob snapshots,
+         see :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob">Creating
+         a Snapshot of a Blob.</a>`. Default value is None.
+        :type snapshot: str
+        :param version_id: The version id parameter is an opaque DateTime value that, when present,
+         specifies the version of the blob to operate on. It's for service version 2019-10-10 and newer.
+         Default value is None.
+        :type version_id: str
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param range: Return only the bytes of the blob in the specified range. Default value is None.
+        :type range: str
+        :param range_get_content_md5: When set to true and specified together with the Range, the
+         service returns the MD5 hash for the range, as long as the range is less than or equal to 4 MB
+         in size. Default value is None.
+        :type range_get_content_md5: bool
+        :param range_get_content_crc64: When set to true and specified together with the Range, the
+         service returns the CRC64 hash for the range, as long as the range is less than or equal to 4
+         MB in size. Default value is None.
+        :type range_get_content_crc64: bool
+        :param structured_body_type: Specifies that the response content should be returned as a
+         structured message, and specifies the message schema version and properties. Default value
+         is None.
+        :type structured_body_type: str
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param lease_access_conditions: Parameter group. Default value is None.
+        :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions
+        :param cpk_info: Parameter group. Default value is None.
+        :type cpk_info: ~azure.storage.blob.models.CpkInfo
+        :param modified_access_conditions: Parameter group. Default value is None.
+        :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions
+        :return: Iterator[bytes] or the result of cls(response)
+        :rtype: Iterator[bytes]
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = kwargs.pop("params", {}) or {}
+
+        cls: ClsType[Iterator[bytes]] = kwargs.pop("cls", None)
+
+        _lease_id = None
+        _encryption_key = None
+        _encryption_key_sha256 = None
+        _encryption_algorithm = None
+        _if_modified_since = None
+        _if_unmodified_since = None
+        _if_match = None
+        _if_none_match = None
+        _if_tags = None
+        if lease_access_conditions is not None:
+            _lease_id = lease_access_conditions.lease_id
+        if cpk_info is not None:
+            _encryption_algorithm = cpk_info.encryption_algorithm
+            _encryption_key = cpk_info.encryption_key
+            _encryption_key_sha256 = cpk_info.encryption_key_sha256
+        if modified_access_conditions is not None:
+            _if_match = modified_access_conditions.if_match
+            _if_modified_since = modified_access_conditions.if_modified_since
+            _if_none_match = modified_access_conditions.if_none_match
+            _if_tags = modified_access_conditions.if_tags
+            _if_unmodified_since = modified_access_conditions.if_unmodified_since
+
+        _request = build_download_request(
+            url=self._config.url,
+            snapshot=snapshot,
+            version_id=version_id,
+            timeout=timeout,
+            range=range,
+            lease_id=_lease_id,
+            range_get_content_md5=range_get_content_md5,
+            range_get_content_crc64=range_get_content_crc64,
+            structured_body_type=structured_body_type,
+            encryption_key=_encryption_key,
+            encryption_key_sha256=_encryption_key_sha256,
+            encryption_algorithm=_encryption_algorithm,
+            if_modified_since=_if_modified_since,
+            if_unmodified_since=_if_unmodified_since,
+            if_match=_if_match,
+            if_none_match=_if_none_match,
+            if_tags=_if_tags,
+            request_id_parameter=request_id_parameter,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _decompress = kwargs.pop("decompress", True)
+        _stream = True
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200, 206]:
+            try:
+                response.read()  # Load the body into memory and close the socket
+            except (StreamConsumedError, StreamClosedError):
+                pass
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        if response.status_code == 200:
+            response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+            response_headers["x-ms-creation-time"] = self._deserialize(
+                "rfc-1123", response.headers.get("x-ms-creation-time")
+            )
+            response_headers["x-ms-meta"] = self._deserialize("{str}", response.headers.get("x-ms-meta"))
+            response_headers["x-ms-or-policy-id"] = self._deserialize("str", response.headers.get("x-ms-or-policy-id"))
+            response_headers["x-ms-or"] = self._deserialize("{str}", response.headers.get("x-ms-or"))
+            response_headers["Content-Length"] = self._deserialize("int", response.headers.get("Content-Length"))
+            response_headers["Content-Type"] = self._deserialize("str", response.headers.get("Content-Type"))
+            response_headers["Content-Range"] = self._deserialize("str", response.headers.get("Content-Range"))
+            response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+            response_headers["Content-MD5"] = self._deserialize("bytearray", response.headers.get("Content-MD5"))
+            response_headers["Content-Encoding"] = self._deserialize("str", response.headers.get("Content-Encoding"))
+            response_headers["Cache-Control"] = self._deserialize("str", response.headers.get("Cache-Control"))
+            response_headers["Content-Disposition"] = self._deserialize(
+                "str", response.headers.get("Content-Disposition")
+            )
+            response_headers["Content-Language"] = self._deserialize("str", response.headers.get("Content-Language"))
+            response_headers["x-ms-blob-sequence-number"] = self._deserialize(
+                "int", response.headers.get("x-ms-blob-sequence-number")
+            )
+            response_headers["x-ms-blob-type"] = self._deserialize("str", response.headers.get("x-ms-blob-type"))
+            response_headers["x-ms-copy-completion-time"] = self._deserialize(
+                "rfc-1123", response.headers.get("x-ms-copy-completion-time")
+            )
+            response_headers["x-ms-copy-status-description"] = self._deserialize(
+                "str", response.headers.get("x-ms-copy-status-description")
+            )
+            response_headers["x-ms-copy-id"] = self._deserialize("str", response.headers.get("x-ms-copy-id"))
+            response_headers["x-ms-copy-progress"] = self._deserialize(
+                "str", response.headers.get("x-ms-copy-progress")
+            )
+            response_headers["x-ms-copy-source"] = self._deserialize("str", response.headers.get("x-ms-copy-source"))
+            response_headers["x-ms-copy-status"] = self._deserialize("str", response.headers.get("x-ms-copy-status"))
+            response_headers["x-ms-lease-duration"] = self._deserialize(
+                "str", response.headers.get("x-ms-lease-duration")
+            )
+            response_headers["x-ms-lease-state"] = self._deserialize("str", response.headers.get("x-ms-lease-state"))
+            response_headers["x-ms-lease-status"] = self._deserialize("str", response.headers.get("x-ms-lease-status"))
+            response_headers["x-ms-client-request-id"] = self._deserialize(
+                "str", response.headers.get("x-ms-client-request-id")
+            )
+            response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+            response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+            response_headers["x-ms-version-id"] = self._deserialize("str", response.headers.get("x-ms-version-id"))
+            response_headers["x-ms-is-current-version"] = self._deserialize(
+                "bool", response.headers.get("x-ms-is-current-version")
+            )
+            response_headers["Accept-Ranges"] = self._deserialize("str", response.headers.get("Accept-Ranges"))
+            response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+            response_headers["x-ms-blob-committed-block-count"] = self._deserialize(
+                "int", response.headers.get("x-ms-blob-committed-block-count")
+            )
+            response_headers["x-ms-server-encrypted"] = self._deserialize(
+                "bool", response.headers.get("x-ms-server-encrypted")
+            )
+            response_headers["x-ms-encryption-key-sha256"] = self._deserialize(
+                "str", response.headers.get("x-ms-encryption-key-sha256")
+            )
+            response_headers["x-ms-encryption-scope"] = self._deserialize(
+                "str", response.headers.get("x-ms-encryption-scope")
+            )
+            response_headers["x-ms-blob-content-md5"] = self._deserialize(
+                "bytearray", response.headers.get("x-ms-blob-content-md5")
+            )
+            response_headers["x-ms-tag-count"] = self._deserialize("int", response.headers.get("x-ms-tag-count"))
+            response_headers["x-ms-blob-sealed"] = self._deserialize("bool", response.headers.get("x-ms-blob-sealed"))
+            response_headers["x-ms-last-access-time"] = self._deserialize(
+                "rfc-1123", response.headers.get("x-ms-last-access-time")
+            )
+            response_headers["x-ms-immutability-policy-until-date"] = self._deserialize(
+                "rfc-1123", response.headers.get("x-ms-immutability-policy-until-date")
+            )
+            response_headers["x-ms-immutability-policy-mode"] = self._deserialize(
+                "str", response.headers.get("x-ms-immutability-policy-mode")
+            )
+            response_headers["x-ms-legal-hold"] = self._deserialize("bool", response.headers.get("x-ms-legal-hold"))
+            response_headers["x-ms-structured-body"] = self._deserialize(
+                "str", response.headers.get("x-ms-structured-body")
+            )
+            response_headers["x-ms-structured-content-length"] = self._deserialize(
+                "int", response.headers.get("x-ms-structured-content-length")
+            )
+
+        if response.status_code == 206:
+            response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+            response_headers["x-ms-creation-time"] = self._deserialize(
+                "rfc-1123", response.headers.get("x-ms-creation-time")
+            )
+            response_headers["x-ms-meta"] = self._deserialize("{str}", response.headers.get("x-ms-meta"))
+            response_headers["x-ms-or-policy-id"] = self._deserialize("str", response.headers.get("x-ms-or-policy-id"))
+            response_headers["x-ms-or"] = self._deserialize("{str}", response.headers.get("x-ms-or"))
+            response_headers["Content-Length"] = self._deserialize("int", response.headers.get("Content-Length"))
+            response_headers["Content-Type"] = self._deserialize("str", response.headers.get("Content-Type"))
+            response_headers["Content-Range"] = self._deserialize("str", response.headers.get("Content-Range"))
+            response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+            response_headers["Content-MD5"] = self._deserialize("bytearray", response.headers.get("Content-MD5"))
+            response_headers["Content-Encoding"] = self._deserialize("str", response.headers.get("Content-Encoding"))
+            response_headers["Cache-Control"] = self._deserialize("str", response.headers.get("Cache-Control"))
+            response_headers["Content-Disposition"] = self._deserialize(
+                "str", response.headers.get("Content-Disposition")
+            )
+            response_headers["Content-Language"] = self._deserialize("str", response.headers.get("Content-Language"))
+            response_headers["x-ms-blob-sequence-number"] = self._deserialize(
+                "int", response.headers.get("x-ms-blob-sequence-number")
+            )
+            response_headers["x-ms-blob-type"] = self._deserialize("str", response.headers.get("x-ms-blob-type"))
+            response_headers["x-ms-content-crc64"] = self._deserialize(
+                "bytearray", response.headers.get("x-ms-content-crc64")
+            )
+            response_headers["x-ms-copy-completion-time"] = self._deserialize(
+                "rfc-1123", response.headers.get("x-ms-copy-completion-time")
+            )
+            response_headers["x-ms-copy-status-description"] = self._deserialize(
+                "str", response.headers.get("x-ms-copy-status-description")
+            )
+            response_headers["x-ms-copy-id"] = self._deserialize("str", response.headers.get("x-ms-copy-id"))
+            response_headers["x-ms-copy-progress"] = self._deserialize(
+                "str", response.headers.get("x-ms-copy-progress")
+            )
+            response_headers["x-ms-copy-source"] = self._deserialize("str", response.headers.get("x-ms-copy-source"))
+            response_headers["x-ms-copy-status"] = self._deserialize("str", response.headers.get("x-ms-copy-status"))
+            response_headers["x-ms-lease-duration"] = self._deserialize(
+                "str", response.headers.get("x-ms-lease-duration")
+            )
+            response_headers["x-ms-lease-state"] = self._deserialize("str", response.headers.get("x-ms-lease-state"))
+            response_headers["x-ms-lease-status"] = self._deserialize("str", response.headers.get("x-ms-lease-status"))
+            response_headers["x-ms-client-request-id"] = self._deserialize(
+                "str", response.headers.get("x-ms-client-request-id")
+            )
+            response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+            response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+            response_headers["x-ms-version-id"] = self._deserialize("str", response.headers.get("x-ms-version-id"))
+            response_headers["x-ms-is-current-version"] = self._deserialize(
+                "bool", response.headers.get("x-ms-is-current-version")
+            )
+            response_headers["Accept-Ranges"] = self._deserialize("str", response.headers.get("Accept-Ranges"))
+            response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+            response_headers["x-ms-blob-committed-block-count"] = self._deserialize(
+                "int", response.headers.get("x-ms-blob-committed-block-count")
+            )
+            response_headers["x-ms-server-encrypted"] = self._deserialize(
+                "bool", response.headers.get("x-ms-server-encrypted")
+            )
+            response_headers["x-ms-encryption-key-sha256"] = self._deserialize(
+                "str", response.headers.get("x-ms-encryption-key-sha256")
+            )
+            response_headers["x-ms-encryption-scope"] = self._deserialize(
+                "str", response.headers.get("x-ms-encryption-scope")
+            )
+            response_headers["x-ms-blob-content-md5"] = self._deserialize(
+                "bytearray", response.headers.get("x-ms-blob-content-md5")
+            )
+            response_headers["x-ms-tag-count"] = self._deserialize("int", response.headers.get("x-ms-tag-count"))
+            response_headers["x-ms-blob-sealed"] = self._deserialize("bool", response.headers.get("x-ms-blob-sealed"))
+            response_headers["x-ms-last-access-time"] = self._deserialize(
+                "rfc-1123", response.headers.get("x-ms-last-access-time")
+            )
+            response_headers["x-ms-immutability-policy-until-date"] = self._deserialize(
+                "rfc-1123", response.headers.get("x-ms-immutability-policy-until-date")
+            )
+            response_headers["x-ms-immutability-policy-mode"] = self._deserialize(
+                "str", response.headers.get("x-ms-immutability-policy-mode")
+            )
+            response_headers["x-ms-legal-hold"] = self._deserialize("bool", response.headers.get("x-ms-legal-hold"))
+            response_headers["x-ms-structured-body"] = self._deserialize(
+                "str", response.headers.get("x-ms-structured-body")
+            )
+            response_headers["x-ms-structured-content-length"] = self._deserialize(
+                "int", response.headers.get("x-ms-structured-content-length")
+            )
+
+        deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
+
+        if cls:
+            return cls(pipeline_response, deserialized, response_headers)  # type: ignore
+
+        return deserialized  # type: ignore
+
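+    # Illustrative only, not generated code: a minimal sketch of consuming the
+    # download operation above. "ops" is assumed to be the BlobOperations
+    # instance exposed as AzureBlobStorage.blob. The returned iterator streams
+    # the body chunk by chunk, so the whole blob never has to fit in memory.
+    #
+    #     def save_blob_to_file(ops: "BlobOperations", path: str) -> None:
+    #         with open(path, "wb") as handle:
+    #             for chunk in ops.download(timeout=30):
+    #                 handle.write(chunk)
+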
+    @distributed_trace
+    def get_properties(  # pylint: disable=inconsistent-return-statements
+        self,
+        snapshot: Optional[str] = None,
+        version_id: Optional[str] = None,
+        timeout: Optional[int] = None,
+        request_id_parameter: Optional[str] = None,
+        lease_access_conditions: Optional[_models.LeaseAccessConditions] = None,
+        cpk_info: Optional[_models.CpkInfo] = None,
+        modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """The Get Properties operation returns all user-defined metadata, standard HTTP properties, and
+        system properties for the blob. It does not return the content of the blob.
+
+        :param snapshot: The snapshot parameter is an opaque DateTime value that, when present,
+         specifies the blob snapshot to retrieve. For more information on working with blob snapshots,
+         see :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob">Creating
+         a Snapshot of a Blob.</a>`. Default value is None.
+        :type snapshot: str
+        :param version_id: The version id parameter is an opaque DateTime value that, when present,
+         specifies the version of the blob to operate on. It's for service version 2019-10-10 and newer.
+         Default value is None.
+        :type version_id: str
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param lease_access_conditions: Parameter group. Default value is None.
+        :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions
+        :param cpk_info: Parameter group. Default value is None.
+        :type cpk_info: ~azure.storage.blob.models.CpkInfo
+        :param modified_access_conditions: Parameter group. Default value is None.
+        :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = kwargs.pop("params", {}) or {}
+
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _lease_id = None
+        _encryption_key = None
+        _encryption_key_sha256 = None
+        _encryption_algorithm = None
+        _if_modified_since = None
+        _if_unmodified_since = None
+        _if_match = None
+        _if_none_match = None
+        _if_tags = None
+        if lease_access_conditions is not None:
+            _lease_id = lease_access_conditions.lease_id
+        if cpk_info is not None:
+            _encryption_algorithm = cpk_info.encryption_algorithm
+            _encryption_key = cpk_info.encryption_key
+            _encryption_key_sha256 = cpk_info.encryption_key_sha256
+        if modified_access_conditions is not None:
+            _if_match = modified_access_conditions.if_match
+            _if_modified_since = modified_access_conditions.if_modified_since
+            _if_none_match = modified_access_conditions.if_none_match
+            _if_tags = modified_access_conditions.if_tags
+            _if_unmodified_since = modified_access_conditions.if_unmodified_since
+
+        _request = build_get_properties_request(
+            url=self._config.url,
+            snapshot=snapshot,
+            version_id=version_id,
+            timeout=timeout,
+            lease_id=_lease_id,
+            encryption_key=_encryption_key,
+            encryption_key_sha256=_encryption_key_sha256,
+            encryption_algorithm=_encryption_algorithm,
+            if_modified_since=_if_modified_since,
+            if_unmodified_since=_if_unmodified_since,
+            if_match=_if_match,
+            if_none_match=_if_none_match,
+            if_tags=_if_tags,
+            request_id_parameter=request_id_parameter,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["x-ms-creation-time"] = self._deserialize(
+            "rfc-1123", response.headers.get("x-ms-creation-time")
+        )
+        response_headers["x-ms-meta"] = self._deserialize("{str}", response.headers.get("x-ms-meta"))
+        response_headers["x-ms-or-policy-id"] = self._deserialize("str", response.headers.get("x-ms-or-policy-id"))
+        response_headers["x-ms-or"] = self._deserialize("{str}", response.headers.get("x-ms-or"))
+        response_headers["x-ms-blob-type"] = self._deserialize("str", response.headers.get("x-ms-blob-type"))
+        response_headers["x-ms-copy-completion-time"] = self._deserialize(
+            "rfc-1123", response.headers.get("x-ms-copy-completion-time")
+        )
+        response_headers["x-ms-copy-status-description"] = self._deserialize(
+            "str", response.headers.get("x-ms-copy-status-description")
+        )
+        response_headers["x-ms-copy-id"] = self._deserialize("str", response.headers.get("x-ms-copy-id"))
+        response_headers["x-ms-copy-progress"] = self._deserialize("str", response.headers.get("x-ms-copy-progress"))
+        response_headers["x-ms-copy-source"] = self._deserialize("str", response.headers.get("x-ms-copy-source"))
+        response_headers["x-ms-copy-status"] = self._deserialize("str", response.headers.get("x-ms-copy-status"))
+        response_headers["x-ms-incremental-copy"] = self._deserialize(
+            "bool", response.headers.get("x-ms-incremental-copy")
+        )
+        response_headers["x-ms-copy-destination-snapshot"] = self._deserialize(
+            "str", response.headers.get("x-ms-copy-destination-snapshot")
+        )
+        response_headers["x-ms-lease-duration"] = self._deserialize("str", response.headers.get("x-ms-lease-duration"))
+        response_headers["x-ms-lease-state"] = self._deserialize("str", response.headers.get("x-ms-lease-state"))
+        response_headers["x-ms-lease-status"] = self._deserialize("str", response.headers.get("x-ms-lease-status"))
+        response_headers["Content-Length"] = self._deserialize("int", response.headers.get("Content-Length"))
+        response_headers["Content-Type"] = self._deserialize("str", response.headers.get("Content-Type"))
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Content-MD5"] = self._deserialize("bytearray", response.headers.get("Content-MD5"))
+        response_headers["Content-Encoding"] = self._deserialize("str", response.headers.get("Content-Encoding"))
+        response_headers["Content-Disposition"] = self._deserialize("str", response.headers.get("Content-Disposition"))
+        response_headers["Content-Language"] = self._deserialize("str", response.headers.get("Content-Language"))
+        response_headers["Cache-Control"] = self._deserialize("str", response.headers.get("Cache-Control"))
+        response_headers["x-ms-blob-sequence-number"] = self._deserialize(
+            "int", response.headers.get("x-ms-blob-sequence-number")
+        )
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+        response_headers["Accept-Ranges"] = self._deserialize("str", response.headers.get("Accept-Ranges"))
+        response_headers["x-ms-blob-committed-block-count"] = self._deserialize(
+            "int", response.headers.get("x-ms-blob-committed-block-count")
+        )
+        response_headers["x-ms-server-encrypted"] = self._deserialize(
+            "bool", response.headers.get("x-ms-server-encrypted")
+        )
+        response_headers["x-ms-encryption-key-sha256"] = self._deserialize(
+            "str", response.headers.get("x-ms-encryption-key-sha256")
+        )
+        response_headers["x-ms-encryption-scope"] = self._deserialize(
+            "str", response.headers.get("x-ms-encryption-scope")
+        )
+        response_headers["x-ms-access-tier"] = self._deserialize("str", response.headers.get("x-ms-access-tier"))
+        response_headers["x-ms-access-tier-inferred"] = self._deserialize(
+            "bool", response.headers.get("x-ms-access-tier-inferred")
+        )
+        response_headers["x-ms-archive-status"] = self._deserialize("str", response.headers.get("x-ms-archive-status"))
+        response_headers["x-ms-access-tier-change-time"] = self._deserialize(
+            "rfc-1123", response.headers.get("x-ms-access-tier-change-time")
+        )
+        response_headers["x-ms-version-id"] = self._deserialize("str", response.headers.get("x-ms-version-id"))
+        response_headers["x-ms-is-current-version"] = self._deserialize(
+            "bool", response.headers.get("x-ms-is-current-version")
+        )
+        response_headers["x-ms-tag-count"] = self._deserialize("int", response.headers.get("x-ms-tag-count"))
+        response_headers["x-ms-expiry-time"] = self._deserialize("rfc-1123", response.headers.get("x-ms-expiry-time"))
+        response_headers["x-ms-blob-sealed"] = self._deserialize("bool", response.headers.get("x-ms-blob-sealed"))
+        response_headers["x-ms-rehydrate-priority"] = self._deserialize(
+            "str", response.headers.get("x-ms-rehydrate-priority")
+        )
+        response_headers["x-ms-last-access-time"] = self._deserialize(
+            "rfc-1123", response.headers.get("x-ms-last-access-time")
+        )
+        response_headers["x-ms-immutability-policy-until-date"] = self._deserialize(
+            "rfc-1123", response.headers.get("x-ms-immutability-policy-until-date")
+        )
+        response_headers["x-ms-immutability-policy-mode"] = self._deserialize(
+            "str", response.headers.get("x-ms-immutability-policy-mode")
+        )
+        response_headers["x-ms-legal-hold"] = self._deserialize("bool", response.headers.get("x-ms-legal-hold"))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
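+    # Illustrative only: get_properties returns None, so header values are
+    # surfaced through the optional "cls" callback. A hypothetical sketch:
+    #
+    #     def read_etag(ops: "BlobOperations") -> str:
+    #         headers = ops.get_properties(cls=lambda resp, body, hdrs: hdrs)
+    #         return headers["ETag"]
+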
+    @distributed_trace
+    def delete(  # pylint: disable=inconsistent-return-statements
+        self,
+        snapshot: Optional[str] = None,
+        version_id: Optional[str] = None,
+        timeout: Optional[int] = None,
+        delete_snapshots: Optional[Union[str, _models.DeleteSnapshotsOptionType]] = None,
+        request_id_parameter: Optional[str] = None,
+        blob_delete_type: Literal["Permanent"] = "Permanent",
+        lease_access_conditions: Optional[_models.LeaseAccessConditions] = None,
+        modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """If the storage account's soft delete feature is disabled then, when a blob is deleted, it is
+        permanently removed from the storage account. If the storage account's soft delete feature is
+        enabled, then, when a blob is deleted, it is marked for deletion and becomes inaccessible
+        immediately. However, the blob service retains the blob or snapshot for the number of days
+        specified by the DeleteRetentionPolicy section of [Storage service properties]
+        (Set-Blob-Service-Properties.md). After the specified number of days has passed, the blob's
+        data is permanently removed from the storage account. Note that you continue to be charged for
+        the soft-deleted blob's storage until it is permanently removed. Use the List Blobs API and
+        specify the "include=deleted" query parameter to discover which blobs and snapshots have been
+        soft deleted. You can then use the Undelete Blob API to restore a soft-deleted blob. All other
+        operations on a soft-deleted blob or snapshot causes the service to return an HTTP status code
+        of 404 (ResourceNotFound).
+
+        :param snapshot: The snapshot parameter is an opaque DateTime value that, when present,
+         specifies the blob snapshot to retrieve. For more information on working with blob snapshots,
+         see :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob">Creating
+         a Snapshot of a Blob.</a>`. Default value is None.
+        :type snapshot: str
+        :param version_id: The version id parameter is an opaque DateTime value that, when present,
+         specifies the version of the blob to operate on. It's for service version 2019-10-10 and newer.
+         Default value is None.
+        :type version_id: str
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param delete_snapshots: Required if the blob has associated snapshots. Specify one of the
+         following two options: "include" deletes the base blob and all of its snapshots; "only"
+         deletes only the blob's snapshots and not the blob itself. Known values are: "include" and
+         "only". Default value is None.
+        :type delete_snapshots: str or ~azure.storage.blob.models.DeleteSnapshotsOptionType
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param blob_delete_type: Optional. The only possible value is "Permanent", which specifies
+         that the blob should be permanently deleted if blob soft delete is enabled. Known values are
+         "Permanent" and None. Default value is "Permanent".
+        :type blob_delete_type: str
+        :param lease_access_conditions: Parameter group. Default value is None.
+        :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions
+        :param modified_access_conditions: Parameter group. Default value is None.
+        :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = kwargs.pop("params", {}) or {}
+
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _lease_id = None
+        _if_modified_since = None
+        _if_unmodified_since = None
+        _if_match = None
+        _if_none_match = None
+        _if_tags = None
+        if lease_access_conditions is not None:
+            _lease_id = lease_access_conditions.lease_id
+        if modified_access_conditions is not None:
+            _if_match = modified_access_conditions.if_match
+            _if_modified_since = modified_access_conditions.if_modified_since
+            _if_none_match = modified_access_conditions.if_none_match
+            _if_tags = modified_access_conditions.if_tags
+            _if_unmodified_since = modified_access_conditions.if_unmodified_since
+
+        _request = build_delete_request(
+            url=self._config.url,
+            snapshot=snapshot,
+            version_id=version_id,
+            timeout=timeout,
+            lease_id=_lease_id,
+            delete_snapshots=delete_snapshots,
+            if_modified_since=_if_modified_since,
+            if_unmodified_since=_if_unmodified_since,
+            if_match=_if_match,
+            if_none_match=_if_none_match,
+            if_tags=_if_tags,
+            request_id_parameter=request_id_parameter,
+            blob_delete_type=blob_delete_type,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [202]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
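+    # Illustrative only: a sketch of deleting a blob together with its
+    # snapshots. Passing blob_delete_type=None requests a normal (soft) delete
+    # rather than a permanent one, per the known values noted in the docstring.
+    #
+    #     def delete_blob_and_snapshots(ops: "BlobOperations") -> None:
+    #         ops.delete(delete_snapshots="include", blob_delete_type=None, timeout=30)
+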
+    @distributed_trace
+    def undelete(  # pylint: disable=inconsistent-return-statements
+        self, timeout: Optional[int] = None, request_id_parameter: Optional[str] = None, **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """Undelete a blob that was previously soft deleted.
+
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        comp: Literal["undelete"] = kwargs.pop("comp", _params.pop("comp", "undelete"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _request = build_undelete_request(
+            url=self._config.url,
+            timeout=timeout,
+            request_id_parameter=request_id_parameter,
+            comp=comp,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
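+    # Sketch: restoring a soft-deleted blob via the public wrapper, assuming
+    # soft delete is enabled on the account and reusing the BlobClient `blob`
+    # from the delete sketch above.
+    #
+    #     blob.undelete_blob()  # routes to this generated undelete() call
+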
+    @distributed_trace
+    def set_expiry(  # pylint: disable=inconsistent-return-statements
+        self,
+        expiry_options: Union[str, _models.BlobExpiryOptions],
+        timeout: Optional[int] = None,
+        request_id_parameter: Optional[str] = None,
+        expires_on: Optional[str] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """Sets the time a blob will expire and be deleted.
+
+        :param expiry_options: Indicates the mode of the expiry time. Known values are:
+         "NeverExpire", "RelativeToCreation", "RelativeToNow", and "Absolute". Required.
+        :type expiry_options: str or ~azure.storage.blob.models.BlobExpiryOptions
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param expires_on: The time at which the blob should expire. Default value is None.
+        :type expires_on: str
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        comp: Literal["expiry"] = kwargs.pop("comp", _params.pop("comp", "expiry"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _request = build_set_expiry_request(
+            url=self._config.url,
+            expiry_options=expiry_options,
+            timeout=timeout,
+            request_id_parameter=request_id_parameter,
+            expires_on=expires_on,
+            comp=comp,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
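+    # Note: azure-storage-blob exposes no public wrapper for set_expiry; on
+    # hierarchical-namespace accounts the public equivalent is
+    # DataLakeFileClient.set_file_expiry. A sketch that reaches this generated
+    # layer directly (private attribute, shown for illustration only), assuming
+    # relative modes take the expiry as a milliseconds string:
+    #
+    #     blob._client.blob.set_expiry("RelativeToNow", expires_on="86400000")  # 24 hours
+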
+    @distributed_trace
+    def set_http_headers(  # pylint: disable=inconsistent-return-statements
+        self,
+        timeout: Optional[int] = None,
+        request_id_parameter: Optional[str] = None,
+        blob_http_headers: Optional[_models.BlobHTTPHeaders] = None,
+        lease_access_conditions: Optional[_models.LeaseAccessConditions] = None,
+        modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """The Set HTTP Headers operation sets system properties on the blob.
+
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param blob_http_headers: Parameter group. Default value is None.
+        :type blob_http_headers: ~azure.storage.blob.models.BlobHTTPHeaders
+        :param lease_access_conditions: Parameter group. Default value is None.
+        :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions
+        :param modified_access_conditions: Parameter group. Default value is None.
+        :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        comp: Literal["properties"] = kwargs.pop("comp", _params.pop("comp", "properties"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _blob_cache_control = None
+        _blob_content_type = None
+        _blob_content_md5 = None
+        _blob_content_encoding = None
+        _blob_content_language = None
+        _lease_id = None
+        _if_modified_since = None
+        _if_unmodified_since = None
+        _if_match = None
+        _if_none_match = None
+        _if_tags = None
+        _blob_content_disposition = None
+        if blob_http_headers is not None:
+            _blob_cache_control = blob_http_headers.blob_cache_control
+            _blob_content_disposition = blob_http_headers.blob_content_disposition
+            _blob_content_encoding = blob_http_headers.blob_content_encoding
+            _blob_content_language = blob_http_headers.blob_content_language
+            _blob_content_md5 = blob_http_headers.blob_content_md5
+            _blob_content_type = blob_http_headers.blob_content_type
+        if lease_access_conditions is not None:
+            _lease_id = lease_access_conditions.lease_id
+        if modified_access_conditions is not None:
+            _if_match = modified_access_conditions.if_match
+            _if_modified_since = modified_access_conditions.if_modified_since
+            _if_none_match = modified_access_conditions.if_none_match
+            _if_tags = modified_access_conditions.if_tags
+            _if_unmodified_since = modified_access_conditions.if_unmodified_since
+
+        _request = build_set_http_headers_request(
+            url=self._config.url,
+            timeout=timeout,
+            blob_cache_control=_blob_cache_control,
+            blob_content_type=_blob_content_type,
+            blob_content_md5=_blob_content_md5,
+            blob_content_encoding=_blob_content_encoding,
+            blob_content_language=_blob_content_language,
+            lease_id=_lease_id,
+            if_modified_since=_if_modified_since,
+            if_unmodified_since=_if_unmodified_since,
+            if_match=_if_match,
+            if_none_match=_if_none_match,
+            if_tags=_if_tags,
+            blob_content_disposition=_blob_content_disposition,
+            request_id_parameter=request_id_parameter,
+            comp=comp,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["x-ms-blob-sequence-number"] = self._deserialize(
+            "int", response.headers.get("x-ms-blob-sequence-number")
+        )
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
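+    # Sketch: the public wrapper maps a ContentSettings object onto the
+    # BlobHTTPHeaders parameter group used above (values are placeholders).
+    #
+    #     from azure.storage.blob import ContentSettings
+    #
+    #     blob.set_http_headers(content_settings=ContentSettings(
+    #         content_type="text/plain", cache_control="max-age=3600"))
+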
+    @distributed_trace
+    def set_immutability_policy(  # pylint: disable=inconsistent-return-statements
+        self,
+        timeout: Optional[int] = None,
+        request_id_parameter: Optional[str] = None,
+        immutability_policy_expiry: Optional[datetime.datetime] = None,
+        immutability_policy_mode: Optional[Union[str, _models.BlobImmutabilityPolicyMode]] = None,
+        snapshot: Optional[str] = None,
+        version_id: Optional[str] = None,
+        modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """The Set Immutability Policy operation sets the immutability policy on the blob.
+
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param immutability_policy_expiry: Specifies the date and time when the blob's immutability
+         policy expires. Default value is None.
+        :type immutability_policy_expiry: ~datetime.datetime
+        :param immutability_policy_mode: Specifies the immutability policy mode to set on the blob.
+         Known values are: "Mutable", "Unlocked", and "Locked". Default value is None.
+        :type immutability_policy_mode: str or ~azure.storage.blob.models.BlobImmutabilityPolicyMode
+        :param snapshot: The snapshot parameter is an opaque DateTime value that, when present,
+         specifies the blob snapshot to operate on. For more information on working with blob snapshots,
+         see :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob">Creating
+         a Snapshot of a Blob.</a>`. Default value is None.
+        :type snapshot: str
+        :param version_id: The version id parameter is an opaque DateTime value that, when present,
+         specifies the version of the blob to operate on. It is supported in service version 2019-10-10 and newer.
+         Default value is None.
+        :type version_id: str
+        :param modified_access_conditions: Parameter group. Default value is None.
+        :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        comp: Literal["immutabilityPolicies"] = kwargs.pop("comp", _params.pop("comp", "immutabilityPolicies"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _if_unmodified_since = None
+        if modified_access_conditions is not None:
+            _if_unmodified_since = modified_access_conditions.if_unmodified_since
+
+        _request = build_set_immutability_policy_request(
+            url=self._config.url,
+            timeout=timeout,
+            request_id_parameter=request_id_parameter,
+            if_unmodified_since=_if_unmodified_since,
+            immutability_policy_expiry=immutability_policy_expiry,
+            immutability_policy_mode=immutability_policy_mode,
+            snapshot=snapshot,
+            version_id=version_id,
+            comp=comp,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+        response_headers["x-ms-immutability-policy-until-date"] = self._deserialize(
+            "rfc-1123", response.headers.get("x-ms-immutability-policy-until-date")
+        )
+        response_headers["x-ms-immutability-policy-mode"] = self._deserialize(
+            "str", response.headers.get("x-ms-immutability-policy-mode")
+        )
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
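+    # Sketch: setting a time-based hold via the public wrapper, assuming
+    # version-level immutability support is enabled on the container.
+    #
+    #     import datetime
+    #     from azure.storage.blob import ImmutabilityPolicy
+    #
+    #     policy = ImmutabilityPolicy(
+    #         expiry_time=datetime.datetime.now(datetime.timezone.utc)
+    #         + datetime.timedelta(days=7),
+    #         policy_mode="Unlocked",
+    #     )
+    #     blob.set_immutability_policy(policy)
+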
+    @distributed_trace
+    def delete_immutability_policy(  # pylint: disable=inconsistent-return-statements
+        self,
+        timeout: Optional[int] = None,
+        request_id_parameter: Optional[str] = None,
+        snapshot: Optional[str] = None,
+        version_id: Optional[str] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """The Delete Immutability Policy operation deletes the immutability policy on the blob.
+
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param snapshot: The snapshot parameter is an opaque DateTime value that, when present,
+         specifies the blob snapshot to operate on. For more information on working with blob snapshots,
+         see :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob">Creating
+         a Snapshot of a Blob.</a>`. Default value is None.
+        :type snapshot: str
+        :param version_id: The version id parameter is an opaque DateTime value that, when present,
+         specifies the version of the blob to operate on. It is supported in service version 2019-10-10 and newer.
+         Default value is None.
+        :type version_id: str
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        comp: Literal["immutabilityPolicies"] = kwargs.pop("comp", _params.pop("comp", "immutabilityPolicies"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _request = build_delete_immutability_policy_request(
+            url=self._config.url,
+            timeout=timeout,
+            request_id_parameter=request_id_parameter,
+            snapshot=snapshot,
+            version_id=version_id,
+            comp=comp,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
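+    # Sketch: an Unlocked policy can be removed with the public wrapper; a
+    # Locked policy cannot be deleted before its expiry time.
+    #
+    #     blob.delete_immutability_policy()
+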
+    @distributed_trace
+    def set_legal_hold(  # pylint: disable=inconsistent-return-statements
+        self,
+        legal_hold: bool,
+        timeout: Optional[int] = None,
+        request_id_parameter: Optional[str] = None,
+        snapshot: Optional[str] = None,
+        version_id: Optional[str] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """The Set Legal Hold operation sets a legal hold on the blob.
+
+        :param legal_hold: Specifies whether a legal hold should be set on the blob. Required.
+        :type legal_hold: bool
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param snapshot: The snapshot parameter is an opaque DateTime value that, when present,
+         specifies the blob snapshot to operate on. For more information on working with blob snapshots,
+         see :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob">Creating
+         a Snapshot of a Blob.</a>`. Default value is None.
+        :type snapshot: str
+        :param version_id: The version id parameter is an opaque DateTime value that, when present,
+         specifies the version of the blob to operate on. It is supported in service version 2019-10-10 and newer.
+         Default value is None.
+        :type version_id: str
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        comp: Literal["legalhold"] = kwargs.pop("comp", _params.pop("comp", "legalhold"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _request = build_set_legal_hold_request(
+            url=self._config.url,
+            legal_hold=legal_hold,
+            timeout=timeout,
+            request_id_parameter=request_id_parameter,
+            snapshot=snapshot,
+            version_id=version_id,
+            comp=comp,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+        response_headers["x-ms-legal-hold"] = self._deserialize("bool", response.headers.get("x-ms-legal-hold"))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
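+    # Sketch: a legal hold is an on/off flag, so clearing it is the same public
+    # wrapper call with False.
+    #
+    #     blob.set_legal_hold(True)   # place the hold
+    #     blob.set_legal_hold(False)  # clear it again
+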
+    @distributed_trace
+    def set_metadata(  # pylint: disable=inconsistent-return-statements
+        self,
+        timeout: Optional[int] = None,
+        metadata: Optional[Dict[str, str]] = None,
+        request_id_parameter: Optional[str] = None,
+        lease_access_conditions: Optional[_models.LeaseAccessConditions] = None,
+        cpk_info: Optional[_models.CpkInfo] = None,
+        cpk_scope_info: Optional[_models.CpkScopeInfo] = None,
+        modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """The Set Blob Metadata operation sets user-defined metadata for the specified blob as one or
+        more name-value pairs.
+
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param metadata: Optional. Specifies user-defined name-value pairs associated with the blob.
+         If no name-value pairs are specified, the operation removes all existing metadata from the
+         blob; if one or more name-value pairs are specified, they replace the blob's metadata in
+         full. Note that beginning with version 2009-09-19, metadata names must adhere to the naming
+         rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more
+         information. Default value is None.
+        :type metadata: dict[str, str]
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param lease_access_conditions: Parameter group. Default value is None.
+        :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions
+        :param cpk_info: Parameter group. Default value is None.
+        :type cpk_info: ~azure.storage.blob.models.CpkInfo
+        :param cpk_scope_info: Parameter group. Default value is None.
+        :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo
+        :param modified_access_conditions: Parameter group. Default value is None.
+        :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        comp: Literal["metadata"] = kwargs.pop("comp", _params.pop("comp", "metadata"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _lease_id = None
+        _encryption_key = None
+        _encryption_key_sha256 = None
+        _encryption_algorithm = None
+        _encryption_scope = None
+        _if_modified_since = None
+        _if_unmodified_since = None
+        _if_match = None
+        _if_none_match = None
+        _if_tags = None
+        if lease_access_conditions is not None:
+            _lease_id = lease_access_conditions.lease_id
+        if cpk_info is not None:
+            _encryption_algorithm = cpk_info.encryption_algorithm
+            _encryption_key = cpk_info.encryption_key
+            _encryption_key_sha256 = cpk_info.encryption_key_sha256
+        if cpk_scope_info is not None:
+            _encryption_scope = cpk_scope_info.encryption_scope
+        if modified_access_conditions is not None:
+            _if_match = modified_access_conditions.if_match
+            _if_modified_since = modified_access_conditions.if_modified_since
+            _if_none_match = modified_access_conditions.if_none_match
+            _if_tags = modified_access_conditions.if_tags
+            _if_unmodified_since = modified_access_conditions.if_unmodified_since
+
+        _request = build_set_metadata_request(
+            url=self._config.url,
+            timeout=timeout,
+            metadata=metadata,
+            lease_id=_lease_id,
+            encryption_key=_encryption_key,
+            encryption_key_sha256=_encryption_key_sha256,
+            encryption_algorithm=_encryption_algorithm,
+            encryption_scope=_encryption_scope,
+            if_modified_since=_if_modified_since,
+            if_unmodified_since=_if_unmodified_since,
+            if_match=_if_match,
+            if_none_match=_if_none_match,
+            if_tags=_if_tags,
+            request_id_parameter=request_id_parameter,
+            comp=comp,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["x-ms-version-id"] = self._deserialize("str", response.headers.get("x-ms-version-id"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+        response_headers["x-ms-request-server-encrypted"] = self._deserialize(
+            "bool", response.headers.get("x-ms-request-server-encrypted")
+        )
+        response_headers["x-ms-encryption-key-sha256"] = self._deserialize(
+            "str", response.headers.get("x-ms-encryption-key-sha256")
+        )
+        response_headers["x-ms-encryption-scope"] = self._deserialize(
+            "str", response.headers.get("x-ms-encryption-scope")
+        )
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
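+    # Sketch: the public wrapper replaces the blob's metadata wholesale; an
+    # empty mapping (or no argument) clears all existing metadata.
+    #
+    #     blob.set_blob_metadata({"project": "alpha", "owner": "data-team"})
+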
+    @distributed_trace
+    def acquire_lease(  # pylint: disable=inconsistent-return-statements
+        self,
+        timeout: Optional[int] = None,
+        duration: Optional[int] = None,
+        proposed_lease_id: Optional[str] = None,
+        request_id_parameter: Optional[str] = None,
+        modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """[Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete
+        operations.
+
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param duration: Specifies the duration of the lease, in seconds, or negative one (-1) for a
+         lease that never expires. A non-infinite lease can be between 15 and 60 seconds. A lease
+         duration cannot be changed using renew or change. Default value is None.
+        :type duration: int
+        :param proposed_lease_id: Proposed lease ID, in a GUID string format. The Blob service returns
+         400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid
+         Constructor (String) for a list of valid GUID string formats. Default value is None.
+        :type proposed_lease_id: str
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param modified_access_conditions: Parameter group. Default value is None.
+        :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        comp: Literal["lease"] = kwargs.pop("comp", _params.pop("comp", "lease"))
+        action: Literal["acquire"] = kwargs.pop("action", _headers.pop("x-ms-lease-action", "acquire"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _if_modified_since = None
+        _if_unmodified_since = None
+        _if_match = None
+        _if_none_match = None
+        _if_tags = None
+        if modified_access_conditions is not None:
+            _if_match = modified_access_conditions.if_match
+            _if_modified_since = modified_access_conditions.if_modified_since
+            _if_none_match = modified_access_conditions.if_none_match
+            _if_tags = modified_access_conditions.if_tags
+            _if_unmodified_since = modified_access_conditions.if_unmodified_since
+
+        _request = build_acquire_lease_request(
+            url=self._config.url,
+            timeout=timeout,
+            duration=duration,
+            proposed_lease_id=proposed_lease_id,
+            if_modified_since=_if_modified_since,
+            if_unmodified_since=_if_unmodified_since,
+            if_match=_if_match,
+            if_none_match=_if_none_match,
+            if_tags=_if_tags,
+            request_id_parameter=request_id_parameter,
+            comp=comp,
+            action=action,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [201]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["x-ms-lease-id"] = self._deserialize("str", response.headers.get("x-ms-lease-id"))
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
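+    # Sketch: the public wrapper returns a BlobLeaseClient whose ID must
+    # accompany subsequent writes (duration 15-60 seconds, or -1 for infinite).
+    #
+    #     lease = blob.acquire_lease(lease_duration=15)
+    #     blob.set_blob_metadata({"stage": "locked"}, lease=lease)
+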
+    @distributed_trace
+    def release_lease(  # pylint: disable=inconsistent-return-statements
+        self,
+        lease_id: str,
+        timeout: Optional[int] = None,
+        request_id_parameter: Optional[str] = None,
+        modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """[Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete
+        operations.
+
+        :param lease_id: Specifies the current lease ID on the resource. Required.
+        :type lease_id: str
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param modified_access_conditions: Parameter group. Default value is None.
+        :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        comp: Literal["lease"] = kwargs.pop("comp", _params.pop("comp", "lease"))
+        action: Literal["release"] = kwargs.pop("action", _headers.pop("x-ms-lease-action", "release"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _if_modified_since = None
+        _if_unmodified_since = None
+        _if_match = None
+        _if_none_match = None
+        _if_tags = None
+        if modified_access_conditions is not None:
+            _if_match = modified_access_conditions.if_match
+            _if_modified_since = modified_access_conditions.if_modified_since
+            _if_none_match = modified_access_conditions.if_none_match
+            _if_tags = modified_access_conditions.if_tags
+            _if_unmodified_since = modified_access_conditions.if_unmodified_since
+
+        _request = build_release_lease_request(
+            url=self._config.url,
+            lease_id=lease_id,
+            timeout=timeout,
+            if_modified_since=_if_modified_since,
+            if_unmodified_since=_if_unmodified_since,
+            if_match=_if_match,
+            if_none_match=_if_none_match,
+            if_tags=_if_tags,
+            request_id_parameter=request_id_parameter,
+            comp=comp,
+            action=action,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
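+    # Sketch: releasing via the BlobLeaseClient handle from the acquire sketch
+    # above; the blob becomes immediately leasable by other clients.
+    #
+    #     lease.release()
+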
+    @distributed_trace
+    def renew_lease(  # pylint: disable=inconsistent-return-statements
+        self,
+        lease_id: str,
+        timeout: Optional[int] = None,
+        request_id_parameter: Optional[str] = None,
+        modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """[Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete
+        operations.
+
+        :param lease_id: Specifies the current lease ID on the resource. Required.
+        :type lease_id: str
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param modified_access_conditions: Parameter group. Default value is None.
+        :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        comp: Literal["lease"] = kwargs.pop("comp", _params.pop("comp", "lease"))
+        action: Literal["renew"] = kwargs.pop("action", _headers.pop("x-ms-lease-action", "renew"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _if_modified_since = None
+        _if_unmodified_since = None
+        _if_match = None
+        _if_none_match = None
+        _if_tags = None
+        if modified_access_conditions is not None:
+            _if_match = modified_access_conditions.if_match
+            _if_modified_since = modified_access_conditions.if_modified_since
+            _if_none_match = modified_access_conditions.if_none_match
+            _if_tags = modified_access_conditions.if_tags
+            _if_unmodified_since = modified_access_conditions.if_unmodified_since
+
+        _request = build_renew_lease_request(
+            url=self._config.url,
+            lease_id=lease_id,
+            timeout=timeout,
+            if_modified_since=_if_modified_since,
+            if_unmodified_since=_if_unmodified_since,
+            if_match=_if_match,
+            if_none_match=_if_none_match,
+            if_tags=_if_tags,
+            request_id_parameter=request_id_parameter,
+            comp=comp,
+            action=action,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["x-ms-lease-id"] = self._deserialize("str", response.headers.get("x-ms-lease-id"))
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
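+    # Sketch: renewal reuses the existing lease ID and restarts the lease
+    # clock with the originally specified duration.
+    #
+    #     lease.renew()
+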
+    @distributed_trace
+    def change_lease(  # pylint: disable=inconsistent-return-statements
+        self,
+        lease_id: str,
+        proposed_lease_id: str,
+        timeout: Optional[int] = None,
+        request_id_parameter: Optional[str] = None,
+        modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """[Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete
+        operations.
+
+        :param lease_id: Specifies the current lease ID on the resource. Required.
+        :type lease_id: str
+        :param proposed_lease_id: Proposed lease ID, in a GUID string format. The Blob service returns
+         400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid
+         Constructor (String) for a list of valid GUID string formats. Required.
+        :type proposed_lease_id: str
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param modified_access_conditions: Parameter group. Default value is None.
+        :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        comp: Literal["lease"] = kwargs.pop("comp", _params.pop("comp", "lease"))
+        action: Literal["change"] = kwargs.pop("action", _headers.pop("x-ms-lease-action", "change"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _if_modified_since = None
+        _if_unmodified_since = None
+        _if_match = None
+        _if_none_match = None
+        _if_tags = None
+        if modified_access_conditions is not None:
+            _if_match = modified_access_conditions.if_match
+            _if_modified_since = modified_access_conditions.if_modified_since
+            _if_none_match = modified_access_conditions.if_none_match
+            _if_tags = modified_access_conditions.if_tags
+            _if_unmodified_since = modified_access_conditions.if_unmodified_since
+
+        _request = build_change_lease_request(
+            url=self._config.url,
+            lease_id=lease_id,
+            proposed_lease_id=proposed_lease_id,
+            timeout=timeout,
+            if_modified_since=_if_modified_since,
+            if_unmodified_since=_if_unmodified_since,
+            if_match=_if_match,
+            if_none_match=_if_none_match,
+            if_tags=_if_tags,
+            request_id_parameter=request_id_parameter,
+            comp=comp,
+            action=action,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-lease-id"] = self._deserialize("str", response.headers.get("x-ms-lease-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
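+    # Sketch: rotating the lease to a caller-chosen GUID while the lock stays
+    # held throughout.
+    #
+    #     import uuid
+    #
+    #     lease.change(proposed_lease_id=str(uuid.uuid4()))
+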
+    @distributed_trace
+    def break_lease(  # pylint: disable=inconsistent-return-statements
+        self,
+        timeout: Optional[int] = None,
+        break_period: Optional[int] = None,
+        request_id_parameter: Optional[str] = None,
+        modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """[Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete
+        operations.
+
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param break_period: For a break operation, proposed duration the lease should continue before
+         it is broken, in seconds, between 0 and 60. This break period is only used if it is shorter
+         than the time remaining on the lease. If longer, the time remaining on the lease is used. A new
+         lease will not be available before the break period has expired, but the lease may be held for
+         longer than the break period. If this header does not appear with a break operation, a
+         fixed-duration lease breaks after the remaining lease period elapses, and an infinite lease
+         breaks immediately. Default value is None.
+        :type break_period: int
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param modified_access_conditions: Parameter group. Default value is None.
+        :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        comp: Literal["lease"] = kwargs.pop("comp", _params.pop("comp", "lease"))
+        action: Literal["break"] = kwargs.pop("action", _headers.pop("x-ms-lease-action", "break"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _if_modified_since = None
+        _if_unmodified_since = None
+        _if_match = None
+        _if_none_match = None
+        _if_tags = None
+        if modified_access_conditions is not None:
+            _if_match = modified_access_conditions.if_match
+            _if_modified_since = modified_access_conditions.if_modified_since
+            _if_none_match = modified_access_conditions.if_none_match
+            _if_tags = modified_access_conditions.if_tags
+            _if_unmodified_since = modified_access_conditions.if_unmodified_since
+
+        _request = build_break_lease_request(
+            url=self._config.url,
+            timeout=timeout,
+            break_period=break_period,
+            if_modified_since=_if_modified_since,
+            if_unmodified_since=_if_unmodified_since,
+            if_match=_if_match,
+            if_none_match=_if_none_match,
+            if_tags=_if_tags,
+            request_id_parameter=request_id_parameter,
+            comp=comp,
+            action=action,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [202]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["x-ms-lease-time"] = self._deserialize("int", response.headers.get("x-ms-lease-time"))
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
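+    # Usage sketch for break_lease (illustrative only, not generated code;
+    # `ops` is assumed to be an authenticated instance of this operations
+    # class scoped to a leased blob). A zero break period breaks the lease
+    # immediately; a positive value lets it wind down for at most that long:
+    #
+    #     ops.break_lease(break_period=0)   # break now
+    #     ops.break_lease(break_period=30)  # break within 30 seconds
+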
+    @distributed_trace
+    def create_snapshot(  # pylint: disable=inconsistent-return-statements
+        self,
+        timeout: Optional[int] = None,
+        metadata: Optional[Dict[str, str]] = None,
+        request_id_parameter: Optional[str] = None,
+        cpk_info: Optional[_models.CpkInfo] = None,
+        cpk_scope_info: Optional[_models.CpkScopeInfo] = None,
+        modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None,
+        lease_access_conditions: Optional[_models.LeaseAccessConditions] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """The Create Snapshot operation creates a read-only snapshot of a blob.
+
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param metadata: Optional. Specifies a user-defined name-value pair associated with the blob.
+         If no name-value pairs are specified, the snapshot is created with the metadata of the base
+         blob. If one or more name-value pairs are specified, the snapshot is created with the
+         specified metadata, and metadata is not copied from the base blob. Note that beginning with
+         version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See
+         Naming and Referencing Containers, Blobs, and Metadata for more information. Default value is
+         None.
+        :type metadata: dict[str, str]
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param cpk_info: Parameter group. Default value is None.
+        :type cpk_info: ~azure.storage.blob.models.CpkInfo
+        :param cpk_scope_info: Parameter group. Default value is None.
+        :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo
+        :param modified_access_conditions: Parameter group. Default value is None.
+        :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions
+        :param lease_access_conditions: Parameter group. Default value is None.
+        :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        comp: Literal["snapshot"] = kwargs.pop("comp", _params.pop("comp", "snapshot"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _encryption_key = None
+        _encryption_key_sha256 = None
+        _encryption_algorithm = None
+        _encryption_scope = None
+        _if_modified_since = None
+        _if_unmodified_since = None
+        _if_match = None
+        _if_none_match = None
+        _if_tags = None
+        _lease_id = None
+        if cpk_info is not None:
+            _encryption_algorithm = cpk_info.encryption_algorithm
+            _encryption_key = cpk_info.encryption_key
+            _encryption_key_sha256 = cpk_info.encryption_key_sha256
+        if cpk_scope_info is not None:
+            _encryption_scope = cpk_scope_info.encryption_scope
+        if modified_access_conditions is not None:
+            _if_match = modified_access_conditions.if_match
+            _if_modified_since = modified_access_conditions.if_modified_since
+            _if_none_match = modified_access_conditions.if_none_match
+            _if_tags = modified_access_conditions.if_tags
+            _if_unmodified_since = modified_access_conditions.if_unmodified_since
+        if lease_access_conditions is not None:
+            _lease_id = lease_access_conditions.lease_id
+
+        _request = build_create_snapshot_request(
+            url=self._config.url,
+            timeout=timeout,
+            metadata=metadata,
+            encryption_key=_encryption_key,
+            encryption_key_sha256=_encryption_key_sha256,
+            encryption_algorithm=_encryption_algorithm,
+            encryption_scope=_encryption_scope,
+            if_modified_since=_if_modified_since,
+            if_unmodified_since=_if_unmodified_since,
+            if_match=_if_match,
+            if_none_match=_if_none_match,
+            if_tags=_if_tags,
+            lease_id=_lease_id,
+            request_id_parameter=request_id_parameter,
+            comp=comp,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [201]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["x-ms-snapshot"] = self._deserialize("str", response.headers.get("x-ms-snapshot"))
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["x-ms-version-id"] = self._deserialize("str", response.headers.get("x-ms-version-id"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+        response_headers["x-ms-request-server-encrypted"] = self._deserialize(
+            "bool", response.headers.get("x-ms-request-server-encrypted")
+        )
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
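+    # Usage sketch for create_snapshot (illustrative only; `ops` is an assumed
+    # authenticated instance of this operations class). The snapshot's opaque
+    # DateTime id is returned in the x-ms-snapshot response header, which can
+    # be surfaced through the optional `cls` callback:
+    #
+    #     snapshot_id = ops.create_snapshot(
+    #         metadata={"origin": "nightly-backup"},
+    #         cls=lambda resp, result, headers: headers["x-ms-snapshot"],
+    #     )
+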
+    @distributed_trace
+    def start_copy_from_url(  # pylint: disable=inconsistent-return-statements
+        self,
+        copy_source: str,
+        timeout: Optional[int] = None,
+        metadata: Optional[Dict[str, str]] = None,
+        tier: Optional[Union[str, _models.AccessTierOptional]] = None,
+        rehydrate_priority: Optional[Union[str, _models.RehydratePriority]] = None,
+        request_id_parameter: Optional[str] = None,
+        blob_tags_string: Optional[str] = None,
+        seal_blob: Optional[bool] = None,
+        immutability_policy_expiry: Optional[datetime.datetime] = None,
+        immutability_policy_mode: Optional[Union[str, _models.BlobImmutabilityPolicyMode]] = None,
+        legal_hold: Optional[bool] = None,
+        source_modified_access_conditions: Optional[_models.SourceModifiedAccessConditions] = None,
+        modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None,
+        lease_access_conditions: Optional[_models.LeaseAccessConditions] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """The Start Copy From URL operation copies a blob or an internet resource to a new blob.
+
+        :param copy_source: Specifies the URL of the source blob, file, or internet resource. This
+         value may be up to 2 KB in length and should be URL-encoded as it would appear in a request
+         URI. The source blob must either be public or must be authenticated via a shared access
+         signature. Required.
+        :type copy_source: str
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param metadata: Optional. Specifies a user-defined name-value pair associated with the blob.
+         If no name-value pairs are specified, the operation will copy the metadata from the source blob
+         or file to the destination blob. If one or more name-value pairs are specified, the destination
+         blob is created with the specified metadata, and metadata is not copied from the source blob or
+         file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming
+         rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more
+         information. Default value is None.
+        :type metadata: dict[str, str]
+        :param tier: Optional. Indicates the tier to be set on the blob. Known values are: "P4", "P6",
+         "P10", "P15", "P20", "P30", "P40", "P50", "P60", "P70", "P80", "Hot", "Cool", "Archive", and
+         "Cold". Default value is None.
+        :type tier: str or ~azure.storage.blob.models.AccessTierOptional
+        :param rehydrate_priority: Optional. Indicates the priority with which to rehydrate an
+         archived blob. Known values are: "High" and "Standard". Default value is None.
+        :type rehydrate_priority: str or ~azure.storage.blob.models.RehydratePriority
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param blob_tags_string: Optional. Used to set blob tags in various blob operations. Default
+         value is None.
+        :type blob_tags_string: str
+        :param seal_blob: Overrides the sealed state of the destination blob. Service version
+         2019-12-12 and newer. Default value is None.
+        :type seal_blob: bool
+        :param immutability_policy_expiry: Specifies the date and time when the blob's immutability
+         policy is set to expire. Default value is None.
+        :type immutability_policy_expiry: ~datetime.datetime
+        :param immutability_policy_mode: Specifies the immutability policy mode to set on the blob.
+         Known values are: "Mutable", "Unlocked", and "Locked". Default value is None.
+        :type immutability_policy_mode: str or ~azure.storage.blob.models.BlobImmutabilityPolicyMode
+        :param legal_hold: Specifies whether a legal hold should be set on the blob. Default value is
+         None.
+        :type legal_hold: bool
+        :param source_modified_access_conditions: Parameter group. Default value is None.
+        :type source_modified_access_conditions:
+         ~azure.storage.blob.models.SourceModifiedAccessConditions
+        :param modified_access_conditions: Parameter group. Default value is None.
+        :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions
+        :param lease_access_conditions: Parameter group. Default value is None.
+        :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = kwargs.pop("params", {}) or {}
+
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _source_if_modified_since = None
+        _source_if_unmodified_since = None
+        _source_if_match = None
+        _source_if_none_match = None
+        _source_if_tags = None
+        _if_modified_since = None
+        _if_unmodified_since = None
+        _if_match = None
+        _if_none_match = None
+        _if_tags = None
+        _lease_id = None
+        if source_modified_access_conditions is not None:
+            _source_if_match = source_modified_access_conditions.source_if_match
+            _source_if_modified_since = source_modified_access_conditions.source_if_modified_since
+            _source_if_none_match = source_modified_access_conditions.source_if_none_match
+            _source_if_tags = source_modified_access_conditions.source_if_tags
+            _source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since
+        if modified_access_conditions is not None:
+            _if_match = modified_access_conditions.if_match
+            _if_modified_since = modified_access_conditions.if_modified_since
+            _if_none_match = modified_access_conditions.if_none_match
+            _if_tags = modified_access_conditions.if_tags
+            _if_unmodified_since = modified_access_conditions.if_unmodified_since
+        if lease_access_conditions is not None:
+            _lease_id = lease_access_conditions.lease_id
+
+        _request = build_start_copy_from_url_request(
+            url=self._config.url,
+            copy_source=copy_source,
+            timeout=timeout,
+            metadata=metadata,
+            tier=tier,
+            rehydrate_priority=rehydrate_priority,
+            source_if_modified_since=_source_if_modified_since,
+            source_if_unmodified_since=_source_if_unmodified_since,
+            source_if_match=_source_if_match,
+            source_if_none_match=_source_if_none_match,
+            source_if_tags=_source_if_tags,
+            if_modified_since=_if_modified_since,
+            if_unmodified_since=_if_unmodified_since,
+            if_match=_if_match,
+            if_none_match=_if_none_match,
+            if_tags=_if_tags,
+            lease_id=_lease_id,
+            request_id_parameter=request_id_parameter,
+            blob_tags_string=blob_tags_string,
+            seal_blob=seal_blob,
+            immutability_policy_expiry=immutability_policy_expiry,
+            immutability_policy_mode=immutability_policy_mode,
+            legal_hold=legal_hold,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [202]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["x-ms-version-id"] = self._deserialize("str", response.headers.get("x-ms-version-id"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+        response_headers["x-ms-copy-id"] = self._deserialize("str", response.headers.get("x-ms-copy-id"))
+        response_headers["x-ms-copy-status"] = self._deserialize("str", response.headers.get("x-ms-copy-status"))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
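+    # Usage sketch for start_copy_from_url (illustrative only; `ops` and the
+    # SAS-authorized source URL are assumptions). The copy is asynchronous;
+    # the x-ms-copy-id response header identifies it for later polling or
+    # aborting:
+    #
+    #     copy_id = ops.start_copy_from_url(
+    #         copy_source="https://account.blob.core.windows.net/src/data?<sas>",
+    #         tier="Cool",
+    #         cls=lambda resp, result, headers: headers["x-ms-copy-id"],
+    #     )
+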
+    @distributed_trace
+    def copy_from_url(  # pylint: disable=inconsistent-return-statements
+        self,
+        copy_source: str,
+        timeout: Optional[int] = None,
+        metadata: Optional[Dict[str, str]] = None,
+        tier: Optional[Union[str, _models.AccessTierOptional]] = None,
+        request_id_parameter: Optional[str] = None,
+        source_content_md5: Optional[bytes] = None,
+        blob_tags_string: Optional[str] = None,
+        immutability_policy_expiry: Optional[datetime.datetime] = None,
+        immutability_policy_mode: Optional[Union[str, _models.BlobImmutabilityPolicyMode]] = None,
+        legal_hold: Optional[bool] = None,
+        copy_source_authorization: Optional[str] = None,
+        copy_source_tags: Optional[Union[str, _models.BlobCopySourceTags]] = None,
+        source_modified_access_conditions: Optional[_models.SourceModifiedAccessConditions] = None,
+        modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None,
+        lease_access_conditions: Optional[_models.LeaseAccessConditions] = None,
+        cpk_scope_info: Optional[_models.CpkScopeInfo] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """The Copy From URL operation copies a blob or an internet resource to a new blob. It will not
+        return a response until the copy is complete.
+
+        :param copy_source: Specifies the URL of the source blob, file, or internet resource. This
+         value may be up to 2 KB in length and should be URL-encoded as it would appear in a request
+         URI. The source blob must either be public or must be authenticated via a shared access
+         signature. Required.
+        :type copy_source: str
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param metadata: Optional. Specifies a user-defined name-value pair associated with the blob.
+         If no name-value pairs are specified, the operation will copy the metadata from the source blob
+         or file to the destination blob. If one or more name-value pairs are specified, the destination
+         blob is created with the specified metadata, and metadata is not copied from the source blob or
+         file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming
+         rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more
+         information. Default value is None.
+        :type metadata: dict[str, str]
+        :param tier: Optional. Indicates the tier to be set on the blob. Known values are: "P4", "P6",
+         "P10", "P15", "P20", "P30", "P40", "P50", "P60", "P70", "P80", "Hot", "Cool", "Archive", and
+         "Cold". Default value is None.
+        :type tier: str or ~azure.storage.blob.models.AccessTierOptional
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param source_content_md5: Specifies the MD5 hash calculated for the range of bytes that must
+         be read from the copy source. Default value is None.
+        :type source_content_md5: bytes
+        :param blob_tags_string: Optional. Used to set blob tags in various blob operations. Default
+         value is None.
+        :type blob_tags_string: str
+        :param immutability_policy_expiry: Specifies the date and time when the blob's immutability
+         policy is set to expire. Default value is None.
+        :type immutability_policy_expiry: ~datetime.datetime
+        :param immutability_policy_mode: Specifies the immutability policy mode to set on the blob.
+         Known values are: "Mutable", "Unlocked", and "Locked". Default value is None.
+        :type immutability_policy_mode: str or ~azure.storage.blob.models.BlobImmutabilityPolicyMode
+        :param legal_hold: Specifies whether a legal hold should be set on the blob. Default value is
+         None.
+        :type legal_hold: bool
+        :param copy_source_authorization: Only Bearer type is supported. Credentials should be a valid
+         OAuth access token to copy source. Default value is None.
+        :type copy_source_authorization: str
+        :param copy_source_tags: Optional, default 'replace'. Indicates whether source tags should be
+         copied or replaced with the tags specified by x-ms-tags. Known values are: "REPLACE" and
+         "COPY". Default value is None.
+        :type copy_source_tags: str or ~azure.storage.blob.models.BlobCopySourceTags
+        :param source_modified_access_conditions: Parameter group. Default value is None.
+        :type source_modified_access_conditions:
+         ~azure.storage.blob.models.SourceModifiedAccessConditions
+        :param modified_access_conditions: Parameter group. Default value is None.
+        :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions
+        :param lease_access_conditions: Parameter group. Default value is None.
+        :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions
+        :param cpk_scope_info: Parameter group. Default value is None.
+        :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+        _params = kwargs.pop("params", {}) or {}
+
+        x_ms_requires_sync: Literal["true"] = kwargs.pop(
+            "x_ms_requires_sync", _headers.pop("x-ms-requires-sync", "true")
+        )
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _source_if_modified_since = None
+        _source_if_unmodified_since = None
+        _source_if_match = None
+        _source_if_none_match = None
+        _if_modified_since = None
+        _if_unmodified_since = None
+        _if_match = None
+        _if_none_match = None
+        _if_tags = None
+        _lease_id = None
+        _encryption_scope = None
+        if source_modified_access_conditions is not None:
+            _source_if_match = source_modified_access_conditions.source_if_match
+            _source_if_modified_since = source_modified_access_conditions.source_if_modified_since
+            _source_if_none_match = source_modified_access_conditions.source_if_none_match
+            _source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since
+        if modified_access_conditions is not None:
+            _if_match = modified_access_conditions.if_match
+            _if_modified_since = modified_access_conditions.if_modified_since
+            _if_none_match = modified_access_conditions.if_none_match
+            _if_tags = modified_access_conditions.if_tags
+            _if_unmodified_since = modified_access_conditions.if_unmodified_since
+        if lease_access_conditions is not None:
+            _lease_id = lease_access_conditions.lease_id
+        if cpk_scope_info is not None:
+            _encryption_scope = cpk_scope_info.encryption_scope
+
+        _request = build_copy_from_url_request(
+            url=self._config.url,
+            copy_source=copy_source,
+            timeout=timeout,
+            metadata=metadata,
+            tier=tier,
+            source_if_modified_since=_source_if_modified_since,
+            source_if_unmodified_since=_source_if_unmodified_since,
+            source_if_match=_source_if_match,
+            source_if_none_match=_source_if_none_match,
+            if_modified_since=_if_modified_since,
+            if_unmodified_since=_if_unmodified_since,
+            if_match=_if_match,
+            if_none_match=_if_none_match,
+            if_tags=_if_tags,
+            lease_id=_lease_id,
+            request_id_parameter=request_id_parameter,
+            source_content_md5=source_content_md5,
+            blob_tags_string=blob_tags_string,
+            immutability_policy_expiry=immutability_policy_expiry,
+            immutability_policy_mode=immutability_policy_mode,
+            legal_hold=legal_hold,
+            copy_source_authorization=copy_source_authorization,
+            encryption_scope=_encryption_scope,
+            copy_source_tags=copy_source_tags,
+            x_ms_requires_sync=x_ms_requires_sync,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [202]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["x-ms-version-id"] = self._deserialize("str", response.headers.get("x-ms-version-id"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+        response_headers["x-ms-copy-id"] = self._deserialize("str", response.headers.get("x-ms-copy-id"))
+        response_headers["x-ms-copy-status"] = self._deserialize("str", response.headers.get("x-ms-copy-status"))
+        response_headers["Content-MD5"] = self._deserialize("bytearray", response.headers.get("Content-MD5"))
+        response_headers["x-ms-content-crc64"] = self._deserialize(
+            "bytearray", response.headers.get("x-ms-content-crc64")
+        )
+        response_headers["x-ms-encryption-scope"] = self._deserialize(
+            "str", response.headers.get("x-ms-encryption-scope")
+        )
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
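+    # Usage sketch for copy_from_url (illustrative only; `ops` and the source
+    # URL are assumptions). Unlike start_copy_from_url, this call is
+    # synchronous (x-ms-requires-sync is fixed to "true") and does not return
+    # until the copy has completed:
+    #
+    #     ops.copy_from_url(
+    #         copy_source="https://account.blob.core.windows.net/src/data?<sas>",
+    #         copy_source_authorization="Bearer <oauth-token>",  # optional
+    #     )
+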
+    @distributed_trace
+    def abort_copy_from_url(  # pylint: disable=inconsistent-return-statements
+        self,
+        copy_id: str,
+        timeout: Optional[int] = None,
+        request_id_parameter: Optional[str] = None,
+        lease_access_conditions: Optional[_models.LeaseAccessConditions] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """The Abort Copy From URL operation aborts a pending Copy From URL operation, and leaves a
+        destination blob with zero length and full metadata.
+
+        :param copy_id: The copy identifier provided in the x-ms-copy-id header of the original Copy
+         Blob operation. Required.
+        :type copy_id: str
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param lease_access_conditions: Parameter group. Default value is None.
+        :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        comp: Literal["copy"] = kwargs.pop("comp", _params.pop("comp", "copy"))
+        copy_action_abort_constant: Literal["abort"] = kwargs.pop(
+            "copy_action_abort_constant", _headers.pop("x-ms-copy-action", "abort")
+        )
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _lease_id = None
+        if lease_access_conditions is not None:
+            _lease_id = lease_access_conditions.lease_id
+
+        _request = build_abort_copy_from_url_request(
+            url=self._config.url,
+            copy_id=copy_id,
+            timeout=timeout,
+            lease_id=_lease_id,
+            request_id_parameter=request_id_parameter,
+            comp=comp,
+            copy_action_abort_constant=copy_action_abort_constant,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [204]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
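+    # Usage sketch for abort_copy_from_url (illustrative only; `ops` is an
+    # assumed operations instance and `copy_id` is the x-ms-copy-id value
+    # returned by a prior start_copy_from_url call):
+    #
+    #     ops.abort_copy_from_url(copy_id=copy_id)
+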
+    @distributed_trace
+    def set_tier(  # pylint: disable=inconsistent-return-statements
+        self,
+        tier: Union[str, _models.AccessTierRequired],
+        snapshot: Optional[str] = None,
+        version_id: Optional[str] = None,
+        timeout: Optional[int] = None,
+        rehydrate_priority: Optional[Union[str, _models.RehydratePriority]] = None,
+        request_id_parameter: Optional[str] = None,
+        lease_access_conditions: Optional[_models.LeaseAccessConditions] = None,
+        modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """The Set Tier operation sets the tier on a blob. The operation is allowed on a page blob in a
+        premium storage account and on a block blob in a blob storage account (locally redundant
+        storage only). A premium page blob's tier determines the allowed size, IOPS, and bandwidth of
+        the blob. A block blob's tier determines Hot/Cool/Archive storage type. This operation does not
+        update the blob's ETag.
+
+        :param tier: Indicates the tier to be set on the blob. Known values are: "P4", "P6", "P10",
+         "P15", "P20", "P30", "P40", "P50", "P60", "P70", "P80", "Hot", "Cool", "Archive", and "Cold".
+         Required.
+        :type tier: str or ~azure.storage.blob.models.AccessTierRequired
+        :param snapshot: The snapshot parameter is an opaque DateTime value that, when present,
+         specifies the blob snapshot to retrieve. For more information on working with blob snapshots,
+         see :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob">Creating
+         a Snapshot of a Blob.</a>`. Default value is None.
+        :type snapshot: str
+        :param version_id: The version id parameter is an opaque DateTime value that, when present,
+         specifies the version of the blob to operate on. It's for service version 2019-10-10 and newer.
+         Default value is None.
+        :type version_id: str
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param rehydrate_priority: Optional. Indicates the priority with which to rehydrate an
+         archived blob. Known values are: "High" and "Standard". Default value is None.
+        :type rehydrate_priority: str or ~azure.storage.blob.models.RehydratePriority
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param lease_access_conditions: Parameter group. Default value is None.
+        :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions
+        :param modified_access_conditions: Parameter group. Default value is None.
+        :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        comp: Literal["tier"] = kwargs.pop("comp", _params.pop("comp", "tier"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _lease_id = None
+        _if_tags = None
+        if lease_access_conditions is not None:
+            _lease_id = lease_access_conditions.lease_id
+        if modified_access_conditions is not None:
+            _if_tags = modified_access_conditions.if_tags
+
+        _request = build_set_tier_request(
+            url=self._config.url,
+            tier=tier,
+            snapshot=snapshot,
+            version_id=version_id,
+            timeout=timeout,
+            rehydrate_priority=rehydrate_priority,
+            request_id_parameter=request_id_parameter,
+            lease_id=_lease_id,
+            if_tags=_if_tags,
+            comp=comp,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200, 202]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
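+    # Usage sketch for set_tier (illustrative only; `ops` is an assumed
+    # operations instance). Archiving and rehydration are both expressed as
+    # tier changes; rehydrate_priority only applies when leaving Archive:
+    #
+    #     ops.set_tier(tier="Archive")
+    #     ops.set_tier(tier="Hot", rehydrate_priority="High")
+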
+    @distributed_trace
+    def get_account_info(  # pylint: disable=inconsistent-return-statements
+        self, timeout: Optional[int] = None, request_id_parameter: Optional[str] = None, **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """Returns the sku name and account kind.
+
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        restype: Literal["account"] = kwargs.pop("restype", _params.pop("restype", "account"))
+        comp: Literal["properties"] = kwargs.pop("comp", _params.pop("comp", "properties"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _request = build_get_account_info_request(
+            url=self._config.url,
+            timeout=timeout,
+            request_id_parameter=request_id_parameter,
+            restype=restype,
+            comp=comp,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+        response_headers["x-ms-sku-name"] = self._deserialize("str", response.headers.get("x-ms-sku-name"))
+        response_headers["x-ms-account-kind"] = self._deserialize("str", response.headers.get("x-ms-account-kind"))
+        response_headers["x-ms-is-hns-enabled"] = self._deserialize("bool", response.headers.get("x-ms-is-hns-enabled"))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
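+    # Usage sketch for get_account_info (illustrative only; `ops` is an
+    # assumed operations instance). The result is header-only, so the `cls`
+    # callback is the natural way to extract the SKU and account kind:
+    #
+    #     sku, kind = ops.get_account_info(
+    #         cls=lambda resp, result, headers: (
+    #             headers["x-ms-sku-name"],
+    #             headers["x-ms-account-kind"],
+    #         )
+    #     )
+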
+    @distributed_trace
+    def query(
+        self,
+        snapshot: Optional[str] = None,
+        timeout: Optional[int] = None,
+        request_id_parameter: Optional[str] = None,
+        lease_access_conditions: Optional[_models.LeaseAccessConditions] = None,
+        cpk_info: Optional[_models.CpkInfo] = None,
+        modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None,
+        query_request: Optional[_models.QueryRequest] = None,
+        **kwargs: Any
+    ) -> Iterator[bytes]:
+        # pylint: disable=line-too-long
+        """The Query operation enables users to select/project on blob data by providing simple query
+        expressions.
+
+        :param snapshot: The snapshot parameter is an opaque DateTime value that, when present,
+         specifies the blob snapshot to retrieve. For more information on working with blob snapshots,
+         see :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob">Creating
+         a Snapshot of a Blob.</a>`. Default value is None.
+        :type snapshot: str
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param lease_access_conditions: Parameter group. Default value is None.
+        :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions
+        :param cpk_info: Parameter group. Default value is None.
+        :type cpk_info: ~azure.storage.blob.models.CpkInfo
+        :param modified_access_conditions: Parameter group. Default value is None.
+        :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions
+        :param query_request: The query request. Default value is None.
+        :type query_request: ~azure.storage.blob.models.QueryRequest
+        :return: Iterator[bytes] or the result of cls(response)
+        :rtype: Iterator[bytes]
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        comp: Literal["query"] = kwargs.pop("comp", _params.pop("comp", "query"))
+        content_type: str = kwargs.pop("content_type", _headers.pop("Content-Type", "application/xml"))
+        cls: ClsType[Iterator[bytes]] = kwargs.pop("cls", None)
+
+        _lease_id = None
+        _encryption_key = None
+        _encryption_key_sha256 = None
+        _encryption_algorithm = None
+        _if_modified_since = None
+        _if_unmodified_since = None
+        _if_match = None
+        _if_none_match = None
+        _if_tags = None
+        if lease_access_conditions is not None:
+            _lease_id = lease_access_conditions.lease_id
+        if cpk_info is not None:
+            _encryption_algorithm = cpk_info.encryption_algorithm
+            _encryption_key = cpk_info.encryption_key
+            _encryption_key_sha256 = cpk_info.encryption_key_sha256
+        if modified_access_conditions is not None:
+            _if_match = modified_access_conditions.if_match
+            _if_modified_since = modified_access_conditions.if_modified_since
+            _if_none_match = modified_access_conditions.if_none_match
+            _if_tags = modified_access_conditions.if_tags
+            _if_unmodified_since = modified_access_conditions.if_unmodified_since
+        if query_request is not None:
+            _content = self._serialize.body(query_request, "QueryRequest", is_xml=True)
+        else:
+            _content = None
+
+        _request = build_query_request(
+            url=self._config.url,
+            snapshot=snapshot,
+            timeout=timeout,
+            lease_id=_lease_id,
+            encryption_key=_encryption_key,
+            encryption_key_sha256=_encryption_key_sha256,
+            encryption_algorithm=_encryption_algorithm,
+            if_modified_since=_if_modified_since,
+            if_unmodified_since=_if_unmodified_since,
+            if_match=_if_match,
+            if_none_match=_if_none_match,
+            if_tags=_if_tags,
+            request_id_parameter=request_id_parameter,
+            comp=comp,
+            content_type=content_type,
+            version=self._config.version,
+            content=_content,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _decompress = kwargs.pop("decompress", True)
+        _stream = True
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200, 206]:
+            try:
+                response.read()  # Load the body in memory and close the socket
+            except (StreamConsumedError, StreamClosedError):
+                pass
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        if response.status_code == 200:
+            response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+            response_headers["x-ms-meta"] = self._deserialize("{str}", response.headers.get("x-ms-meta"))
+            response_headers["Content-Length"] = self._deserialize("int", response.headers.get("Content-Length"))
+            response_headers["Content-Type"] = self._deserialize("str", response.headers.get("Content-Type"))
+            response_headers["Content-Range"] = self._deserialize("str", response.headers.get("Content-Range"))
+            response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+            response_headers["Content-MD5"] = self._deserialize("bytearray", response.headers.get("Content-MD5"))
+            response_headers["Content-Encoding"] = self._deserialize("str", response.headers.get("Content-Encoding"))
+            response_headers["Cache-Control"] = self._deserialize("str", response.headers.get("Cache-Control"))
+            response_headers["Content-Disposition"] = self._deserialize(
+                "str", response.headers.get("Content-Disposition")
+            )
+            response_headers["Content-Language"] = self._deserialize("str", response.headers.get("Content-Language"))
+            response_headers["x-ms-blob-sequence-number"] = self._deserialize(
+                "int", response.headers.get("x-ms-blob-sequence-number")
+            )
+            response_headers["x-ms-blob-type"] = self._deserialize("str", response.headers.get("x-ms-blob-type"))
+            response_headers["x-ms-copy-completion-time"] = self._deserialize(
+                "rfc-1123", response.headers.get("x-ms-copy-completion-time")
+            )
+            response_headers["x-ms-copy-status-description"] = self._deserialize(
+                "str", response.headers.get("x-ms-copy-status-description")
+            )
+            response_headers["x-ms-copy-id"] = self._deserialize("str", response.headers.get("x-ms-copy-id"))
+            response_headers["x-ms-copy-progress"] = self._deserialize(
+                "str", response.headers.get("x-ms-copy-progress")
+            )
+            response_headers["x-ms-copy-source"] = self._deserialize("str", response.headers.get("x-ms-copy-source"))
+            response_headers["x-ms-copy-status"] = self._deserialize("str", response.headers.get("x-ms-copy-status"))
+            response_headers["x-ms-lease-duration"] = self._deserialize(
+                "str", response.headers.get("x-ms-lease-duration")
+            )
+            response_headers["x-ms-lease-state"] = self._deserialize("str", response.headers.get("x-ms-lease-state"))
+            response_headers["x-ms-lease-status"] = self._deserialize("str", response.headers.get("x-ms-lease-status"))
+            response_headers["x-ms-client-request-id"] = self._deserialize(
+                "str", response.headers.get("x-ms-client-request-id")
+            )
+            response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+            response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+            response_headers["Accept-Ranges"] = self._deserialize("str", response.headers.get("Accept-Ranges"))
+            response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+            response_headers["x-ms-blob-committed-block-count"] = self._deserialize(
+                "int", response.headers.get("x-ms-blob-committed-block-count")
+            )
+            response_headers["x-ms-server-encrypted"] = self._deserialize(
+                "bool", response.headers.get("x-ms-server-encrypted")
+            )
+            response_headers["x-ms-encryption-key-sha256"] = self._deserialize(
+                "str", response.headers.get("x-ms-encryption-key-sha256")
+            )
+            response_headers["x-ms-encryption-scope"] = self._deserialize(
+                "str", response.headers.get("x-ms-encryption-scope")
+            )
+            response_headers["x-ms-blob-content-md5"] = self._deserialize(
+                "bytearray", response.headers.get("x-ms-blob-content-md5")
+            )
+
+        if response.status_code == 206:
+            response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+            response_headers["x-ms-meta"] = self._deserialize("{str}", response.headers.get("x-ms-meta"))
+            response_headers["Content-Length"] = self._deserialize("int", response.headers.get("Content-Length"))
+            response_headers["Content-Type"] = self._deserialize("str", response.headers.get("Content-Type"))
+            response_headers["Content-Range"] = self._deserialize("str", response.headers.get("Content-Range"))
+            response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+            response_headers["Content-MD5"] = self._deserialize("bytearray", response.headers.get("Content-MD5"))
+            response_headers["Content-Encoding"] = self._deserialize("str", response.headers.get("Content-Encoding"))
+            response_headers["Cache-Control"] = self._deserialize("str", response.headers.get("Cache-Control"))
+            response_headers["Content-Disposition"] = self._deserialize(
+                "str", response.headers.get("Content-Disposition")
+            )
+            response_headers["Content-Language"] = self._deserialize("str", response.headers.get("Content-Language"))
+            response_headers["x-ms-blob-sequence-number"] = self._deserialize(
+                "int", response.headers.get("x-ms-blob-sequence-number")
+            )
+            response_headers["x-ms-blob-type"] = self._deserialize("str", response.headers.get("x-ms-blob-type"))
+            response_headers["x-ms-content-crc64"] = self._deserialize(
+                "bytearray", response.headers.get("x-ms-content-crc64")
+            )
+            response_headers["x-ms-copy-completion-time"] = self._deserialize(
+                "rfc-1123", response.headers.get("x-ms-copy-completion-time")
+            )
+            response_headers["x-ms-copy-status-description"] = self._deserialize(
+                "str", response.headers.get("x-ms-copy-status-description")
+            )
+            response_headers["x-ms-copy-id"] = self._deserialize("str", response.headers.get("x-ms-copy-id"))
+            response_headers["x-ms-copy-progress"] = self._deserialize(
+                "str", response.headers.get("x-ms-copy-progress")
+            )
+            response_headers["x-ms-copy-source"] = self._deserialize("str", response.headers.get("x-ms-copy-source"))
+            response_headers["x-ms-copy-status"] = self._deserialize("str", response.headers.get("x-ms-copy-status"))
+            response_headers["x-ms-lease-duration"] = self._deserialize(
+                "str", response.headers.get("x-ms-lease-duration")
+            )
+            response_headers["x-ms-lease-state"] = self._deserialize("str", response.headers.get("x-ms-lease-state"))
+            response_headers["x-ms-lease-status"] = self._deserialize("str", response.headers.get("x-ms-lease-status"))
+            response_headers["x-ms-client-request-id"] = self._deserialize(
+                "str", response.headers.get("x-ms-client-request-id")
+            )
+            response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+            response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+            response_headers["Accept-Ranges"] = self._deserialize("str", response.headers.get("Accept-Ranges"))
+            response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+            response_headers["x-ms-blob-committed-block-count"] = self._deserialize(
+                "int", response.headers.get("x-ms-blob-committed-block-count")
+            )
+            response_headers["x-ms-server-encrypted"] = self._deserialize(
+                "bool", response.headers.get("x-ms-server-encrypted")
+            )
+            response_headers["x-ms-encryption-key-sha256"] = self._deserialize(
+                "str", response.headers.get("x-ms-encryption-key-sha256")
+            )
+            response_headers["x-ms-encryption-scope"] = self._deserialize(
+                "str", response.headers.get("x-ms-encryption-scope")
+            )
+            response_headers["x-ms-blob-content-md5"] = self._deserialize(
+                "bytearray", response.headers.get("x-ms-blob-content-md5")
+            )
+
+        deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
+
+        if cls:
+            return cls(pipeline_response, deserialized, response_headers)  # type: ignore
+
+        return deserialized  # type: ignore
+
+    @distributed_trace
+    def get_tags(
+        self,
+        timeout: Optional[int] = None,
+        request_id_parameter: Optional[str] = None,
+        snapshot: Optional[str] = None,
+        version_id: Optional[str] = None,
+        modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None,
+        lease_access_conditions: Optional[_models.LeaseAccessConditions] = None,
+        **kwargs: Any
+    ) -> _models.BlobTags:
+        # pylint: disable=line-too-long
+        """The Get Tags operation enables users to get the tags associated with a blob.
+
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param snapshot: The snapshot parameter is an opaque DateTime value that, when present,
+         specifies the blob snapshot to retrieve. For more information on working with blob snapshots,
+         see :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob">Creating
+         a Snapshot of a Blob.</a>`. Default value is None.
+        :type snapshot: str
+        :param version_id: The version id parameter is an opaque DateTime value that, when present,
+         specifies the version of the blob to operate on. It's for service version 2019-10-10 and newer.
+         Default value is None.
+        :type version_id: str
+        :param modified_access_conditions: Parameter group. Default value is None.
+        :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions
+        :param lease_access_conditions: Parameter group. Default value is None.
+        :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions
+        :return: BlobTags or the result of cls(response)
+        :rtype: ~azure.storage.blob.models.BlobTags
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        comp: Literal["tags"] = kwargs.pop("comp", _params.pop("comp", "tags"))
+        cls: ClsType[_models.BlobTags] = kwargs.pop("cls", None)
+
+        _if_tags = None
+        _lease_id = None
+        if modified_access_conditions is not None:
+            _if_tags = modified_access_conditions.if_tags
+        if lease_access_conditions is not None:
+            _lease_id = lease_access_conditions.lease_id
+
+        _request = build_get_tags_request(
+            url=self._config.url,
+            timeout=timeout,
+            request_id_parameter=request_id_parameter,
+            snapshot=snapshot,
+            version_id=version_id,
+            if_tags=_if_tags,
+            lease_id=_lease_id,
+            comp=comp,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+
+        deserialized = self._deserialize("BlobTags", pipeline_response.http_response)
+
+        if cls:
+            return cls(pipeline_response, deserialized, response_headers)  # type: ignore
+
+        return deserialized  # type: ignore
+
+    @distributed_trace
+    def set_tags(  # pylint: disable=inconsistent-return-statements
+        self,
+        timeout: Optional[int] = None,
+        version_id: Optional[str] = None,
+        transactional_content_md5: Optional[bytes] = None,
+        transactional_content_crc64: Optional[bytes] = None,
+        request_id_parameter: Optional[str] = None,
+        modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None,
+        lease_access_conditions: Optional[_models.LeaseAccessConditions] = None,
+        tags: Optional[_models.BlobTags] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """The Set Tags operation enables users to set tags on a blob.
+
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param version_id: The version id parameter is an opaque DateTime value that, when present,
+         specifies the version of the blob to operate on. It's for service version 2019-10-10 and newer.
+         Default value is None.
+        :type version_id: str
+        :param transactional_content_md5: Specify the transactional md5 for the body, to be validated
+         by the service. Default value is None.
+        :type transactional_content_md5: bytes
+        :param transactional_content_crc64: Specify the transactional crc64 for the body, to be
+         validated by the service. Default value is None.
+        :type transactional_content_crc64: bytes
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param modified_access_conditions: Parameter group. Default value is None.
+        :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions
+        :param lease_access_conditions: Parameter group. Default value is None.
+        :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions
+        :param tags: Blob tags. Default value is None.
+        :type tags: ~azure.storage.blob.models.BlobTags
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        comp: Literal["tags"] = kwargs.pop("comp", _params.pop("comp", "tags"))
+        content_type: str = kwargs.pop("content_type", _headers.pop("Content-Type", "application/xml"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _if_tags = None
+        _lease_id = None
+        if modified_access_conditions is not None:
+            _if_tags = modified_access_conditions.if_tags
+        if lease_access_conditions is not None:
+            _lease_id = lease_access_conditions.lease_id
+        if tags is not None:
+            _content = self._serialize.body(tags, "BlobTags", is_xml=True)
+        else:
+            _content = None
+
+        _request = build_set_tags_request(
+            url=self._config.url,
+            timeout=timeout,
+            version_id=version_id,
+            transactional_content_md5=transactional_content_md5,
+            transactional_content_crc64=transactional_content_crc64,
+            request_id_parameter=request_id_parameter,
+            if_tags=_if_tags,
+            lease_id=_lease_id,
+            comp=comp,
+            content_type=content_type,
+            version=self._config.version,
+            content=_content,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [204]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
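+
+
+# Editor's note: a minimal usage sketch, not part of the generated module. The
+# get_tags/set_tags operations above surface in the public SDK as
+# BlobClient.get_blob_tags/set_blob_tags; the connection string and names below are
+# placeholders.
+def _example_blob_tags() -> None:  # hypothetical helper for illustration only
+    from azure.storage.blob import BlobClient
+
+    blob = BlobClient.from_connection_string(
+        "<connection-string>",  # placeholder
+        container_name="mycontainer",
+        blob_name="myblob",
+    )
+    # set_blob_tags serializes the dict into the BlobTags XML body assembled above
+    blob.set_blob_tags({"project": "gn-ai", "stage": "test"})
+    # get_blob_tags deserializes the BlobTags response back into a plain dict
+    tags = blob.get_blob_tags()
+    assert tags.get("project") == "gn-ai"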
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/blob/_generated/operations/_block_blob_operations.py b/.venv/lib/python3.12/site-packages/azure/storage/blob/_generated/operations/_block_blob_operations.py
new file mode 100644
index 00000000..206ee6aa
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/blob/_generated/operations/_block_blob_operations.py
@@ -0,0 +1,1790 @@
+# pylint: disable=too-many-lines
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+import datetime
+import sys
+from typing import Any, Callable, Dict, IO, Literal, Optional, TypeVar, Union
+
+from azure.core import PipelineClient
+from azure.core.exceptions import (
+    ClientAuthenticationError,
+    HttpResponseError,
+    ResourceExistsError,
+    ResourceNotFoundError,
+    ResourceNotModifiedError,
+    map_error,
+)
+from azure.core.pipeline import PipelineResponse
+from azure.core.rest import HttpRequest, HttpResponse
+from azure.core.tracing.decorator import distributed_trace
+from azure.core.utils import case_insensitive_dict
+
+from .. import models as _models
+from .._configuration import AzureBlobStorageConfiguration
+from .._serialization import Deserializer, Serializer
+
+if sys.version_info >= (3, 9):
+    from collections.abc import MutableMapping
+else:
+    from typing import MutableMapping  # type: ignore
+T = TypeVar("T")
+ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
+
+_SERIALIZER = Serializer()
+_SERIALIZER.client_side_validation = False
+
+
+def build_upload_request(
+    url: str,
+    *,
+    content_length: int,
+    content: IO[bytes],
+    timeout: Optional[int] = None,
+    transactional_content_md5: Optional[bytes] = None,
+    blob_content_type: Optional[str] = None,
+    blob_content_encoding: Optional[str] = None,
+    blob_content_language: Optional[str] = None,
+    blob_content_md5: Optional[bytes] = None,
+    blob_cache_control: Optional[str] = None,
+    metadata: Optional[Dict[str, str]] = None,
+    lease_id: Optional[str] = None,
+    blob_content_disposition: Optional[str] = None,
+    encryption_key: Optional[str] = None,
+    encryption_key_sha256: Optional[str] = None,
+    encryption_algorithm: Optional[Union[str, _models.EncryptionAlgorithmType]] = None,
+    encryption_scope: Optional[str] = None,
+    tier: Optional[Union[str, _models.AccessTierOptional]] = None,
+    if_modified_since: Optional[datetime.datetime] = None,
+    if_unmodified_since: Optional[datetime.datetime] = None,
+    if_match: Optional[str] = None,
+    if_none_match: Optional[str] = None,
+    if_tags: Optional[str] = None,
+    request_id_parameter: Optional[str] = None,
+    blob_tags_string: Optional[str] = None,
+    immutability_policy_expiry: Optional[datetime.datetime] = None,
+    immutability_policy_mode: Optional[Union[str, _models.BlobImmutabilityPolicyMode]] = None,
+    legal_hold: Optional[bool] = None,
+    transactional_content_crc64: Optional[bytes] = None,
+    structured_body_type: Optional[str] = None,
+    structured_content_length: Optional[int] = None,
+    **kwargs: Any
+) -> HttpRequest:
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    blob_type: Literal["BlockBlob"] = kwargs.pop("blob_type", _headers.pop("x-ms-blob-type", "BlockBlob"))
+    content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+    version: Literal["2025-01-05"] = kwargs.pop("version", _headers.pop("x-ms-version", "2025-01-05"))
+    accept = _headers.pop("Accept", "application/xml")
+
+    # Construct URL
+    _url = kwargs.pop("template_url", "{url}")
+    path_format_arguments = {
+        "url": _SERIALIZER.url("url", url, "str", skip_quote=True),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    if timeout is not None:
+        _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0)
+
+    # Construct headers
+    _headers["x-ms-blob-type"] = _SERIALIZER.header("blob_type", blob_type, "str")
+    if transactional_content_md5 is not None:
+        _headers["Content-MD5"] = _SERIALIZER.header(
+            "transactional_content_md5", transactional_content_md5, "bytearray"
+        )
+    _headers["Content-Length"] = _SERIALIZER.header("content_length", content_length, "int")
+    if blob_content_type is not None:
+        _headers["x-ms-blob-content-type"] = _SERIALIZER.header("blob_content_type", blob_content_type, "str")
+    if blob_content_encoding is not None:
+        _headers["x-ms-blob-content-encoding"] = _SERIALIZER.header(
+            "blob_content_encoding", blob_content_encoding, "str"
+        )
+    if blob_content_language is not None:
+        _headers["x-ms-blob-content-language"] = _SERIALIZER.header(
+            "blob_content_language", blob_content_language, "str"
+        )
+    if blob_content_md5 is not None:
+        _headers["x-ms-blob-content-md5"] = _SERIALIZER.header("blob_content_md5", blob_content_md5, "bytearray")
+    if blob_cache_control is not None:
+        _headers["x-ms-blob-cache-control"] = _SERIALIZER.header("blob_cache_control", blob_cache_control, "str")
+    if metadata is not None:
+        _headers["x-ms-meta"] = _SERIALIZER.header("metadata", metadata, "{str}")
+    if lease_id is not None:
+        _headers["x-ms-lease-id"] = _SERIALIZER.header("lease_id", lease_id, "str")
+    if blob_content_disposition is not None:
+        _headers["x-ms-blob-content-disposition"] = _SERIALIZER.header(
+            "blob_content_disposition", blob_content_disposition, "str"
+        )
+    if encryption_key is not None:
+        _headers["x-ms-encryption-key"] = _SERIALIZER.header("encryption_key", encryption_key, "str")
+    if encryption_key_sha256 is not None:
+        _headers["x-ms-encryption-key-sha256"] = _SERIALIZER.header(
+            "encryption_key_sha256", encryption_key_sha256, "str"
+        )
+    if encryption_algorithm is not None:
+        _headers["x-ms-encryption-algorithm"] = _SERIALIZER.header("encryption_algorithm", encryption_algorithm, "str")
+    if encryption_scope is not None:
+        _headers["x-ms-encryption-scope"] = _SERIALIZER.header("encryption_scope", encryption_scope, "str")
+    if tier is not None:
+        _headers["x-ms-access-tier"] = _SERIALIZER.header("tier", tier, "str")
+    if if_modified_since is not None:
+        _headers["If-Modified-Since"] = _SERIALIZER.header("if_modified_since", if_modified_since, "rfc-1123")
+    if if_unmodified_since is not None:
+        _headers["If-Unmodified-Since"] = _SERIALIZER.header("if_unmodified_since", if_unmodified_since, "rfc-1123")
+    if if_match is not None:
+        _headers["If-Match"] = _SERIALIZER.header("if_match", if_match, "str")
+    if if_none_match is not None:
+        _headers["If-None-Match"] = _SERIALIZER.header("if_none_match", if_none_match, "str")
+    if if_tags is not None:
+        _headers["x-ms-if-tags"] = _SERIALIZER.header("if_tags", if_tags, "str")
+    _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str")
+    if request_id_parameter is not None:
+        _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str")
+    if blob_tags_string is not None:
+        _headers["x-ms-tags"] = _SERIALIZER.header("blob_tags_string", blob_tags_string, "str")
+    if immutability_policy_expiry is not None:
+        _headers["x-ms-immutability-policy-until-date"] = _SERIALIZER.header(
+            "immutability_policy_expiry", immutability_policy_expiry, "rfc-1123"
+        )
+    if immutability_policy_mode is not None:
+        _headers["x-ms-immutability-policy-mode"] = _SERIALIZER.header(
+            "immutability_policy_mode", immutability_policy_mode, "str"
+        )
+    if legal_hold is not None:
+        _headers["x-ms-legal-hold"] = _SERIALIZER.header("legal_hold", legal_hold, "bool")
+    if transactional_content_crc64 is not None:
+        _headers["x-ms-content-crc64"] = _SERIALIZER.header(
+            "transactional_content_crc64", transactional_content_crc64, "bytearray"
+        )
+    if structured_body_type is not None:
+        _headers["x-ms-structured-body"] = _SERIALIZER.header("structured_body_type", structured_body_type, "str")
+    if structured_content_length is not None:
+        _headers["x-ms-structured-content-length"] = _SERIALIZER.header(
+            "structured_content_length", structured_content_length, "int"
+        )
+    if content_type is not None:
+        _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, content=content, **kwargs)
+
+
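+# Editor's note: a minimal sketch, not part of the generated module, of exercising the
+# builder above directly to inspect the raw Put Blob request; the account URL and
+# payload are placeholders.
+def _example_upload_request() -> None:  # hypothetical helper for illustration only
+    import io
+
+    data = b"hello world"
+    req = build_upload_request(
+        url="https://myaccount.blob.core.windows.net/mycontainer/myblob",  # placeholder
+        content_length=len(data),
+        content=io.BytesIO(data),
+        blob_content_type="text/plain",
+    )
+    # Put Blob is an HTTP PUT with the blob type pinned to BlockBlob
+    assert req.method == "PUT"
+    assert req.headers["x-ms-blob-type"] == "BlockBlob"
+    assert req.headers["Content-Length"] == str(len(data))
+
+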
+def build_put_blob_from_url_request(
+    url: str,
+    *,
+    content_length: int,
+    copy_source: str,
+    timeout: Optional[int] = None,
+    transactional_content_md5: Optional[bytes] = None,
+    blob_content_type: Optional[str] = None,
+    blob_content_encoding: Optional[str] = None,
+    blob_content_language: Optional[str] = None,
+    blob_content_md5: Optional[bytes] = None,
+    blob_cache_control: Optional[str] = None,
+    metadata: Optional[Dict[str, str]] = None,
+    lease_id: Optional[str] = None,
+    blob_content_disposition: Optional[str] = None,
+    encryption_key: Optional[str] = None,
+    encryption_key_sha256: Optional[str] = None,
+    encryption_algorithm: Optional[Union[str, _models.EncryptionAlgorithmType]] = None,
+    encryption_scope: Optional[str] = None,
+    tier: Optional[Union[str, _models.AccessTierOptional]] = None,
+    if_modified_since: Optional[datetime.datetime] = None,
+    if_unmodified_since: Optional[datetime.datetime] = None,
+    if_match: Optional[str] = None,
+    if_none_match: Optional[str] = None,
+    if_tags: Optional[str] = None,
+    source_if_modified_since: Optional[datetime.datetime] = None,
+    source_if_unmodified_since: Optional[datetime.datetime] = None,
+    source_if_match: Optional[str] = None,
+    source_if_none_match: Optional[str] = None,
+    source_if_tags: Optional[str] = None,
+    request_id_parameter: Optional[str] = None,
+    source_content_md5: Optional[bytes] = None,
+    blob_tags_string: Optional[str] = None,
+    copy_source_blob_properties: Optional[bool] = None,
+    copy_source_authorization: Optional[str] = None,
+    copy_source_tags: Optional[Union[str, _models.BlobCopySourceTags]] = None,
+    **kwargs: Any
+) -> HttpRequest:
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    blob_type: Literal["BlockBlob"] = kwargs.pop("blob_type", _headers.pop("x-ms-blob-type", "BlockBlob"))
+    version: Literal["2025-01-05"] = kwargs.pop("version", _headers.pop("x-ms-version", "2025-01-05"))
+    accept = _headers.pop("Accept", "application/xml")
+
+    # Construct URL
+    _url = kwargs.pop("template_url", "{url}")
+    path_format_arguments = {
+        "url": _SERIALIZER.url("url", url, "str", skip_quote=True),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    if timeout is not None:
+        _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0)
+
+    # Construct headers
+    _headers["x-ms-blob-type"] = _SERIALIZER.header("blob_type", blob_type, "str")
+    if transactional_content_md5 is not None:
+        _headers["Content-MD5"] = _SERIALIZER.header(
+            "transactional_content_md5", transactional_content_md5, "bytearray"
+        )
+    _headers["Content-Length"] = _SERIALIZER.header("content_length", content_length, "int")
+    if blob_content_type is not None:
+        _headers["x-ms-blob-content-type"] = _SERIALIZER.header("blob_content_type", blob_content_type, "str")
+    if blob_content_encoding is not None:
+        _headers["x-ms-blob-content-encoding"] = _SERIALIZER.header(
+            "blob_content_encoding", blob_content_encoding, "str"
+        )
+    if blob_content_language is not None:
+        _headers["x-ms-blob-content-language"] = _SERIALIZER.header(
+            "blob_content_language", blob_content_language, "str"
+        )
+    if blob_content_md5 is not None:
+        _headers["x-ms-blob-content-md5"] = _SERIALIZER.header("blob_content_md5", blob_content_md5, "bytearray")
+    if blob_cache_control is not None:
+        _headers["x-ms-blob-cache-control"] = _SERIALIZER.header("blob_cache_control", blob_cache_control, "str")
+    if metadata is not None:
+        _headers["x-ms-meta"] = _SERIALIZER.header("metadata", metadata, "{str}")
+    if lease_id is not None:
+        _headers["x-ms-lease-id"] = _SERIALIZER.header("lease_id", lease_id, "str")
+    if blob_content_disposition is not None:
+        _headers["x-ms-blob-content-disposition"] = _SERIALIZER.header(
+            "blob_content_disposition", blob_content_disposition, "str"
+        )
+    if encryption_key is not None:
+        _headers["x-ms-encryption-key"] = _SERIALIZER.header("encryption_key", encryption_key, "str")
+    if encryption_key_sha256 is not None:
+        _headers["x-ms-encryption-key-sha256"] = _SERIALIZER.header(
+            "encryption_key_sha256", encryption_key_sha256, "str"
+        )
+    if encryption_algorithm is not None:
+        _headers["x-ms-encryption-algorithm"] = _SERIALIZER.header("encryption_algorithm", encryption_algorithm, "str")
+    if encryption_scope is not None:
+        _headers["x-ms-encryption-scope"] = _SERIALIZER.header("encryption_scope", encryption_scope, "str")
+    if tier is not None:
+        _headers["x-ms-access-tier"] = _SERIALIZER.header("tier", tier, "str")
+    if if_modified_since is not None:
+        _headers["If-Modified-Since"] = _SERIALIZER.header("if_modified_since", if_modified_since, "rfc-1123")
+    if if_unmodified_since is not None:
+        _headers["If-Unmodified-Since"] = _SERIALIZER.header("if_unmodified_since", if_unmodified_since, "rfc-1123")
+    if if_match is not None:
+        _headers["If-Match"] = _SERIALIZER.header("if_match", if_match, "str")
+    if if_none_match is not None:
+        _headers["If-None-Match"] = _SERIALIZER.header("if_none_match", if_none_match, "str")
+    if if_tags is not None:
+        _headers["x-ms-if-tags"] = _SERIALIZER.header("if_tags", if_tags, "str")
+    if source_if_modified_since is not None:
+        _headers["x-ms-source-if-modified-since"] = _SERIALIZER.header(
+            "source_if_modified_since", source_if_modified_since, "rfc-1123"
+        )
+    if source_if_unmodified_since is not None:
+        _headers["x-ms-source-if-unmodified-since"] = _SERIALIZER.header(
+            "source_if_unmodified_since", source_if_unmodified_since, "rfc-1123"
+        )
+    if source_if_match is not None:
+        _headers["x-ms-source-if-match"] = _SERIALIZER.header("source_if_match", source_if_match, "str")
+    if source_if_none_match is not None:
+        _headers["x-ms-source-if-none-match"] = _SERIALIZER.header("source_if_none_match", source_if_none_match, "str")
+    if source_if_tags is not None:
+        _headers["x-ms-source-if-tags"] = _SERIALIZER.header("source_if_tags", source_if_tags, "str")
+    _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str")
+    if request_id_parameter is not None:
+        _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str")
+    if source_content_md5 is not None:
+        _headers["x-ms-source-content-md5"] = _SERIALIZER.header("source_content_md5", source_content_md5, "bytearray")
+    if blob_tags_string is not None:
+        _headers["x-ms-tags"] = _SERIALIZER.header("blob_tags_string", blob_tags_string, "str")
+    _headers["x-ms-copy-source"] = _SERIALIZER.header("copy_source", copy_source, "str")
+    if copy_source_blob_properties is not None:
+        _headers["x-ms-copy-source-blob-properties"] = _SERIALIZER.header(
+            "copy_source_blob_properties", copy_source_blob_properties, "bool"
+        )
+    if copy_source_authorization is not None:
+        _headers["x-ms-copy-source-authorization"] = _SERIALIZER.header(
+            "copy_source_authorization", copy_source_authorization, "str"
+        )
+    if copy_source_tags is not None:
+        _headers["x-ms-copy-source-tag-option"] = _SERIALIZER.header("copy_source_tags", copy_source_tags, "str")
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs)
+
+
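+# Editor's note: a minimal sketch, not part of the generated module. Put Blob From URL
+# sends no request body, so content_length is 0 and the source blob is named in the
+# x-ms-copy-source header; both URLs are placeholders.
+def _example_put_blob_from_url_request() -> None:  # hypothetical helper for illustration only
+    req = build_put_blob_from_url_request(
+        url="https://dest.blob.core.windows.net/mycontainer/myblob",  # placeholder
+        content_length=0,  # the service copies the bytes server-side
+        copy_source="https://src.blob.core.windows.net/mycontainer/myblob",  # placeholder
+    )
+    assert req.method == "PUT"
+    assert req.headers["x-ms-copy-source"].startswith("https://src.")
+
+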
+def build_stage_block_request(
+    url: str,
+    *,
+    block_id: str,
+    content_length: int,
+    content: IO[bytes],
+    transactional_content_md5: Optional[bytes] = None,
+    transactional_content_crc64: Optional[bytes] = None,
+    timeout: Optional[int] = None,
+    lease_id: Optional[str] = None,
+    encryption_key: Optional[str] = None,
+    encryption_key_sha256: Optional[str] = None,
+    encryption_algorithm: Optional[Union[str, _models.EncryptionAlgorithmType]] = None,
+    encryption_scope: Optional[str] = None,
+    request_id_parameter: Optional[str] = None,
+    structured_body_type: Optional[str] = None,
+    structured_content_length: Optional[int] = None,
+    **kwargs: Any
+) -> HttpRequest:
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    comp: Literal["block"] = kwargs.pop("comp", _params.pop("comp", "block"))
+    content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+    version: Literal["2025-01-05"] = kwargs.pop("version", _headers.pop("x-ms-version", "2025-01-05"))
+    accept = _headers.pop("Accept", "application/xml")
+
+    # Construct URL
+    _url = kwargs.pop("template_url", "{url}")
+    path_format_arguments = {
+        "url": _SERIALIZER.url("url", url, "str", skip_quote=True),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    _params["comp"] = _SERIALIZER.query("comp", comp, "str")
+    _params["blockid"] = _SERIALIZER.query("block_id", block_id, "str")
+    if timeout is not None:
+        _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0)
+
+    # Construct headers
+    _headers["Content-Length"] = _SERIALIZER.header("content_length", content_length, "int")
+    if transactional_content_md5 is not None:
+        _headers["Content-MD5"] = _SERIALIZER.header(
+            "transactional_content_md5", transactional_content_md5, "bytearray"
+        )
+    if transactional_content_crc64 is not None:
+        _headers["x-ms-content-crc64"] = _SERIALIZER.header(
+            "transactional_content_crc64", transactional_content_crc64, "bytearray"
+        )
+    if lease_id is not None:
+        _headers["x-ms-lease-id"] = _SERIALIZER.header("lease_id", lease_id, "str")
+    if encryption_key is not None:
+        _headers["x-ms-encryption-key"] = _SERIALIZER.header("encryption_key", encryption_key, "str")
+    if encryption_key_sha256 is not None:
+        _headers["x-ms-encryption-key-sha256"] = _SERIALIZER.header(
+            "encryption_key_sha256", encryption_key_sha256, "str"
+        )
+    if encryption_algorithm is not None:
+        _headers["x-ms-encryption-algorithm"] = _SERIALIZER.header("encryption_algorithm", encryption_algorithm, "str")
+    if encryption_scope is not None:
+        _headers["x-ms-encryption-scope"] = _SERIALIZER.header("encryption_scope", encryption_scope, "str")
+    _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str")
+    if request_id_parameter is not None:
+        _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str")
+    if structured_body_type is not None:
+        _headers["x-ms-structured-body"] = _SERIALIZER.header("structured_body_type", structured_body_type, "str")
+    if structured_content_length is not None:
+        _headers["x-ms-structured-content-length"] = _SERIALIZER.header(
+            "structured_content_length", structured_content_length, "int"
+        )
+    if content_type is not None:
+        _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, content=content, **kwargs)
+
+
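+# Editor's note: a minimal sketch, not part of the generated module. The service
+# requires block ids to be base64 strings whose decoded lengths match within a blob,
+# and the builder above carries the id in the blockid query parameter; the URL is a
+# placeholder.
+def _example_stage_block_request() -> None:  # hypothetical helper for illustration only
+    import base64
+    import io
+
+    chunk = b"chunk-0 bytes"
+    block_id = base64.b64encode(b"block-000000").decode()  # zero-padded ids keep lengths equal
+    req = build_stage_block_request(
+        url="https://myaccount.blob.core.windows.net/mycontainer/myblob",  # placeholder
+        block_id=block_id,
+        content_length=len(chunk),
+        content=io.BytesIO(chunk),
+    )
+    assert "comp=block" in req.url and "blockid=" in req.url
+
+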
+def build_stage_block_from_url_request(
+    url: str,
+    *,
+    block_id: str,
+    content_length: int,
+    source_url: str,
+    source_range: Optional[str] = None,
+    source_content_md5: Optional[bytes] = None,
+    source_contentcrc64: Optional[bytes] = None,
+    timeout: Optional[int] = None,
+    encryption_key: Optional[str] = None,
+    encryption_key_sha256: Optional[str] = None,
+    encryption_algorithm: Optional[Union[str, _models.EncryptionAlgorithmType]] = None,
+    encryption_scope: Optional[str] = None,
+    lease_id: Optional[str] = None,
+    source_if_modified_since: Optional[datetime.datetime] = None,
+    source_if_unmodified_since: Optional[datetime.datetime] = None,
+    source_if_match: Optional[str] = None,
+    source_if_none_match: Optional[str] = None,
+    request_id_parameter: Optional[str] = None,
+    copy_source_authorization: Optional[str] = None,
+    **kwargs: Any
+) -> HttpRequest:
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    comp: Literal["block"] = kwargs.pop("comp", _params.pop("comp", "block"))
+    version: Literal["2025-01-05"] = kwargs.pop("version", _headers.pop("x-ms-version", "2025-01-05"))
+    accept = _headers.pop("Accept", "application/xml")
+
+    # Construct URL
+    _url = kwargs.pop("template_url", "{url}")
+    path_format_arguments = {
+        "url": _SERIALIZER.url("url", url, "str", skip_quote=True),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    _params["comp"] = _SERIALIZER.query("comp", comp, "str")
+    _params["blockid"] = _SERIALIZER.query("block_id", block_id, "str")
+    if timeout is not None:
+        _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0)
+
+    # Construct headers
+    _headers["Content-Length"] = _SERIALIZER.header("content_length", content_length, "int")
+    _headers["x-ms-copy-source"] = _SERIALIZER.header("source_url", source_url, "str")
+    if source_range is not None:
+        _headers["x-ms-source-range"] = _SERIALIZER.header("source_range", source_range, "str")
+    if source_content_md5 is not None:
+        _headers["x-ms-source-content-md5"] = _SERIALIZER.header("source_content_md5", source_content_md5, "bytearray")
+    if source_contentcrc64 is not None:
+        _headers["x-ms-source-content-crc64"] = _SERIALIZER.header(
+            "source_contentcrc64", source_contentcrc64, "bytearray"
+        )
+    if encryption_key is not None:
+        _headers["x-ms-encryption-key"] = _SERIALIZER.header("encryption_key", encryption_key, "str")
+    if encryption_key_sha256 is not None:
+        _headers["x-ms-encryption-key-sha256"] = _SERIALIZER.header(
+            "encryption_key_sha256", encryption_key_sha256, "str"
+        )
+    if encryption_algorithm is not None:
+        _headers["x-ms-encryption-algorithm"] = _SERIALIZER.header("encryption_algorithm", encryption_algorithm, "str")
+    if encryption_scope is not None:
+        _headers["x-ms-encryption-scope"] = _SERIALIZER.header("encryption_scope", encryption_scope, "str")
+    if lease_id is not None:
+        _headers["x-ms-lease-id"] = _SERIALIZER.header("lease_id", lease_id, "str")
+    if source_if_modified_since is not None:
+        _headers["x-ms-source-if-modified-since"] = _SERIALIZER.header(
+            "source_if_modified_since", source_if_modified_since, "rfc-1123"
+        )
+    if source_if_unmodified_since is not None:
+        _headers["x-ms-source-if-unmodified-since"] = _SERIALIZER.header(
+            "source_if_unmodified_since", source_if_unmodified_since, "rfc-1123"
+        )
+    if source_if_match is not None:
+        _headers["x-ms-source-if-match"] = _SERIALIZER.header("source_if_match", source_if_match, "str")
+    if source_if_none_match is not None:
+        _headers["x-ms-source-if-none-match"] = _SERIALIZER.header("source_if_none_match", source_if_none_match, "str")
+    _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str")
+    if request_id_parameter is not None:
+        _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str")
+    if copy_source_authorization is not None:
+        _headers["x-ms-copy-source-authorization"] = _SERIALIZER.header(
+            "copy_source_authorization", copy_source_authorization, "str"
+        )
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs)
+
+
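+# Editor's note: a minimal sketch, not part of the generated module. Stage Block From
+# URL reads straight from the source URL (no request body), optionally restricted to a
+# "bytes=start-end" range; both URLs are placeholders.
+def _example_stage_block_from_url_request() -> None:  # hypothetical helper for illustration only
+    import base64
+
+    req = build_stage_block_from_url_request(
+        url="https://dest.blob.core.windows.net/mycontainer/myblob",  # placeholder
+        block_id=base64.b64encode(b"block-000001").decode(),
+        content_length=0,  # no body; the service pulls from the source URL
+        source_url="https://src.blob.core.windows.net/mycontainer/source",  # placeholder
+        source_range="bytes=0-1048575",  # first 1 MiB of the source
+    )
+    assert req.headers["x-ms-source-range"] == "bytes=0-1048575"
+
+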
+def build_commit_block_list_request(
+    url: str,
+    *,
+    content: Any,
+    timeout: Optional[int] = None,
+    blob_cache_control: Optional[str] = None,
+    blob_content_type: Optional[str] = None,
+    blob_content_encoding: Optional[str] = None,
+    blob_content_language: Optional[str] = None,
+    blob_content_md5: Optional[bytes] = None,
+    transactional_content_md5: Optional[bytes] = None,
+    transactional_content_crc64: Optional[bytes] = None,
+    metadata: Optional[Dict[str, str]] = None,
+    lease_id: Optional[str] = None,
+    blob_content_disposition: Optional[str] = None,
+    encryption_key: Optional[str] = None,
+    encryption_key_sha256: Optional[str] = None,
+    encryption_algorithm: Optional[Union[str, _models.EncryptionAlgorithmType]] = None,
+    encryption_scope: Optional[str] = None,
+    tier: Optional[Union[str, _models.AccessTierOptional]] = None,
+    if_modified_since: Optional[datetime.datetime] = None,
+    if_unmodified_since: Optional[datetime.datetime] = None,
+    if_match: Optional[str] = None,
+    if_none_match: Optional[str] = None,
+    if_tags: Optional[str] = None,
+    request_id_parameter: Optional[str] = None,
+    blob_tags_string: Optional[str] = None,
+    immutability_policy_expiry: Optional[datetime.datetime] = None,
+    immutability_policy_mode: Optional[Union[str, _models.BlobImmutabilityPolicyMode]] = None,
+    legal_hold: Optional[bool] = None,
+    **kwargs: Any
+) -> HttpRequest:
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    comp: Literal["blocklist"] = kwargs.pop("comp", _params.pop("comp", "blocklist"))
+    content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+    version: Literal["2025-01-05"] = kwargs.pop("version", _headers.pop("x-ms-version", "2025-01-05"))
+    accept = _headers.pop("Accept", "application/xml")
+
+    # Construct URL
+    _url = kwargs.pop("template_url", "{url}")
+    path_format_arguments = {
+        "url": _SERIALIZER.url("url", url, "str", skip_quote=True),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    _params["comp"] = _SERIALIZER.query("comp", comp, "str")
+    if timeout is not None:
+        _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0)
+
+    # Construct headers
+    if blob_cache_control is not None:
+        _headers["x-ms-blob-cache-control"] = _SERIALIZER.header("blob_cache_control", blob_cache_control, "str")
+    if blob_content_type is not None:
+        _headers["x-ms-blob-content-type"] = _SERIALIZER.header("blob_content_type", blob_content_type, "str")
+    if blob_content_encoding is not None:
+        _headers["x-ms-blob-content-encoding"] = _SERIALIZER.header(
+            "blob_content_encoding", blob_content_encoding, "str"
+        )
+    if blob_content_language is not None:
+        _headers["x-ms-blob-content-language"] = _SERIALIZER.header(
+            "blob_content_language", blob_content_language, "str"
+        )
+    if blob_content_md5 is not None:
+        _headers["x-ms-blob-content-md5"] = _SERIALIZER.header("blob_content_md5", blob_content_md5, "bytearray")
+    if transactional_content_md5 is not None:
+        _headers["Content-MD5"] = _SERIALIZER.header(
+            "transactional_content_md5", transactional_content_md5, "bytearray"
+        )
+    if transactional_content_crc64 is not None:
+        _headers["x-ms-content-crc64"] = _SERIALIZER.header(
+            "transactional_content_crc64", transactional_content_crc64, "bytearray"
+        )
+    if metadata is not None:
+        _headers["x-ms-meta"] = _SERIALIZER.header("metadata", metadata, "{str}")
+    if lease_id is not None:
+        _headers["x-ms-lease-id"] = _SERIALIZER.header("lease_id", lease_id, "str")
+    if blob_content_disposition is not None:
+        _headers["x-ms-blob-content-disposition"] = _SERIALIZER.header(
+            "blob_content_disposition", blob_content_disposition, "str"
+        )
+    if encryption_key is not None:
+        _headers["x-ms-encryption-key"] = _SERIALIZER.header("encryption_key", encryption_key, "str")
+    if encryption_key_sha256 is not None:
+        _headers["x-ms-encryption-key-sha256"] = _SERIALIZER.header(
+            "encryption_key_sha256", encryption_key_sha256, "str"
+        )
+    if encryption_algorithm is not None:
+        _headers["x-ms-encryption-algorithm"] = _SERIALIZER.header("encryption_algorithm", encryption_algorithm, "str")
+    if encryption_scope is not None:
+        _headers["x-ms-encryption-scope"] = _SERIALIZER.header("encryption_scope", encryption_scope, "str")
+    if tier is not None:
+        _headers["x-ms-access-tier"] = _SERIALIZER.header("tier", tier, "str")
+    if if_modified_since is not None:
+        _headers["If-Modified-Since"] = _SERIALIZER.header("if_modified_since", if_modified_since, "rfc-1123")
+    if if_unmodified_since is not None:
+        _headers["If-Unmodified-Since"] = _SERIALIZER.header("if_unmodified_since", if_unmodified_since, "rfc-1123")
+    if if_match is not None:
+        _headers["If-Match"] = _SERIALIZER.header("if_match", if_match, "str")
+    if if_none_match is not None:
+        _headers["If-None-Match"] = _SERIALIZER.header("if_none_match", if_none_match, "str")
+    if if_tags is not None:
+        _headers["x-ms-if-tags"] = _SERIALIZER.header("if_tags", if_tags, "str")
+    _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str")
+    if request_id_parameter is not None:
+        _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str")
+    if blob_tags_string is not None:
+        _headers["x-ms-tags"] = _SERIALIZER.header("blob_tags_string", blob_tags_string, "str")
+    if immutability_policy_expiry is not None:
+        _headers["x-ms-immutability-policy-until-date"] = _SERIALIZER.header(
+            "immutability_policy_expiry", immutability_policy_expiry, "rfc-1123"
+        )
+    if immutability_policy_mode is not None:
+        _headers["x-ms-immutability-policy-mode"] = _SERIALIZER.header(
+            "immutability_policy_mode", immutability_policy_mode, "str"
+        )
+    if legal_hold is not None:
+        _headers["x-ms-legal-hold"] = _SERIALIZER.header("legal_hold", legal_hold, "bool")
+    if content_type is not None:
+        _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, content=content, **kwargs)
+
+
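+# Editor's note: the commit body this builder carries is a <BlockList> XML document in
+# which each staged block is referenced as Committed, Uncommitted, or Latest. A minimal
+# sketch of that payload follows; the id is a placeholder base64 value.
+_EXAMPLE_BLOCK_LIST_XML = (
+    '<?xml version="1.0" encoding="utf-8"?>'
+    "<BlockList><Latest>YmxvY2stMDAwMDAw</Latest></BlockList>"
+)
+
+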
+def build_get_block_list_request(
+    url: str,
+    *,
+    snapshot: Optional[str] = None,
+    list_type: Union[str, _models.BlockListType] = "committed",
+    timeout: Optional[int] = None,
+    lease_id: Optional[str] = None,
+    if_tags: Optional[str] = None,
+    request_id_parameter: Optional[str] = None,
+    **kwargs: Any
+) -> HttpRequest:
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    comp: Literal["blocklist"] = kwargs.pop("comp", _params.pop("comp", "blocklist"))
+    version: Literal["2025-01-05"] = kwargs.pop("version", _headers.pop("x-ms-version", "2025-01-05"))
+    accept = _headers.pop("Accept", "application/xml")
+
+    # Construct URL
+    _url = kwargs.pop("template_url", "{url}")
+    path_format_arguments = {
+        "url": _SERIALIZER.url("url", url, "str", skip_quote=True),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    _params["comp"] = _SERIALIZER.query("comp", comp, "str")
+    if snapshot is not None:
+        _params["snapshot"] = _SERIALIZER.query("snapshot", snapshot, "str")
+    _params["blocklisttype"] = _SERIALIZER.query("list_type", list_type, "str")
+    if timeout is not None:
+        _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0)
+
+    # Construct headers
+    if lease_id is not None:
+        _headers["x-ms-lease-id"] = _SERIALIZER.header("lease_id", lease_id, "str")
+    if if_tags is not None:
+        _headers["x-ms-if-tags"] = _SERIALIZER.header("if_tags", if_tags, "str")
+    _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str")
+    if request_id_parameter is not None:
+        _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str")
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
+
+
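+# Editor's note: a minimal sketch, not part of the generated module. Get Block List is
+# a GET keyed by comp=blocklist, and blocklisttype selects committed, uncommitted, or
+# all blocks; the URL is a placeholder.
+def _example_get_block_list_request() -> None:  # hypothetical helper for illustration only
+    req = build_get_block_list_request(
+        url="https://myaccount.blob.core.windows.net/mycontainer/myblob",  # placeholder
+        list_type="all",
+    )
+    assert req.method == "GET"
+    assert "blocklisttype=all" in req.url
+
+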
+class BlockBlobOperations:
+    """
+    .. warning::
+        **DO NOT** instantiate this class directly.
+
+        Instead, you should access the following operations through
+        :class:`~azure.storage.blob.AzureBlobStorage`'s
+        :attr:`block_blob` attribute.
+    """
+
+    models = _models
+
+    def __init__(self, *args, **kwargs):
+        input_args = list(args)
+        self._client: PipelineClient = input_args.pop(0) if input_args else kwargs.pop("client")
+        self._config: AzureBlobStorageConfiguration = input_args.pop(0) if input_args else kwargs.pop("config")
+        self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer")
+        self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer")
+
+    @distributed_trace
+    def upload(  # pylint: disable=inconsistent-return-statements
+        self,
+        content_length: int,
+        body: IO[bytes],
+        timeout: Optional[int] = None,
+        transactional_content_md5: Optional[bytes] = None,
+        metadata: Optional[Dict[str, str]] = None,
+        tier: Optional[Union[str, _models.AccessTierOptional]] = None,
+        request_id_parameter: Optional[str] = None,
+        blob_tags_string: Optional[str] = None,
+        immutability_policy_expiry: Optional[datetime.datetime] = None,
+        immutability_policy_mode: Optional[Union[str, _models.BlobImmutabilityPolicyMode]] = None,
+        legal_hold: Optional[bool] = None,
+        transactional_content_crc64: Optional[bytes] = None,
+        structured_body_type: Optional[str] = None,
+        structured_content_length: Optional[int] = None,
+        blob_http_headers: Optional[_models.BlobHTTPHeaders] = None,
+        lease_access_conditions: Optional[_models.LeaseAccessConditions] = None,
+        cpk_info: Optional[_models.CpkInfo] = None,
+        cpk_scope_info: Optional[_models.CpkScopeInfo] = None,
+        modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """The Upload Block Blob operation updates the content of an existing block blob. Updating an
+        existing block blob overwrites any existing metadata on the blob. Partial updates are not
+        supported with Put Blob; the content of the existing blob is overwritten with the content of
+        the new blob. To perform a partial update of the content of a block blob, use the Put Block
+        List operation.
+
+        :param content_length: The length of the request. Required.
+        :type content_length: int
+        :param body: Initial data. Required.
+        :type body: IO[bytes]
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param transactional_content_md5: Specify the transactional md5 for the body, to be validated
+         by the service. Default value is None.
+        :type transactional_content_md5: bytes
+        :param metadata: Optional. Specifies a user-defined name-value pair associated with the blob.
+         If no name-value pairs are specified, the operation will copy the metadata from the source blob
+         or file to the destination blob. If one or more name-value pairs are specified, the destination
+         blob is created with the specified metadata, and metadata is not copied from the source blob or
+         file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming
+         rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more
+         information. Default value is None.
+        :type metadata: dict[str, str]
+        :param tier: Optional. Indicates the tier to be set on the blob. Known values are: "P4", "P6",
+         "P10", "P15", "P20", "P30", "P40", "P50", "P60", "P70", "P80", "Hot", "Cool", "Archive", and
+         "Cold". Default value is None.
+        :type tier: str or ~azure.storage.blob.models.AccessTierOptional
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param blob_tags_string: Optional. Used to set blob tags in various blob operations. Default
+         value is None.
+        :type blob_tags_string: str
+        :param immutability_policy_expiry: Specifies the date and time when the blob's immutability
+         policy is set to expire. Default value is None.
+        :type immutability_policy_expiry: ~datetime.datetime
+        :param immutability_policy_mode: Specifies the immutability policy mode to set on the blob.
+         Known values are: "Mutable", "Unlocked", and "Locked". Default value is None.
+        :type immutability_policy_mode: str or ~azure.storage.blob.models.BlobImmutabilityPolicyMode
+        :param legal_hold: Specified if a legal hold should be set on the blob. Default value is None.
+        :type legal_hold: bool
+        :param transactional_content_crc64: Specify the transactional crc64 for the body, to be
+         validated by the service. Default value is None.
+        :type transactional_content_crc64: bytes
+        :param structured_body_type: Required if the request body is a structured message. Specifies
+         the message schema version and properties. Default value is None.
+        :type structured_body_type: str
+        :param structured_content_length: Required if the request body is a structured message.
+         Specifies the length of the blob/file content inside the message body. Will always be smaller
+         than Content-Length. Default value is None.
+        :type structured_content_length: int
+        :param blob_http_headers: Parameter group. Default value is None.
+        :type blob_http_headers: ~azure.storage.blob.models.BlobHTTPHeaders
+        :param lease_access_conditions: Parameter group. Default value is None.
+        :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions
+        :param cpk_info: Parameter group. Default value is None.
+        :type cpk_info: ~azure.storage.blob.models.CpkInfo
+        :param cpk_scope_info: Parameter group. Default value is None.
+        :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo
+        :param modified_access_conditions: Parameter group. Default value is None.
+        :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+        _params = kwargs.pop("params", {}) or {}
+
+        blob_type: Literal["BlockBlob"] = kwargs.pop("blob_type", _headers.pop("x-ms-blob-type", "BlockBlob"))
+        content_type: str = kwargs.pop("content_type", _headers.pop("Content-Type", "application/octet-stream"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _blob_content_type = None
+        _blob_content_encoding = None
+        _blob_content_language = None
+        _blob_content_md5 = None
+        _blob_cache_control = None
+        _lease_id = None
+        _blob_content_disposition = None
+        _encryption_key = None
+        _encryption_key_sha256 = None
+        _encryption_algorithm = None
+        _encryption_scope = None
+        _if_modified_since = None
+        _if_unmodified_since = None
+        _if_match = None
+        _if_none_match = None
+        _if_tags = None
+        if blob_http_headers is not None:
+            _blob_cache_control = blob_http_headers.blob_cache_control
+            _blob_content_disposition = blob_http_headers.blob_content_disposition
+            _blob_content_encoding = blob_http_headers.blob_content_encoding
+            _blob_content_language = blob_http_headers.blob_content_language
+            _blob_content_md5 = blob_http_headers.blob_content_md5
+            _blob_content_type = blob_http_headers.blob_content_type
+        if lease_access_conditions is not None:
+            _lease_id = lease_access_conditions.lease_id
+        if cpk_info is not None:
+            _encryption_algorithm = cpk_info.encryption_algorithm
+            _encryption_key = cpk_info.encryption_key
+            _encryption_key_sha256 = cpk_info.encryption_key_sha256
+        if cpk_scope_info is not None:
+            _encryption_scope = cpk_scope_info.encryption_scope
+        if modified_access_conditions is not None:
+            _if_match = modified_access_conditions.if_match
+            _if_modified_since = modified_access_conditions.if_modified_since
+            _if_none_match = modified_access_conditions.if_none_match
+            _if_tags = modified_access_conditions.if_tags
+            _if_unmodified_since = modified_access_conditions.if_unmodified_since
+        _content = body
+
+        _request = build_upload_request(
+            url=self._config.url,
+            content_length=content_length,
+            timeout=timeout,
+            transactional_content_md5=transactional_content_md5,
+            blob_content_type=_blob_content_type,
+            blob_content_encoding=_blob_content_encoding,
+            blob_content_language=_blob_content_language,
+            blob_content_md5=_blob_content_md5,
+            blob_cache_control=_blob_cache_control,
+            metadata=metadata,
+            lease_id=_lease_id,
+            blob_content_disposition=_blob_content_disposition,
+            encryption_key=_encryption_key,
+            encryption_key_sha256=_encryption_key_sha256,
+            encryption_algorithm=_encryption_algorithm,
+            encryption_scope=_encryption_scope,
+            tier=tier,
+            if_modified_since=_if_modified_since,
+            if_unmodified_since=_if_unmodified_since,
+            if_match=_if_match,
+            if_none_match=_if_none_match,
+            if_tags=_if_tags,
+            request_id_parameter=request_id_parameter,
+            blob_tags_string=blob_tags_string,
+            immutability_policy_expiry=immutability_policy_expiry,
+            immutability_policy_mode=immutability_policy_mode,
+            legal_hold=legal_hold,
+            transactional_content_crc64=transactional_content_crc64,
+            structured_body_type=structured_body_type,
+            structured_content_length=structured_content_length,
+            blob_type=blob_type,
+            content_type=content_type,
+            version=self._config.version,
+            content=_content,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [201]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["Content-MD5"] = self._deserialize("bytearray", response.headers.get("Content-MD5"))
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["x-ms-version-id"] = self._deserialize("str", response.headers.get("x-ms-version-id"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+        response_headers["x-ms-request-server-encrypted"] = self._deserialize(
+            "bool", response.headers.get("x-ms-request-server-encrypted")
+        )
+        response_headers["x-ms-encryption-key-sha256"] = self._deserialize(
+            "str", response.headers.get("x-ms-encryption-key-sha256")
+        )
+        response_headers["x-ms-encryption-scope"] = self._deserialize(
+            "str", response.headers.get("x-ms-encryption-scope")
+        )
+        response_headers["x-ms-structured-body"] = self._deserialize(
+            "str", response.headers.get("x-ms-structured-body")
+        )
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
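+    # Usage sketch: a minimal, illustrative way to drive this generated
+    # `upload` operation through the public BlobClient wrapper; the
+    # connection string, container and blob names are placeholders, not
+    # values from this module.
+    #
+    #     from azure.storage.blob import BlobClient
+    #
+    #     client = BlobClient.from_connection_string(
+    #         conn_str="<connection-string>",
+    #         container_name="mycontainer",
+    #         blob_name="hello.txt",
+    #     )
+    #     client.upload_blob(b"hello world", blob_type="BlockBlob", overwrite=True)
+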
+    @distributed_trace
+    def put_blob_from_url(  # pylint: disable=inconsistent-return-statements
+        self,
+        content_length: int,
+        copy_source: str,
+        timeout: Optional[int] = None,
+        transactional_content_md5: Optional[bytes] = None,
+        metadata: Optional[Dict[str, str]] = None,
+        tier: Optional[Union[str, _models.AccessTierOptional]] = None,
+        request_id_parameter: Optional[str] = None,
+        source_content_md5: Optional[bytes] = None,
+        blob_tags_string: Optional[str] = None,
+        copy_source_blob_properties: Optional[bool] = None,
+        copy_source_authorization: Optional[str] = None,
+        copy_source_tags: Optional[Union[str, _models.BlobCopySourceTags]] = None,
+        blob_http_headers: Optional[_models.BlobHTTPHeaders] = None,
+        lease_access_conditions: Optional[_models.LeaseAccessConditions] = None,
+        cpk_info: Optional[_models.CpkInfo] = None,
+        cpk_scope_info: Optional[_models.CpkScopeInfo] = None,
+        modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None,
+        source_modified_access_conditions: Optional[_models.SourceModifiedAccessConditions] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """The Put Blob from URL operation creates a new Block Blob where the contents of the blob are
+        read from a given URL.  This API is supported beginning with the 2020-04-08 version. Partial
+        updates are not supported with Put Blob from URL; the content of an existing blob is
+        overwritten with the content of the new blob.  To perform partial updates to a block blob’s
+        contents using a source URL, use the Put Block from URL API in conjunction with Put Block List.
+
+        :param content_length: The length of the request. Required.
+        :type content_length: int
+        :param copy_source: Specifies the URL of the source blob. This value is a URL of up to 2 KB
+         in length. The value should be URL-encoded as it would appear in a request URI. The source
+         blob must either be public or must be authenticated via a shared access signature. Required.
+        :type copy_source: str
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param transactional_content_md5: Specify the transactional md5 for the body, to be validated
+         by the service. Default value is None.
+        :type transactional_content_md5: bytes
+        :param metadata: Optional. Specifies a user-defined name-value pair associated with the blob.
+         If no name-value pairs are specified, the operation will copy the metadata from the source blob
+         or file to the destination blob. If one or more name-value pairs are specified, the destination
+         blob is created with the specified metadata, and metadata is not copied from the source blob or
+         file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming
+         rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more
+         information. Default value is None.
+        :type metadata: dict[str, str]
+        :param tier: Optional. Indicates the tier to be set on the blob. Known values are: "P4", "P6",
+         "P10", "P15", "P20", "P30", "P40", "P50", "P60", "P70", "P80", "Hot", "Cool", "Archive", and
+         "Cold". Default value is None.
+        :type tier: str or ~azure.storage.blob.models.AccessTierOptional
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param source_content_md5: Specify the md5 calculated for the range of bytes that must be read
+         from the copy source. Default value is None.
+        :type source_content_md5: bytes
+        :param blob_tags_string: Optional.  Used to set blob tags in various blob operations. Default
+         value is None.
+        :type blob_tags_string: str
+        :param copy_source_blob_properties: Optional, default is true.  Indicates if properties from
+         the source blob should be copied. Default value is None.
+        :type copy_source_blob_properties: bool
+        :param copy_source_authorization: Only Bearer type is supported. Credentials should be a valid
+         OAuth access token to copy source. Default value is None.
+        :type copy_source_authorization: str
+        :param copy_source_tags: Optional, default 'replace'.  Indicates if source tags should be
+         copied or replaced with the tags specified by x-ms-tags. Known values are: "REPLACE" and
+         "COPY". Default value is None.
+        :type copy_source_tags: str or ~azure.storage.blob.models.BlobCopySourceTags
+        :param blob_http_headers: Parameter group. Default value is None.
+        :type blob_http_headers: ~azure.storage.blob.models.BlobHTTPHeaders
+        :param lease_access_conditions: Parameter group. Default value is None.
+        :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions
+        :param cpk_info: Parameter group. Default value is None.
+        :type cpk_info: ~azure.storage.blob.models.CpkInfo
+        :param cpk_scope_info: Parameter group. Default value is None.
+        :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo
+        :param modified_access_conditions: Parameter group. Default value is None.
+        :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions
+        :param source_modified_access_conditions: Parameter group. Default value is None.
+        :type source_modified_access_conditions:
+         ~azure.storage.blob.models.SourceModifiedAccessConditions
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+        _params = kwargs.pop("params", {}) or {}
+
+        blob_type: Literal["BlockBlob"] = kwargs.pop("blob_type", _headers.pop("x-ms-blob-type", "BlockBlob"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _blob_content_type = None
+        _blob_content_encoding = None
+        _blob_content_language = None
+        _blob_content_md5 = None
+        _blob_cache_control = None
+        _lease_id = None
+        _blob_content_disposition = None
+        _encryption_key = None
+        _encryption_key_sha256 = None
+        _encryption_algorithm = None
+        _encryption_scope = None
+        _if_modified_since = None
+        _if_unmodified_since = None
+        _if_match = None
+        _if_none_match = None
+        _if_tags = None
+        _source_if_modified_since = None
+        _source_if_unmodified_since = None
+        _source_if_match = None
+        _source_if_none_match = None
+        _source_if_tags = None
+        if blob_http_headers is not None:
+            _blob_cache_control = blob_http_headers.blob_cache_control
+            _blob_content_disposition = blob_http_headers.blob_content_disposition
+            _blob_content_encoding = blob_http_headers.blob_content_encoding
+            _blob_content_language = blob_http_headers.blob_content_language
+            _blob_content_md5 = blob_http_headers.blob_content_md5
+            _blob_content_type = blob_http_headers.blob_content_type
+        if lease_access_conditions is not None:
+            _lease_id = lease_access_conditions.lease_id
+        if cpk_info is not None:
+            _encryption_algorithm = cpk_info.encryption_algorithm
+            _encryption_key = cpk_info.encryption_key
+            _encryption_key_sha256 = cpk_info.encryption_key_sha256
+        if cpk_scope_info is not None:
+            _encryption_scope = cpk_scope_info.encryption_scope
+        if modified_access_conditions is not None:
+            _if_match = modified_access_conditions.if_match
+            _if_modified_since = modified_access_conditions.if_modified_since
+            _if_none_match = modified_access_conditions.if_none_match
+            _if_tags = modified_access_conditions.if_tags
+            _if_unmodified_since = modified_access_conditions.if_unmodified_since
+        if source_modified_access_conditions is not None:
+            _source_if_match = source_modified_access_conditions.source_if_match
+            _source_if_modified_since = source_modified_access_conditions.source_if_modified_since
+            _source_if_none_match = source_modified_access_conditions.source_if_none_match
+            _source_if_tags = source_modified_access_conditions.source_if_tags
+            _source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since
+
+        _request = build_put_blob_from_url_request(
+            url=self._config.url,
+            content_length=content_length,
+            copy_source=copy_source,
+            timeout=timeout,
+            transactional_content_md5=transactional_content_md5,
+            blob_content_type=_blob_content_type,
+            blob_content_encoding=_blob_content_encoding,
+            blob_content_language=_blob_content_language,
+            blob_content_md5=_blob_content_md5,
+            blob_cache_control=_blob_cache_control,
+            metadata=metadata,
+            lease_id=_lease_id,
+            blob_content_disposition=_blob_content_disposition,
+            encryption_key=_encryption_key,
+            encryption_key_sha256=_encryption_key_sha256,
+            encryption_algorithm=_encryption_algorithm,
+            encryption_scope=_encryption_scope,
+            tier=tier,
+            if_modified_since=_if_modified_since,
+            if_unmodified_since=_if_unmodified_since,
+            if_match=_if_match,
+            if_none_match=_if_none_match,
+            if_tags=_if_tags,
+            source_if_modified_since=_source_if_modified_since,
+            source_if_unmodified_since=_source_if_unmodified_since,
+            source_if_match=_source_if_match,
+            source_if_none_match=_source_if_none_match,
+            source_if_tags=_source_if_tags,
+            request_id_parameter=request_id_parameter,
+            source_content_md5=source_content_md5,
+            blob_tags_string=blob_tags_string,
+            copy_source_blob_properties=copy_source_blob_properties,
+            copy_source_authorization=copy_source_authorization,
+            copy_source_tags=copy_source_tags,
+            blob_type=blob_type,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [201]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["Content-MD5"] = self._deserialize("bytearray", response.headers.get("Content-MD5"))
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["x-ms-version-id"] = self._deserialize("str", response.headers.get("x-ms-version-id"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+        response_headers["x-ms-request-server-encrypted"] = self._deserialize(
+            "bool", response.headers.get("x-ms-request-server-encrypted")
+        )
+        response_headers["x-ms-encryption-key-sha256"] = self._deserialize(
+            "str", response.headers.get("x-ms-encryption-key-sha256")
+        )
+        response_headers["x-ms-encryption-scope"] = self._deserialize(
+            "str", response.headers.get("x-ms-encryption-scope")
+        )
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
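+    # Usage sketch: Put Blob from URL as typically reached via the public
+    # BlobClient wrapper (assuming `upload_blob_from_url` is available in the
+    # installed azure-storage-blob version), reusing `client` from the sketch
+    # above; the source URL and SAS token are placeholders.
+    #
+    #     source = "https://account.blob.core.windows.net/src/hello.txt?<sas>"
+    #     client.upload_blob_from_url(source, overwrite=True)
+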
+    @distributed_trace
+    def stage_block(  # pylint: disable=inconsistent-return-statements
+        self,
+        block_id: str,
+        content_length: int,
+        body: IO[bytes],
+        transactional_content_md5: Optional[bytes] = None,
+        transactional_content_crc64: Optional[bytes] = None,
+        timeout: Optional[int] = None,
+        request_id_parameter: Optional[str] = None,
+        structured_body_type: Optional[str] = None,
+        structured_content_length: Optional[int] = None,
+        lease_access_conditions: Optional[_models.LeaseAccessConditions] = None,
+        cpk_info: Optional[_models.CpkInfo] = None,
+        cpk_scope_info: Optional[_models.CpkScopeInfo] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """The Stage Block operation creates a new block to be committed as part of a blob.
+
+        :param block_id: A valid Base64 string value that identifies the block. Prior to encoding, the
+         string must be less than or equal to 64 bytes in size. For a given blob, the length of the
+         value specified for the blockid parameter must be the same size for each block. Required.
+        :type block_id: str
+        :param content_length: The length of the request. Required.
+        :type content_length: int
+        :param body: Initial data. Required.
+        :type body: IO[bytes]
+        :param transactional_content_md5: Specify the transactional md5 for the body, to be validated
+         by the service. Default value is None.
+        :type transactional_content_md5: bytes
+        :param transactional_content_crc64: Specify the transactional crc64 for the body, to be
+         validated by the service. Default value is None.
+        :type transactional_content_crc64: bytes
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param structured_body_type: Required if the request body is a structured message. Specifies
+         the message schema version and properties. Default value is None.
+        :type structured_body_type: str
+        :param structured_content_length: Required if the request body is a structured message.
+         Specifies the length of the blob/file content inside the message body. Will always be smaller
+         than Content-Length. Default value is None.
+        :type structured_content_length: int
+        :param lease_access_conditions: Parameter group. Default value is None.
+        :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions
+        :param cpk_info: Parameter group. Default value is None.
+        :type cpk_info: ~azure.storage.blob.models.CpkInfo
+        :param cpk_scope_info: Parameter group. Default value is None.
+        :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        comp: Literal["block"] = kwargs.pop("comp", _params.pop("comp", "block"))
+        content_type: str = kwargs.pop("content_type", _headers.pop("Content-Type", "application/octet-stream"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _lease_id = None
+        _encryption_key = None
+        _encryption_key_sha256 = None
+        _encryption_algorithm = None
+        _encryption_scope = None
+        if lease_access_conditions is not None:
+            _lease_id = lease_access_conditions.lease_id
+        if cpk_info is not None:
+            _encryption_algorithm = cpk_info.encryption_algorithm
+            _encryption_key = cpk_info.encryption_key
+            _encryption_key_sha256 = cpk_info.encryption_key_sha256
+        if cpk_scope_info is not None:
+            _encryption_scope = cpk_scope_info.encryption_scope
+        _content = body
+
+        _request = build_stage_block_request(
+            url=self._config.url,
+            block_id=block_id,
+            content_length=content_length,
+            transactional_content_md5=transactional_content_md5,
+            transactional_content_crc64=transactional_content_crc64,
+            timeout=timeout,
+            lease_id=_lease_id,
+            encryption_key=_encryption_key,
+            encryption_key_sha256=_encryption_key_sha256,
+            encryption_algorithm=_encryption_algorithm,
+            encryption_scope=_encryption_scope,
+            request_id_parameter=request_id_parameter,
+            structured_body_type=structured_body_type,
+            structured_content_length=structured_content_length,
+            comp=comp,
+            content_type=content_type,
+            version=self._config.version,
+            content=_content,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [201]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["Content-MD5"] = self._deserialize("bytearray", response.headers.get("Content-MD5"))
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+        response_headers["x-ms-content-crc64"] = self._deserialize(
+            "bytearray", response.headers.get("x-ms-content-crc64")
+        )
+        response_headers["x-ms-request-server-encrypted"] = self._deserialize(
+            "bool", response.headers.get("x-ms-request-server-encrypted")
+        )
+        response_headers["x-ms-encryption-key-sha256"] = self._deserialize(
+            "str", response.headers.get("x-ms-encryption-key-sha256")
+        )
+        response_headers["x-ms-encryption-scope"] = self._deserialize(
+            "str", response.headers.get("x-ms-encryption-scope")
+        )
+        response_headers["x-ms-structured-body"] = self._deserialize(
+            "str", response.headers.get("x-ms-structured-body")
+        )
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
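+    # Usage sketch: staging a single block via the public BlobClient wrapper,
+    # reusing `client` from the sketches above. Block ids are opaque Base64
+    # strings of uniform length per blob; the random id scheme here is just
+    # one possible choice.
+    #
+    #     import base64, os
+    #
+    #     block_id = base64.b64encode(os.urandom(16)).decode()
+    #     client.stage_block(block_id=block_id, data=b"chunk-1")
+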
+    @distributed_trace
+    def stage_block_from_url(  # pylint: disable=inconsistent-return-statements
+        self,
+        block_id: str,
+        content_length: int,
+        source_url: str,
+        source_range: Optional[str] = None,
+        source_content_md5: Optional[bytes] = None,
+        source_contentcrc64: Optional[bytes] = None,
+        timeout: Optional[int] = None,
+        request_id_parameter: Optional[str] = None,
+        copy_source_authorization: Optional[str] = None,
+        cpk_info: Optional[_models.CpkInfo] = None,
+        cpk_scope_info: Optional[_models.CpkScopeInfo] = None,
+        lease_access_conditions: Optional[_models.LeaseAccessConditions] = None,
+        source_modified_access_conditions: Optional[_models.SourceModifiedAccessConditions] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """The Stage Block operation creates a new block to be committed as part of a blob where the
+        contents are read from a URL.
+
+        :param block_id: A valid Base64 string value that identifies the block. Prior to encoding, the
+         string must be less than or equal to 64 bytes in size. For a given blob, the length of the
+         value specified for the blockid parameter must be the same size for each block. Required.
+        :type block_id: str
+        :param content_length: The length of the request. Required.
+        :type content_length: int
+        :param source_url: Specify a URL to the copy source. Required.
+        :type source_url: str
+        :param source_range: Bytes of source data in the specified range. Default value is None.
+        :type source_range: str
+        :param source_content_md5: Specify the md5 calculated for the range of bytes that must be read
+         from the copy source. Default value is None.
+        :type source_content_md5: bytes
+        :param source_contentcrc64: Specify the crc64 calculated for the range of bytes that must be
+         read from the copy source. Default value is None.
+        :type source_contentcrc64: bytes
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param copy_source_authorization: Only Bearer type is supported. Credentials should be a valid
+         OAuth access token to copy source. Default value is None.
+        :type copy_source_authorization: str
+        :param cpk_info: Parameter group. Default value is None.
+        :type cpk_info: ~azure.storage.blob.models.CpkInfo
+        :param cpk_scope_info: Parameter group. Default value is None.
+        :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo
+        :param lease_access_conditions: Parameter group. Default value is None.
+        :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions
+        :param source_modified_access_conditions: Parameter group. Default value is None.
+        :type source_modified_access_conditions:
+         ~azure.storage.blob.models.SourceModifiedAccessConditions
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        comp: Literal["block"] = kwargs.pop("comp", _params.pop("comp", "block"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _encryption_key = None
+        _encryption_key_sha256 = None
+        _encryption_algorithm = None
+        _encryption_scope = None
+        _lease_id = None
+        _source_if_modified_since = None
+        _source_if_unmodified_since = None
+        _source_if_match = None
+        _source_if_none_match = None
+        if cpk_info is not None:
+            _encryption_algorithm = cpk_info.encryption_algorithm
+            _encryption_key = cpk_info.encryption_key
+            _encryption_key_sha256 = cpk_info.encryption_key_sha256
+        if cpk_scope_info is not None:
+            _encryption_scope = cpk_scope_info.encryption_scope
+        if lease_access_conditions is not None:
+            _lease_id = lease_access_conditions.lease_id
+        if source_modified_access_conditions is not None:
+            _source_if_match = source_modified_access_conditions.source_if_match
+            _source_if_modified_since = source_modified_access_conditions.source_if_modified_since
+            _source_if_none_match = source_modified_access_conditions.source_if_none_match
+            _source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since
+
+        _request = build_stage_block_from_url_request(
+            url=self._config.url,
+            block_id=block_id,
+            content_length=content_length,
+            source_url=source_url,
+            source_range=source_range,
+            source_content_md5=source_content_md5,
+            source_contentcrc64=source_contentcrc64,
+            timeout=timeout,
+            encryption_key=_encryption_key,
+            encryption_key_sha256=_encryption_key_sha256,
+            encryption_algorithm=_encryption_algorithm,
+            encryption_scope=_encryption_scope,
+            lease_id=_lease_id,
+            source_if_modified_since=_source_if_modified_since,
+            source_if_unmodified_since=_source_if_unmodified_since,
+            source_if_match=_source_if_match,
+            source_if_none_match=_source_if_none_match,
+            request_id_parameter=request_id_parameter,
+            copy_source_authorization=copy_source_authorization,
+            comp=comp,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [201]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["Content-MD5"] = self._deserialize("bytearray", response.headers.get("Content-MD5"))
+        response_headers["x-ms-content-crc64"] = self._deserialize(
+            "bytearray", response.headers.get("x-ms-content-crc64")
+        )
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+        response_headers["x-ms-request-server-encrypted"] = self._deserialize(
+            "bool", response.headers.get("x-ms-request-server-encrypted")
+        )
+        response_headers["x-ms-encryption-key-sha256"] = self._deserialize(
+            "str", response.headers.get("x-ms-encryption-key-sha256")
+        )
+        response_headers["x-ms-encryption-scope"] = self._deserialize(
+            "str", response.headers.get("x-ms-encryption-scope")
+        )
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
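+    # Usage sketch: staging a block whose contents the service reads from a
+    # source URL, via the public BlobClient wrapper; the URL, offset and
+    # length below are placeholders.
+    #
+    #     client.stage_block_from_url(
+    #         block_id=block_id,
+    #         source_url="https://account.blob.core.windows.net/src/big.bin?<sas>",
+    #         source_offset=0,
+    #         source_length=4 * 1024 * 1024,
+    #     )
+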
+    @distributed_trace
+    def commit_block_list(  # pylint: disable=inconsistent-return-statements
+        self,
+        blocks: _models.BlockLookupList,
+        timeout: Optional[int] = None,
+        transactional_content_md5: Optional[bytes] = None,
+        transactional_content_crc64: Optional[bytes] = None,
+        metadata: Optional[Dict[str, str]] = None,
+        tier: Optional[Union[str, _models.AccessTierOptional]] = None,
+        request_id_parameter: Optional[str] = None,
+        blob_tags_string: Optional[str] = None,
+        immutability_policy_expiry: Optional[datetime.datetime] = None,
+        immutability_policy_mode: Optional[Union[str, _models.BlobImmutabilityPolicyMode]] = None,
+        legal_hold: Optional[bool] = None,
+        blob_http_headers: Optional[_models.BlobHTTPHeaders] = None,
+        lease_access_conditions: Optional[_models.LeaseAccessConditions] = None,
+        cpk_info: Optional[_models.CpkInfo] = None,
+        cpk_scope_info: Optional[_models.CpkScopeInfo] = None,
+        modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """The Commit Block List operation writes a blob by specifying the list of block IDs that make up
+        the blob. In order to be written as part of a blob, a block must have been successfully written
+        to the server in a prior Put Block operation. You can call Put Block List to update a blob by
+        uploading only those blocks that have changed, then committing the new and existing blocks
+        together. You can do this by specifying whether to commit a block from the committed block list
+        or from the uncommitted block list, or to commit the most recently uploaded version of the
+        block, whichever list it may belong to.
+
+        :param blocks: Blob Blocks. Required.
+        :type blocks: ~azure.storage.blob.models.BlockLookupList
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param transactional_content_md5: Specify the transactional md5 for the body, to be validated
+         by the service. Default value is None.
+        :type transactional_content_md5: bytes
+        :param transactional_content_crc64: Specify the transactional crc64 for the body, to be
+         validated by the service. Default value is None.
+        :type transactional_content_crc64: bytes
+        :param metadata: Optional. Specifies a user-defined name-value pair associated with the blob.
+         If no name-value pairs are specified, the operation will copy the metadata from the source blob
+         or file to the destination blob. If one or more name-value pairs are specified, the destination
+         blob is created with the specified metadata, and metadata is not copied from the source blob or
+         file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming
+         rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more
+         information. Default value is None.
+        :type metadata: dict[str, str]
+        :param tier: Optional. Indicates the tier to be set on the blob. Known values are: "P4", "P6",
+         "P10", "P15", "P20", "P30", "P40", "P50", "P60", "P70", "P80", "Hot", "Cool", "Archive", and
+         "Cold". Default value is None.
+        :type tier: str or ~azure.storage.blob.models.AccessTierOptional
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param blob_tags_string: Optional.  Used to set blob tags in various blob operations. Default
+         value is None.
+        :type blob_tags_string: str
+        :param immutability_policy_expiry: Specifies the date and time when the blob's immutability
+         policy is set to expire. Default value is None.
+        :type immutability_policy_expiry: ~datetime.datetime
+        :param immutability_policy_mode: Specifies the immutability policy mode to set on the blob.
+         Known values are: "Mutable", "Unlocked", and "Locked". Default value is None.
+        :type immutability_policy_mode: str or ~azure.storage.blob.models.BlobImmutabilityPolicyMode
+        :param legal_hold: Specifies whether a legal hold should be set on the blob. Default value is None.
+        :type legal_hold: bool
+        :param blob_http_headers: Parameter group. Default value is None.
+        :type blob_http_headers: ~azure.storage.blob.models.BlobHTTPHeaders
+        :param lease_access_conditions: Parameter group. Default value is None.
+        :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions
+        :param cpk_info: Parameter group. Default value is None.
+        :type cpk_info: ~azure.storage.blob.models.CpkInfo
+        :param cpk_scope_info: Parameter group. Default value is None.
+        :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo
+        :param modified_access_conditions: Parameter group. Default value is None.
+        :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        comp: Literal["blocklist"] = kwargs.pop("comp", _params.pop("comp", "blocklist"))
+        content_type: str = kwargs.pop("content_type", _headers.pop("Content-Type", "application/xml"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _blob_cache_control = None
+        _blob_content_type = None
+        _blob_content_encoding = None
+        _blob_content_language = None
+        _blob_content_md5 = None
+        _lease_id = None
+        _blob_content_disposition = None
+        _encryption_key = None
+        _encryption_key_sha256 = None
+        _encryption_algorithm = None
+        _encryption_scope = None
+        _if_modified_since = None
+        _if_unmodified_since = None
+        _if_match = None
+        _if_none_match = None
+        _if_tags = None
+        if blob_http_headers is not None:
+            _blob_cache_control = blob_http_headers.blob_cache_control
+            _blob_content_disposition = blob_http_headers.blob_content_disposition
+            _blob_content_encoding = blob_http_headers.blob_content_encoding
+            _blob_content_language = blob_http_headers.blob_content_language
+            _blob_content_md5 = blob_http_headers.blob_content_md5
+            _blob_content_type = blob_http_headers.blob_content_type
+        if lease_access_conditions is not None:
+            _lease_id = lease_access_conditions.lease_id
+        if cpk_info is not None:
+            _encryption_algorithm = cpk_info.encryption_algorithm
+            _encryption_key = cpk_info.encryption_key
+            _encryption_key_sha256 = cpk_info.encryption_key_sha256
+        if cpk_scope_info is not None:
+            _encryption_scope = cpk_scope_info.encryption_scope
+        if modified_access_conditions is not None:
+            _if_match = modified_access_conditions.if_match
+            _if_modified_since = modified_access_conditions.if_modified_since
+            _if_none_match = modified_access_conditions.if_none_match
+            _if_tags = modified_access_conditions.if_tags
+            _if_unmodified_since = modified_access_conditions.if_unmodified_since
+        _content = self._serialize.body(blocks, "BlockLookupList", is_xml=True)
+
+        _request = build_commit_block_list_request(
+            url=self._config.url,
+            timeout=timeout,
+            blob_cache_control=_blob_cache_control,
+            blob_content_type=_blob_content_type,
+            blob_content_encoding=_blob_content_encoding,
+            blob_content_language=_blob_content_language,
+            blob_content_md5=_blob_content_md5,
+            transactional_content_md5=transactional_content_md5,
+            transactional_content_crc64=transactional_content_crc64,
+            metadata=metadata,
+            lease_id=_lease_id,
+            blob_content_disposition=_blob_content_disposition,
+            encryption_key=_encryption_key,
+            encryption_key_sha256=_encryption_key_sha256,
+            encryption_algorithm=_encryption_algorithm,
+            encryption_scope=_encryption_scope,
+            tier=tier,
+            if_modified_since=_if_modified_since,
+            if_unmodified_since=_if_unmodified_since,
+            if_match=_if_match,
+            if_none_match=_if_none_match,
+            if_tags=_if_tags,
+            request_id_parameter=request_id_parameter,
+            blob_tags_string=blob_tags_string,
+            immutability_policy_expiry=immutability_policy_expiry,
+            immutability_policy_mode=immutability_policy_mode,
+            legal_hold=legal_hold,
+            comp=comp,
+            content_type=content_type,
+            version=self._config.version,
+            content=_content,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [201]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["Content-MD5"] = self._deserialize("bytearray", response.headers.get("Content-MD5"))
+        response_headers["x-ms-content-crc64"] = self._deserialize(
+            "bytearray", response.headers.get("x-ms-content-crc64")
+        )
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["x-ms-version-id"] = self._deserialize("str", response.headers.get("x-ms-version-id"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+        response_headers["x-ms-request-server-encrypted"] = self._deserialize(
+            "bool", response.headers.get("x-ms-request-server-encrypted")
+        )
+        response_headers["x-ms-encryption-key-sha256"] = self._deserialize(
+            "str", response.headers.get("x-ms-encryption-key-sha256")
+        )
+        response_headers["x-ms-encryption-scope"] = self._deserialize(
+            "str", response.headers.get("x-ms-encryption-scope")
+        )
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
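+    # Usage sketch: committing staged blocks via the public BlobClient
+    # wrapper; the ids passed here must match the ids used when the blocks
+    # were staged (see the stage_block sketch above).
+    #
+    #     from azure.storage.blob import BlobBlock
+    #
+    #     client.commit_block_list([BlobBlock(block_id=block_id)])
+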
+    @distributed_trace
+    def get_block_list(
+        self,
+        snapshot: Optional[str] = None,
+        list_type: Union[str, _models.BlockListType] = "committed",
+        timeout: Optional[int] = None,
+        request_id_parameter: Optional[str] = None,
+        lease_access_conditions: Optional[_models.LeaseAccessConditions] = None,
+        modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None,
+        **kwargs: Any
+    ) -> _models.BlockList:
+        # pylint: disable=line-too-long
+        """The Get Block List operation retrieves the list of blocks that have been uploaded as part of a
+        block blob.
+
+        :param snapshot: The snapshot parameter is an opaque DateTime value that, when present,
+         specifies the blob snapshot to retrieve. For more information on working with blob snapshots,
+         see :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob">Creating
+         a Snapshot of a Blob.</a>`. Default value is None.
+        :type snapshot: str
+        :param list_type: Specifies whether to return the list of committed blocks, the list of
+         uncommitted blocks, or both lists together. Known values are: "committed", "uncommitted", and
+         "all". Default value is "committed".
+        :type list_type: str or ~azure.storage.blob.models.BlockListType
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param lease_access_conditions: Parameter group. Default value is None.
+        :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions
+        :param modified_access_conditions: Parameter group. Default value is None.
+        :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions
+        :return: BlockList or the result of cls(response)
+        :rtype: ~azure.storage.blob.models.BlockList
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        comp: Literal["blocklist"] = kwargs.pop("comp", _params.pop("comp", "blocklist"))
+        cls: ClsType[_models.BlockList] = kwargs.pop("cls", None)
+
+        _lease_id = None
+        _if_tags = None
+        if lease_access_conditions is not None:
+            _lease_id = lease_access_conditions.lease_id
+        if modified_access_conditions is not None:
+            _if_tags = modified_access_conditions.if_tags
+
+        _request = build_get_block_list_request(
+            url=self._config.url,
+            snapshot=snapshot,
+            list_type=list_type,
+            timeout=timeout,
+            lease_id=_lease_id,
+            if_tags=_if_tags,
+            request_id_parameter=request_id_parameter,
+            comp=comp,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Content-Type"] = self._deserialize("str", response.headers.get("Content-Type"))
+        response_headers["x-ms-blob-content-length"] = self._deserialize(
+            "int", response.headers.get("x-ms-blob-content-length")
+        )
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+
+        deserialized = self._deserialize("BlockList", pipeline_response.http_response)
+
+        if cls:
+            return cls(pipeline_response, deserialized, response_headers)  # type: ignore
+
+        return deserialized  # type: ignore
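+
+# Usage sketch: reading back the block list via the public BlobClient
+# wrapper; `get_block_list` returns a (committed, uncommitted) pair of
+# BlobBlock lists.
+#
+#     committed, uncommitted = client.get_block_list("all")
+#     print([block.id for block in committed])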
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/blob/_generated/operations/_container_operations.py b/.venv/lib/python3.12/site-packages/azure/storage/blob/_generated/operations/_container_operations.py
new file mode 100644
index 00000000..3593b490
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/blob/_generated/operations/_container_operations.py
@@ -0,0 +1,2648 @@
+# pylint: disable=too-many-lines
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+import datetime
+import sys
+from typing import Any, Callable, Dict, IO, Iterator, List, Literal, Optional, TypeVar, Union
+
+from azure.core import PipelineClient
+from azure.core.exceptions import (
+    ClientAuthenticationError,
+    HttpResponseError,
+    ResourceExistsError,
+    ResourceNotFoundError,
+    ResourceNotModifiedError,
+    StreamClosedError,
+    StreamConsumedError,
+    map_error,
+)
+from azure.core.pipeline import PipelineResponse
+from azure.core.rest import HttpRequest, HttpResponse
+from azure.core.tracing.decorator import distributed_trace
+from azure.core.utils import case_insensitive_dict
+
+from .. import models as _models
+from .._configuration import AzureBlobStorageConfiguration
+from .._serialization import Deserializer, Serializer
+
+if sys.version_info >= (3, 9):
+    from collections.abc import MutableMapping
+else:
+    from typing import MutableMapping  # type: ignore
+T = TypeVar("T")
+ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
+
+_SERIALIZER = Serializer()
+_SERIALIZER.client_side_validation = False
+
+
+def build_create_request(
+    url: str,
+    *,
+    timeout: Optional[int] = None,
+    metadata: Optional[Dict[str, str]] = None,
+    access: Optional[Union[str, _models.PublicAccessType]] = None,
+    request_id_parameter: Optional[str] = None,
+    default_encryption_scope: Optional[str] = None,
+    prevent_encryption_scope_override: Optional[bool] = None,
+    **kwargs: Any
+) -> HttpRequest:
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    restype: Literal["container"] = kwargs.pop("restype", _params.pop("restype", "container"))
+    version: Literal["2025-01-05"] = kwargs.pop("version", _headers.pop("x-ms-version", "2025-01-05"))
+    accept = _headers.pop("Accept", "application/xml")
+
+    # Construct URL
+    _url = kwargs.pop("template_url", "{url}")
+    path_format_arguments = {
+        "url": _SERIALIZER.url("url", url, "str", skip_quote=True),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    _params["restype"] = _SERIALIZER.query("restype", restype, "str")
+    if timeout is not None:
+        _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0)
+
+    # Construct headers
+    if metadata is not None:
+        _headers["x-ms-meta"] = _SERIALIZER.header("metadata", metadata, "{str}")
+    if access is not None:
+        _headers["x-ms-blob-public-access"] = _SERIALIZER.header("access", access, "str")
+    _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str")
+    if request_id_parameter is not None:
+        _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str")
+    if default_encryption_scope is not None:
+        _headers["x-ms-default-encryption-scope"] = _SERIALIZER.header(
+            "default_encryption_scope", default_encryption_scope, "str"
+        )
+    if prevent_encryption_scope_override is not None:
+        _headers["x-ms-deny-encryption-scope-override"] = _SERIALIZER.header(
+            "prevent_encryption_scope_override", prevent_encryption_scope_override, "bool"
+        )
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs)
+
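+# Illustrative sketch, not part of the generated file: the builder assembles an
+# unauthenticated HttpRequest; the client pipeline later adds auth and sends it.
+# Assuming a hypothetical account and container:
+#
+#   request = build_create_request(
+#       url="https://myaccount.blob.core.windows.net/mycontainer",
+#       timeout=30,
+#       metadata={"project": "demo"},
+#   )
+#   # request.method == "PUT"; the query string carries restype=container&timeout=30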
+
+def build_get_properties_request(
+    url: str,
+    *,
+    timeout: Optional[int] = None,
+    lease_id: Optional[str] = None,
+    request_id_parameter: Optional[str] = None,
+    **kwargs: Any
+) -> HttpRequest:
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    restype: Literal["container"] = kwargs.pop("restype", _params.pop("restype", "container"))
+    version: Literal["2025-01-05"] = kwargs.pop("version", _headers.pop("x-ms-version", "2025-01-05"))
+    accept = _headers.pop("Accept", "application/xml")
+
+    # Construct URL
+    _url = kwargs.pop("template_url", "{url}")
+    path_format_arguments = {
+        "url": _SERIALIZER.url("url", url, "str", skip_quote=True),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    _params["restype"] = _SERIALIZER.query("restype", restype, "str")
+    if timeout is not None:
+        _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0)
+
+    # Construct headers
+    if lease_id is not None:
+        _headers["x-ms-lease-id"] = _SERIALIZER.header("lease_id", lease_id, "str")
+    _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str")
+    if request_id_parameter is not None:
+        _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str")
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_delete_request(
+    url: str,
+    *,
+    timeout: Optional[int] = None,
+    lease_id: Optional[str] = None,
+    if_modified_since: Optional[datetime.datetime] = None,
+    if_unmodified_since: Optional[datetime.datetime] = None,
+    request_id_parameter: Optional[str] = None,
+    **kwargs: Any
+) -> HttpRequest:
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    restype: Literal["container"] = kwargs.pop("restype", _params.pop("restype", "container"))
+    version: Literal["2025-01-05"] = kwargs.pop("version", _headers.pop("x-ms-version", "2025-01-05"))
+    accept = _headers.pop("Accept", "application/xml")
+
+    # Construct URL
+    _url = kwargs.pop("template_url", "{url}")
+    path_format_arguments = {
+        "url": _SERIALIZER.url("url", url, "str", skip_quote=True),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    _params["restype"] = _SERIALIZER.query("restype", restype, "str")
+    if timeout is not None:
+        _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0)
+
+    # Construct headers
+    if lease_id is not None:
+        _headers["x-ms-lease-id"] = _SERIALIZER.header("lease_id", lease_id, "str")
+    if if_modified_since is not None:
+        _headers["If-Modified-Since"] = _SERIALIZER.header("if_modified_since", if_modified_since, "rfc-1123")
+    if if_unmodified_since is not None:
+        _headers["If-Unmodified-Since"] = _SERIALIZER.header("if_unmodified_since", if_unmodified_since, "rfc-1123")
+    _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str")
+    if request_id_parameter is not None:
+        _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str")
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="DELETE", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_set_metadata_request(
+    url: str,
+    *,
+    timeout: Optional[int] = None,
+    lease_id: Optional[str] = None,
+    metadata: Optional[Dict[str, str]] = None,
+    if_modified_since: Optional[datetime.datetime] = None,
+    request_id_parameter: Optional[str] = None,
+    **kwargs: Any
+) -> HttpRequest:
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    restype: Literal["container"] = kwargs.pop("restype", _params.pop("restype", "container"))
+    comp: Literal["metadata"] = kwargs.pop("comp", _params.pop("comp", "metadata"))
+    version: Literal["2025-01-05"] = kwargs.pop("version", _headers.pop("x-ms-version", "2025-01-05"))
+    accept = _headers.pop("Accept", "application/xml")
+
+    # Construct URL
+    _url = kwargs.pop("template_url", "{url}")
+    path_format_arguments = {
+        "url": _SERIALIZER.url("url", url, "str", skip_quote=True),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    _params["restype"] = _SERIALIZER.query("restype", restype, "str")
+    _params["comp"] = _SERIALIZER.query("comp", comp, "str")
+    if timeout is not None:
+        _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0)
+
+    # Construct headers
+    if lease_id is not None:
+        _headers["x-ms-lease-id"] = _SERIALIZER.header("lease_id", lease_id, "str")
+    if metadata is not None:
+        _headers["x-ms-meta"] = _SERIALIZER.header("metadata", metadata, "{str}")
+    if if_modified_since is not None:
+        _headers["If-Modified-Since"] = _SERIALIZER.header("if_modified_since", if_modified_since, "rfc-1123")
+    _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str")
+    if request_id_parameter is not None:
+        _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str")
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_get_access_policy_request(
+    url: str,
+    *,
+    timeout: Optional[int] = None,
+    lease_id: Optional[str] = None,
+    request_id_parameter: Optional[str] = None,
+    **kwargs: Any
+) -> HttpRequest:
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    restype: Literal["container"] = kwargs.pop("restype", _params.pop("restype", "container"))
+    comp: Literal["acl"] = kwargs.pop("comp", _params.pop("comp", "acl"))
+    version: Literal["2025-01-05"] = kwargs.pop("version", _headers.pop("x-ms-version", "2025-01-05"))
+    accept = _headers.pop("Accept", "application/xml")
+
+    # Construct URL
+    _url = kwargs.pop("template_url", "{url}")
+    path_format_arguments = {
+        "url": _SERIALIZER.url("url", url, "str", skip_quote=True),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    _params["restype"] = _SERIALIZER.query("restype", restype, "str")
+    _params["comp"] = _SERIALIZER.query("comp", comp, "str")
+    if timeout is not None:
+        _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0)
+
+    # Construct headers
+    if lease_id is not None:
+        _headers["x-ms-lease-id"] = _SERIALIZER.header("lease_id", lease_id, "str")
+    _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str")
+    if request_id_parameter is not None:
+        _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str")
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_set_access_policy_request(
+    url: str,
+    *,
+    timeout: Optional[int] = None,
+    lease_id: Optional[str] = None,
+    access: Optional[Union[str, _models.PublicAccessType]] = None,
+    if_modified_since: Optional[datetime.datetime] = None,
+    if_unmodified_since: Optional[datetime.datetime] = None,
+    request_id_parameter: Optional[str] = None,
+    content: Any = None,
+    **kwargs: Any
+) -> HttpRequest:
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    restype: Literal["container"] = kwargs.pop("restype", _params.pop("restype", "container"))
+    comp: Literal["acl"] = kwargs.pop("comp", _params.pop("comp", "acl"))
+    content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+    version: Literal["2025-01-05"] = kwargs.pop("version", _headers.pop("x-ms-version", "2025-01-05"))
+    accept = _headers.pop("Accept", "application/xml")
+
+    # Construct URL
+    _url = kwargs.pop("template_url", "{url}")
+    path_format_arguments = {
+        "url": _SERIALIZER.url("url", url, "str", skip_quote=True),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    _params["restype"] = _SERIALIZER.query("restype", restype, "str")
+    _params["comp"] = _SERIALIZER.query("comp", comp, "str")
+    if timeout is not None:
+        _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0)
+
+    # Construct headers
+    if lease_id is not None:
+        _headers["x-ms-lease-id"] = _SERIALIZER.header("lease_id", lease_id, "str")
+    if access is not None:
+        _headers["x-ms-blob-public-access"] = _SERIALIZER.header("access", access, "str")
+    if if_modified_since is not None:
+        _headers["If-Modified-Since"] = _SERIALIZER.header("if_modified_since", if_modified_since, "rfc-1123")
+    if if_unmodified_since is not None:
+        _headers["If-Unmodified-Since"] = _SERIALIZER.header("if_unmodified_since", if_unmodified_since, "rfc-1123")
+    _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str")
+    if request_id_parameter is not None:
+        _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str")
+    if content_type is not None:
+        _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, content=content, **kwargs)
+
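+# Illustrative note, not part of the generated file: ``content`` is expected to be
+# the serialized SignedIdentifiers XML body that the set_access_policy operation
+# builds from a list of SignedIdentifier models; the builder attaches it to the
+# PUT request unchanged.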
+
+def build_restore_request(
+    url: str,
+    *,
+    timeout: Optional[int] = None,
+    request_id_parameter: Optional[str] = None,
+    deleted_container_name: Optional[str] = None,
+    deleted_container_version: Optional[str] = None,
+    **kwargs: Any
+) -> HttpRequest:
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    restype: Literal["container"] = kwargs.pop("restype", _params.pop("restype", "container"))
+    comp: Literal["undelete"] = kwargs.pop("comp", _params.pop("comp", "undelete"))
+    version: Literal["2025-01-05"] = kwargs.pop("version", _headers.pop("x-ms-version", "2025-01-05"))
+    accept = _headers.pop("Accept", "application/xml")
+
+    # Construct URL
+    _url = kwargs.pop("template_url", "{url}")
+    path_format_arguments = {
+        "url": _SERIALIZER.url("url", url, "str", skip_quote=True),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    _params["restype"] = _SERIALIZER.query("restype", restype, "str")
+    _params["comp"] = _SERIALIZER.query("comp", comp, "str")
+    if timeout is not None:
+        _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0)
+
+    # Construct headers
+    _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str")
+    if request_id_parameter is not None:
+        _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str")
+    if deleted_container_name is not None:
+        _headers["x-ms-deleted-container-name"] = _SERIALIZER.header(
+            "deleted_container_name", deleted_container_name, "str"
+        )
+    if deleted_container_version is not None:
+        _headers["x-ms-deleted-container-version"] = _SERIALIZER.header(
+            "deleted_container_version", deleted_container_version, "str"
+        )
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_rename_request(
+    url: str,
+    *,
+    source_container_name: str,
+    timeout: Optional[int] = None,
+    request_id_parameter: Optional[str] = None,
+    source_lease_id: Optional[str] = None,
+    **kwargs: Any
+) -> HttpRequest:
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    restype: Literal["container"] = kwargs.pop("restype", _params.pop("restype", "container"))
+    comp: Literal["rename"] = kwargs.pop("comp", _params.pop("comp", "rename"))
+    version: Literal["2025-01-05"] = kwargs.pop("version", _headers.pop("x-ms-version", "2025-01-05"))
+    accept = _headers.pop("Accept", "application/xml")
+
+    # Construct URL
+    _url = kwargs.pop("template_url", "{url}")
+    path_format_arguments = {
+        "url": _SERIALIZER.url("url", url, "str", skip_quote=True),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    _params["restype"] = _SERIALIZER.query("restype", restype, "str")
+    _params["comp"] = _SERIALIZER.query("comp", comp, "str")
+    if timeout is not None:
+        _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0)
+
+    # Construct headers
+    _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str")
+    if request_id_parameter is not None:
+        _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str")
+    _headers["x-ms-source-container-name"] = _SERIALIZER.header("source_container_name", source_container_name, "str")
+    if source_lease_id is not None:
+        _headers["x-ms-source-lease-id"] = _SERIALIZER.header("source_lease_id", source_lease_id, "str")
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_submit_batch_request(
+    url: str,
+    *,
+    content_length: int,
+    content: IO[bytes],
+    timeout: Optional[int] = None,
+    request_id_parameter: Optional[str] = None,
+    **kwargs: Any
+) -> HttpRequest:
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    restype: Literal["container"] = kwargs.pop("restype", _params.pop("restype", "container"))
+    comp: Literal["batch"] = kwargs.pop("comp", _params.pop("comp", "batch"))
+    multipart_content_type: Optional[str] = kwargs.pop("multipart_content_type", _headers.pop("Content-Type", None))
+    version: Literal["2025-01-05"] = kwargs.pop("version", _headers.pop("x-ms-version", "2025-01-05"))
+    accept = _headers.pop("Accept", "application/xml")
+
+    # Construct URL
+    _url = kwargs.pop("template_url", "{url}")
+    path_format_arguments = {
+        "url": _SERIALIZER.url("url", url, "str", skip_quote=True),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    _params["restype"] = _SERIALIZER.query("restype", restype, "str")
+    _params["comp"] = _SERIALIZER.query("comp", comp, "str")
+    if timeout is not None:
+        _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0)
+
+    # Construct headers
+    _headers["Content-Length"] = _SERIALIZER.header("content_length", content_length, "int")
+    if multipart_content_type is not None:
+        _headers["Content-Type"] = _SERIALIZER.header("multipart_content_type", multipart_content_type, "str")
+    _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str")
+    if request_id_parameter is not None:
+        _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str")
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, content=content, **kwargs)
+
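+# Illustrative note, not part of the generated file: per the Blob Batch REST
+# documentation, ``multipart_content_type`` takes the form
+# ``multipart/mixed; boundary=batch_<GUID>`` and ``content`` must be the
+# pre-encoded multipart body whose size matches ``content_length``.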
+
+def build_filter_blobs_request(
+    url: str,
+    *,
+    timeout: Optional[int] = None,
+    request_id_parameter: Optional[str] = None,
+    where: Optional[str] = None,
+    marker: Optional[str] = None,
+    maxresults: Optional[int] = None,
+    include: Optional[List[Union[str, _models.FilterBlobsIncludeItem]]] = None,
+    **kwargs: Any
+) -> HttpRequest:
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    restype: Literal["container"] = kwargs.pop("restype", _params.pop("restype", "container"))
+    comp: Literal["blobs"] = kwargs.pop("comp", _params.pop("comp", "blobs"))
+    version: Literal["2025-01-05"] = kwargs.pop("version", _headers.pop("x-ms-version", "2025-01-05"))
+    accept = _headers.pop("Accept", "application/xml")
+
+    # Construct URL
+    _url = kwargs.pop("template_url", "{url}")
+    path_format_arguments = {
+        "url": _SERIALIZER.url("url", url, "str", skip_quote=True),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    _params["restype"] = _SERIALIZER.query("restype", restype, "str")
+    _params["comp"] = _SERIALIZER.query("comp", comp, "str")
+    if timeout is not None:
+        _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0)
+    if where is not None:
+        _params["where"] = _SERIALIZER.query("where", where, "str")
+    if marker is not None:
+        _params["marker"] = _SERIALIZER.query("marker", marker, "str")
+    if maxresults is not None:
+        _params["maxresults"] = _SERIALIZER.query("maxresults", maxresults, "int", minimum=1)
+    if include is not None:
+        _params["include"] = _SERIALIZER.query("include", include, "[str]", div=",")
+
+    # Construct headers
+    _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str")
+    if request_id_parameter is not None:
+        _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str")
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
+
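+# Illustrative note, not part of the generated file: ``where`` carries a blob-tag
+# filter expression such as "tagname"='value' (see the Find Blobs by Tags REST
+# documentation; the example expression is an assumption), while ``marker`` and
+# ``maxresults`` drive the usual paged enumeration.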
+
+def build_acquire_lease_request(
+    url: str,
+    *,
+    timeout: Optional[int] = None,
+    duration: Optional[int] = None,
+    proposed_lease_id: Optional[str] = None,
+    if_modified_since: Optional[datetime.datetime] = None,
+    if_unmodified_since: Optional[datetime.datetime] = None,
+    request_id_parameter: Optional[str] = None,
+    **kwargs: Any
+) -> HttpRequest:
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    comp: Literal["lease"] = kwargs.pop("comp", _params.pop("comp", "lease"))
+    restype: Literal["container"] = kwargs.pop("restype", _params.pop("restype", "container"))
+    action: Literal["acquire"] = kwargs.pop("action", _headers.pop("x-ms-lease-action", "acquire"))
+    version: Literal["2025-01-05"] = kwargs.pop("version", _headers.pop("x-ms-version", "2025-01-05"))
+    accept = _headers.pop("Accept", "application/xml")
+
+    # Construct URL
+    _url = kwargs.pop("template_url", "{url}")
+    path_format_arguments = {
+        "url": _SERIALIZER.url("url", url, "str", skip_quote=True),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    _params["comp"] = _SERIALIZER.query("comp", comp, "str")
+    _params["restype"] = _SERIALIZER.query("restype", restype, "str")
+    if timeout is not None:
+        _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0)
+
+    # Construct headers
+    _headers["x-ms-lease-action"] = _SERIALIZER.header("action", action, "str")
+    if duration is not None:
+        _headers["x-ms-lease-duration"] = _SERIALIZER.header("duration", duration, "int")
+    if proposed_lease_id is not None:
+        _headers["x-ms-proposed-lease-id"] = _SERIALIZER.header("proposed_lease_id", proposed_lease_id, "str")
+    if if_modified_since is not None:
+        _headers["If-Modified-Since"] = _SERIALIZER.header("if_modified_since", if_modified_since, "rfc-1123")
+    if if_unmodified_since is not None:
+        _headers["If-Unmodified-Since"] = _SERIALIZER.header("if_unmodified_since", if_unmodified_since, "rfc-1123")
+    _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str")
+    if request_id_parameter is not None:
+        _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str")
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs)
+
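+# Illustrative note, not part of the generated file: per the Lease Container REST
+# documentation, ``duration`` is either -1 (infinite) or 15-60 seconds and
+# ``proposed_lease_id``, if supplied, must be a GUID string; the builder performs
+# no validation, the service enforces these rules.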
+
+def build_release_lease_request(
+    url: str,
+    *,
+    lease_id: str,
+    timeout: Optional[int] = None,
+    if_modified_since: Optional[datetime.datetime] = None,
+    if_unmodified_since: Optional[datetime.datetime] = None,
+    request_id_parameter: Optional[str] = None,
+    **kwargs: Any
+) -> HttpRequest:
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    comp: Literal["lease"] = kwargs.pop("comp", _params.pop("comp", "lease"))
+    restype: Literal["container"] = kwargs.pop("restype", _params.pop("restype", "container"))
+    action: Literal["release"] = kwargs.pop("action", _headers.pop("x-ms-lease-action", "release"))
+    version: Literal["2025-01-05"] = kwargs.pop("version", _headers.pop("x-ms-version", "2025-01-05"))
+    accept = _headers.pop("Accept", "application/xml")
+
+    # Construct URL
+    _url = kwargs.pop("template_url", "{url}")
+    path_format_arguments = {
+        "url": _SERIALIZER.url("url", url, "str", skip_quote=True),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    _params["comp"] = _SERIALIZER.query("comp", comp, "str")
+    _params["restype"] = _SERIALIZER.query("restype", restype, "str")
+    if timeout is not None:
+        _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0)
+
+    # Construct headers
+    _headers["x-ms-lease-action"] = _SERIALIZER.header("action", action, "str")
+    _headers["x-ms-lease-id"] = _SERIALIZER.header("lease_id", lease_id, "str")
+    if if_modified_since is not None:
+        _headers["If-Modified-Since"] = _SERIALIZER.header("if_modified_since", if_modified_since, "rfc-1123")
+    if if_unmodified_since is not None:
+        _headers["If-Unmodified-Since"] = _SERIALIZER.header("if_unmodified_since", if_unmodified_since, "rfc-1123")
+    _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str")
+    if request_id_parameter is not None:
+        _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str")
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_renew_lease_request(
+    url: str,
+    *,
+    lease_id: str,
+    timeout: Optional[int] = None,
+    if_modified_since: Optional[datetime.datetime] = None,
+    if_unmodified_since: Optional[datetime.datetime] = None,
+    request_id_parameter: Optional[str] = None,
+    **kwargs: Any
+) -> HttpRequest:
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    comp: Literal["lease"] = kwargs.pop("comp", _params.pop("comp", "lease"))
+    restype: Literal["container"] = kwargs.pop("restype", _params.pop("restype", "container"))
+    action: Literal["renew"] = kwargs.pop("action", _headers.pop("x-ms-lease-action", "renew"))
+    version: Literal["2025-01-05"] = kwargs.pop("version", _headers.pop("x-ms-version", "2025-01-05"))
+    accept = _headers.pop("Accept", "application/xml")
+
+    # Construct URL
+    _url = kwargs.pop("template_url", "{url}")
+    path_format_arguments = {
+        "url": _SERIALIZER.url("url", url, "str", skip_quote=True),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    _params["comp"] = _SERIALIZER.query("comp", comp, "str")
+    _params["restype"] = _SERIALIZER.query("restype", restype, "str")
+    if timeout is not None:
+        _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0)
+
+    # Construct headers
+    _headers["x-ms-lease-action"] = _SERIALIZER.header("action", action, "str")
+    _headers["x-ms-lease-id"] = _SERIALIZER.header("lease_id", lease_id, "str")
+    if if_modified_since is not None:
+        _headers["If-Modified-Since"] = _SERIALIZER.header("if_modified_since", if_modified_since, "rfc-1123")
+    if if_unmodified_since is not None:
+        _headers["If-Unmodified-Since"] = _SERIALIZER.header("if_unmodified_since", if_unmodified_since, "rfc-1123")
+    _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str")
+    if request_id_parameter is not None:
+        _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str")
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_break_lease_request(
+    url: str,
+    *,
+    timeout: Optional[int] = None,
+    break_period: Optional[int] = None,
+    if_modified_since: Optional[datetime.datetime] = None,
+    if_unmodified_since: Optional[datetime.datetime] = None,
+    request_id_parameter: Optional[str] = None,
+    **kwargs: Any
+) -> HttpRequest:
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    comp: Literal["lease"] = kwargs.pop("comp", _params.pop("comp", "lease"))
+    restype: Literal["container"] = kwargs.pop("restype", _params.pop("restype", "container"))
+    action: Literal["break"] = kwargs.pop("action", _headers.pop("x-ms-lease-action", "break"))
+    version: Literal["2025-01-05"] = kwargs.pop("version", _headers.pop("x-ms-version", "2025-01-05"))
+    accept = _headers.pop("Accept", "application/xml")
+
+    # Construct URL
+    _url = kwargs.pop("template_url", "{url}")
+    path_format_arguments = {
+        "url": _SERIALIZER.url("url", url, "str", skip_quote=True),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    _params["comp"] = _SERIALIZER.query("comp", comp, "str")
+    _params["restype"] = _SERIALIZER.query("restype", restype, "str")
+    if timeout is not None:
+        _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0)
+
+    # Construct headers
+    _headers["x-ms-lease-action"] = _SERIALIZER.header("action", action, "str")
+    if break_period is not None:
+        _headers["x-ms-lease-break-period"] = _SERIALIZER.header("break_period", break_period, "int")
+    if if_modified_since is not None:
+        _headers["If-Modified-Since"] = _SERIALIZER.header("if_modified_since", if_modified_since, "rfc-1123")
+    if if_unmodified_since is not None:
+        _headers["If-Unmodified-Since"] = _SERIALIZER.header("if_unmodified_since", if_unmodified_since, "rfc-1123")
+    _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str")
+    if request_id_parameter is not None:
+        _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str")
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_change_lease_request(
+    url: str,
+    *,
+    lease_id: str,
+    proposed_lease_id: str,
+    timeout: Optional[int] = None,
+    if_modified_since: Optional[datetime.datetime] = None,
+    if_unmodified_since: Optional[datetime.datetime] = None,
+    request_id_parameter: Optional[str] = None,
+    **kwargs: Any
+) -> HttpRequest:
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    comp: Literal["lease"] = kwargs.pop("comp", _params.pop("comp", "lease"))
+    restype: Literal["container"] = kwargs.pop("restype", _params.pop("restype", "container"))
+    action: Literal["change"] = kwargs.pop("action", _headers.pop("x-ms-lease-action", "change"))
+    version: Literal["2025-01-05"] = kwargs.pop("version", _headers.pop("x-ms-version", "2025-01-05"))
+    accept = _headers.pop("Accept", "application/xml")
+
+    # Construct URL
+    _url = kwargs.pop("template_url", "{url}")
+    path_format_arguments = {
+        "url": _SERIALIZER.url("url", url, "str", skip_quote=True),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    _params["comp"] = _SERIALIZER.query("comp", comp, "str")
+    _params["restype"] = _SERIALIZER.query("restype", restype, "str")
+    if timeout is not None:
+        _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0)
+
+    # Construct headers
+    _headers["x-ms-lease-action"] = _SERIALIZER.header("action", action, "str")
+    _headers["x-ms-lease-id"] = _SERIALIZER.header("lease_id", lease_id, "str")
+    _headers["x-ms-proposed-lease-id"] = _SERIALIZER.header("proposed_lease_id", proposed_lease_id, "str")
+    if if_modified_since is not None:
+        _headers["If-Modified-Since"] = _SERIALIZER.header("if_modified_since", if_modified_since, "rfc-1123")
+    if if_unmodified_since is not None:
+        _headers["If-Unmodified-Since"] = _SERIALIZER.header("if_unmodified_since", if_unmodified_since, "rfc-1123")
+    _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str")
+    if request_id_parameter is not None:
+        _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str")
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_list_blob_flat_segment_request(
+    url: str,
+    *,
+    prefix: Optional[str] = None,
+    marker: Optional[str] = None,
+    maxresults: Optional[int] = None,
+    include: Optional[List[Union[str, _models.ListBlobsIncludeItem]]] = None,
+    timeout: Optional[int] = None,
+    request_id_parameter: Optional[str] = None,
+    **kwargs: Any
+) -> HttpRequest:
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    restype: Literal["container"] = kwargs.pop("restype", _params.pop("restype", "container"))
+    comp: Literal["list"] = kwargs.pop("comp", _params.pop("comp", "list"))
+    version: Literal["2025-01-05"] = kwargs.pop("version", _headers.pop("x-ms-version", "2025-01-05"))
+    accept = _headers.pop("Accept", "application/xml")
+
+    # Construct URL
+    _url = kwargs.pop("template_url", "{url}")
+    path_format_arguments = {
+        "url": _SERIALIZER.url("url", url, "str", skip_quote=True),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    _params["restype"] = _SERIALIZER.query("restype", restype, "str")
+    _params["comp"] = _SERIALIZER.query("comp", comp, "str")
+    if prefix is not None:
+        _params["prefix"] = _SERIALIZER.query("prefix", prefix, "str")
+    if marker is not None:
+        _params["marker"] = _SERIALIZER.query("marker", marker, "str")
+    if maxresults is not None:
+        _params["maxresults"] = _SERIALIZER.query("maxresults", maxresults, "int", minimum=1)
+    if include is not None:
+        _params["include"] = _SERIALIZER.query("include", include, "[str]", div=",")
+    if timeout is not None:
+        _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0)
+
+    # Construct headers
+    _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str")
+    if request_id_parameter is not None:
+        _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str")
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_list_blob_hierarchy_segment_request(  # pylint: disable=name-too-long
+    url: str,
+    *,
+    delimiter: str,
+    prefix: Optional[str] = None,
+    marker: Optional[str] = None,
+    maxresults: Optional[int] = None,
+    include: Optional[List[Union[str, _models.ListBlobsIncludeItem]]] = None,
+    timeout: Optional[int] = None,
+    request_id_parameter: Optional[str] = None,
+    **kwargs: Any
+) -> HttpRequest:
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    restype: Literal["container"] = kwargs.pop("restype", _params.pop("restype", "container"))
+    comp: Literal["list"] = kwargs.pop("comp", _params.pop("comp", "list"))
+    version: Literal["2025-01-05"] = kwargs.pop("version", _headers.pop("x-ms-version", "2025-01-05"))
+    accept = _headers.pop("Accept", "application/xml")
+
+    # Construct URL
+    _url = kwargs.pop("template_url", "{url}")
+    path_format_arguments = {
+        "url": _SERIALIZER.url("url", url, "str", skip_quote=True),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    _params["restype"] = _SERIALIZER.query("restype", restype, "str")
+    _params["comp"] = _SERIALIZER.query("comp", comp, "str")
+    if prefix is not None:
+        _params["prefix"] = _SERIALIZER.query("prefix", prefix, "str")
+    _params["delimiter"] = _SERIALIZER.query("delimiter", delimiter, "str")
+    if marker is not None:
+        _params["marker"] = _SERIALIZER.query("marker", marker, "str")
+    if maxresults is not None:
+        _params["maxresults"] = _SERIALIZER.query("maxresults", maxresults, "int", minimum=1)
+    if include is not None:
+        _params["include"] = _SERIALIZER.query("include", include, "[str]", div=",")
+    if timeout is not None:
+        _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0)
+
+    # Construct headers
+    _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str")
+    if request_id_parameter is not None:
+        _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str")
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
+
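+# Illustrative sketch, not part of the generated file: with delimiter="/" a
+# hierarchy listing folds blob names that share a prefix into BlobPrefix entries,
+# so "photos/2024/a.png" surfaces as the prefix "photos/2024/" rather than as a
+# blob. Assuming a hypothetical container:
+#
+#   request = build_list_blob_hierarchy_segment_request(
+#       url="https://myaccount.blob.core.windows.net/mycontainer",
+#       delimiter="/",
+#       prefix="photos/",
+#   )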
+
+def build_get_account_info_request(
+    url: str, *, timeout: Optional[int] = None, request_id_parameter: Optional[str] = None, **kwargs: Any
+) -> HttpRequest:
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    restype: Literal["account"] = kwargs.pop("restype", _params.pop("restype", "account"))
+    comp: Literal["properties"] = kwargs.pop("comp", _params.pop("comp", "properties"))
+    version: Literal["2025-01-05"] = kwargs.pop("version", _headers.pop("x-ms-version", "2025-01-05"))
+    accept = _headers.pop("Accept", "application/xml")
+
+    # Construct URL
+    _url = kwargs.pop("template_url", "{url}")
+    path_format_arguments = {
+        "url": _SERIALIZER.url("url", url, "str", skip_quote=True),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    _params["restype"] = _SERIALIZER.query("restype", restype, "str")
+    _params["comp"] = _SERIALIZER.query("comp", comp, "str")
+    if timeout is not None:
+        _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0)
+
+    # Construct headers
+    _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str")
+    if request_id_parameter is not None:
+        _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str")
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+class ContainerOperations:
+    """
+    .. warning::
+        **DO NOT** instantiate this class directly.
+
+        Instead, you should access the following operations through
+        :class:`~azure.storage.blob.AzureBlobStorage`'s
+        :attr:`container` attribute.
+    """
+
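+    # Illustrative usage sketch, not part of the generated file (account and
+    # container names are assumptions):
+    #
+    #   from azure.storage.blob._generated import AzureBlobStorage
+    #   client = AzureBlobStorage(url="https://myaccount.blob.core.windows.net/mycontainer")
+    #   client.container.create(timeout=30)
+    #
+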
+    models = _models
+
+    def __init__(self, *args, **kwargs):
+        input_args = list(args)
+        self._client: PipelineClient = input_args.pop(0) if input_args else kwargs.pop("client")
+        self._config: AzureBlobStorageConfiguration = input_args.pop(0) if input_args else kwargs.pop("config")
+        self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer")
+        self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer")
+
+    @distributed_trace
+    def create(  # pylint: disable=inconsistent-return-statements
+        self,
+        timeout: Optional[int] = None,
+        metadata: Optional[Dict[str, str]] = None,
+        access: Optional[Union[str, _models.PublicAccessType]] = None,
+        request_id_parameter: Optional[str] = None,
+        container_cpk_scope_info: Optional[_models.ContainerCpkScopeInfo] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """creates a new container under the specified account. If the container with the same name
+        already exists, the operation fails.
+
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param metadata: Optional. Specifies user-defined name-value pairs associated with the
+         container. Note that beginning with version 2009-09-19, metadata names must adhere to the
+         naming rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata
+         for more information. Default value is None.
+        :type metadata: dict[str, str]
+        :param access: Specifies whether data in the container may be accessed publicly and the level
+         of access. Known values are: "container" and "blob". Default value is None.
+        :type access: str or ~azure.storage.blob.models.PublicAccessType
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param container_cpk_scope_info: Parameter group. Default value is None.
+        :type container_cpk_scope_info: ~azure.storage.blob.models.ContainerCpkScopeInfo
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        restype: Literal["container"] = kwargs.pop("restype", _params.pop("restype", "container"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _default_encryption_scope = None
+        _prevent_encryption_scope_override = None
+        if container_cpk_scope_info is not None:
+            _default_encryption_scope = container_cpk_scope_info.default_encryption_scope
+            _prevent_encryption_scope_override = container_cpk_scope_info.prevent_encryption_scope_override
+
+        _request = build_create_request(
+            url=self._config.url,
+            timeout=timeout,
+            metadata=metadata,
+            access=access,
+            request_id_parameter=request_id_parameter,
+            default_encryption_scope=_default_encryption_scope,
+            prevent_encryption_scope_override=_prevent_encryption_scope_override,
+            restype=restype,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [201]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
+    @distributed_trace
+    def get_properties(  # pylint: disable=inconsistent-return-statements
+        self,
+        timeout: Optional[int] = None,
+        request_id_parameter: Optional[str] = None,
+        lease_access_conditions: Optional[_models.LeaseAccessConditions] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """returns all user-defined metadata and system properties for the specified container. The data
+        returned does not include the container's list of blobs.
+
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param lease_access_conditions: Parameter group. Default value is None.
+        :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        restype: Literal["container"] = kwargs.pop("restype", _params.pop("restype", "container"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _lease_id = None
+        if lease_access_conditions is not None:
+            _lease_id = lease_access_conditions.lease_id
+
+        _request = build_get_properties_request(
+            url=self._config.url,
+            timeout=timeout,
+            lease_id=_lease_id,
+            request_id_parameter=request_id_parameter,
+            restype=restype,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["x-ms-meta"] = self._deserialize("{str}", response.headers.get("x-ms-meta"))
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["x-ms-lease-duration"] = self._deserialize("str", response.headers.get("x-ms-lease-duration"))
+        response_headers["x-ms-lease-state"] = self._deserialize("str", response.headers.get("x-ms-lease-state"))
+        response_headers["x-ms-lease-status"] = self._deserialize("str", response.headers.get("x-ms-lease-status"))
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+        response_headers["x-ms-blob-public-access"] = self._deserialize(
+            "str", response.headers.get("x-ms-blob-public-access")
+        )
+        response_headers["x-ms-has-immutability-policy"] = self._deserialize(
+            "bool", response.headers.get("x-ms-has-immutability-policy")
+        )
+        response_headers["x-ms-has-legal-hold"] = self._deserialize("bool", response.headers.get("x-ms-has-legal-hold"))
+        response_headers["x-ms-default-encryption-scope"] = self._deserialize(
+            "str", response.headers.get("x-ms-default-encryption-scope")
+        )
+        response_headers["x-ms-deny-encryption-scope-override"] = self._deserialize(
+            "bool", response.headers.get("x-ms-deny-encryption-scope-override")
+        )
+        response_headers["x-ms-immutable-storage-with-versioning-enabled"] = self._deserialize(
+            "bool", response.headers.get("x-ms-immutable-storage-with-versioning-enabled")
+        )
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
+    @distributed_trace
+    def delete(  # pylint: disable=inconsistent-return-statements
+        self,
+        timeout: Optional[int] = None,
+        request_id_parameter: Optional[str] = None,
+        lease_access_conditions: Optional[_models.LeaseAccessConditions] = None,
+        modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """operation marks the specified container for deletion. The container and any blobs contained
+        within it are later deleted during garbage collection.
+
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param lease_access_conditions: Parameter group. Default value is None.
+        :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions
+        :param modified_access_conditions: Parameter group. Default value is None.
+        :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        restype: Literal["container"] = kwargs.pop("restype", _params.pop("restype", "container"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _lease_id = None
+        _if_modified_since = None
+        _if_unmodified_since = None
+        if lease_access_conditions is not None:
+            _lease_id = lease_access_conditions.lease_id
+        if modified_access_conditions is not None:
+            _if_modified_since = modified_access_conditions.if_modified_since
+            _if_unmodified_since = modified_access_conditions.if_unmodified_since
+
+        _request = build_delete_request(
+            url=self._config.url,
+            timeout=timeout,
+            lease_id=_lease_id,
+            if_modified_since=_if_modified_since,
+            if_unmodified_since=_if_unmodified_since,
+            request_id_parameter=request_id_parameter,
+            restype=restype,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [202]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
+    @distributed_trace
+    def set_metadata(  # pylint: disable=inconsistent-return-statements
+        self,
+        timeout: Optional[int] = None,
+        metadata: Optional[Dict[str, str]] = None,
+        request_id_parameter: Optional[str] = None,
+        lease_access_conditions: Optional[_models.LeaseAccessConditions] = None,
+        modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """operation sets one or more user-defined name-value pairs for the specified container.
+
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param metadata: Optional. Specifies user-defined name-value pairs associated with the
+         container. Note that beginning with version 2009-09-19, metadata names must adhere to the
+         naming rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata
+         for more information. Default value is None.
+        :type metadata: dict[str, str]
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param lease_access_conditions: Parameter group. Default value is None.
+        :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions
+        :param modified_access_conditions: Parameter group. Default value is None.
+        :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        restype: Literal["container"] = kwargs.pop("restype", _params.pop("restype", "container"))
+        comp: Literal["metadata"] = kwargs.pop("comp", _params.pop("comp", "metadata"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _lease_id = None
+        _if_modified_since = None
+        if lease_access_conditions is not None:
+            _lease_id = lease_access_conditions.lease_id
+        if modified_access_conditions is not None:
+            _if_modified_since = modified_access_conditions.if_modified_since
+
+        _request = build_set_metadata_request(
+            url=self._config.url,
+            timeout=timeout,
+            lease_id=_lease_id,
+            metadata=metadata,
+            if_modified_since=_if_modified_since,
+            request_id_parameter=request_id_parameter,
+            restype=restype,
+            comp=comp,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
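+        # Illustrative usage sketch (not part of the generated code): assuming
+        # `container` is an instance of this operations class, a single call
+        # replaces the container's entire metadata set:
+        #
+        #     container.set_metadata(metadata={"team": "data", "env": "prod"})
+        #
+        # A successful call returns 200; the updated ETag and Last-Modified
+        # values are surfaced through the deserialized response headers above.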
+
+    @distributed_trace
+    def get_access_policy(
+        self,
+        timeout: Optional[int] = None,
+        request_id_parameter: Optional[str] = None,
+        lease_access_conditions: Optional[_models.LeaseAccessConditions] = None,
+        **kwargs: Any
+    ) -> List[_models.SignedIdentifier]:
+        # pylint: disable=line-too-long
+        """gets the permissions for the specified container. The permissions indicate whether container
+        data may be accessed publicly.
+
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param lease_access_conditions: Parameter group. Default value is None.
+        :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions
+        :return: list of SignedIdentifier or the result of cls(response)
+        :rtype: list[~azure.storage.blob.models.SignedIdentifier]
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        restype: Literal["container"] = kwargs.pop("restype", _params.pop("restype", "container"))
+        comp: Literal["acl"] = kwargs.pop("comp", _params.pop("comp", "acl"))
+        cls: ClsType[List[_models.SignedIdentifier]] = kwargs.pop("cls", None)
+
+        _lease_id = None
+        if lease_access_conditions is not None:
+            _lease_id = lease_access_conditions.lease_id
+
+        _request = build_get_access_policy_request(
+            url=self._config.url,
+            timeout=timeout,
+            lease_id=_lease_id,
+            request_id_parameter=request_id_parameter,
+            restype=restype,
+            comp=comp,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["x-ms-blob-public-access"] = self._deserialize(
+            "str", response.headers.get("x-ms-blob-public-access")
+        )
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+
+        deserialized = self._deserialize("[SignedIdentifier]", pipeline_response.http_response)
+
+        if cls:
+            return cls(pipeline_response, deserialized, response_headers)  # type: ignore
+
+        return deserialized  # type: ignore
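+        # Illustrative usage sketch (not part of the generated code): assuming
+        # `container` is an instance of this operations class, the call returns
+        # the stored access policies as SignedIdentifier models parsed from the
+        # XML response body:
+        #
+        #     identifiers = container.get_access_policy()
+        #     for identifier in identifiers:
+        #         print(identifier.id, identifier.access_policy.permission)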
+
+    @distributed_trace
+    def set_access_policy(  # pylint: disable=inconsistent-return-statements
+        self,
+        timeout: Optional[int] = None,
+        access: Optional[Union[str, _models.PublicAccessType]] = None,
+        request_id_parameter: Optional[str] = None,
+        lease_access_conditions: Optional[_models.LeaseAccessConditions] = None,
+        modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None,
+        container_acl: Optional[List[_models.SignedIdentifier]] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """sets the permissions for the specified container. The permissions indicate whether blobs in a
+        container may be accessed publicly.
+
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param access: Specifies whether data in the container may be accessed publicly and the level
+         of access. Known values are: "container" and "blob". Default value is None.
+        :type access: str or ~azure.storage.blob.models.PublicAccessType
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param lease_access_conditions: Parameter group. Default value is None.
+        :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions
+        :param modified_access_conditions: Parameter group. Default value is None.
+        :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions
+        :param container_acl: The ACLs for the container. Default value is None.
+        :type container_acl: list[~azure.storage.blob.models.SignedIdentifier]
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        restype: Literal["container"] = kwargs.pop("restype", _params.pop("restype", "container"))
+        comp: Literal["acl"] = kwargs.pop("comp", _params.pop("comp", "acl"))
+        content_type: str = kwargs.pop("content_type", _headers.pop("Content-Type", "application/xml"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _lease_id = None
+        _if_modified_since = None
+        _if_unmodified_since = None
+        if lease_access_conditions is not None:
+            _lease_id = lease_access_conditions.lease_id
+        if modified_access_conditions is not None:
+            _if_modified_since = modified_access_conditions.if_modified_since
+            _if_unmodified_since = modified_access_conditions.if_unmodified_since
+        serialization_ctxt = {"xml": {"name": "SignedIdentifiers", "wrapped": True, "itemsName": "SignedIdentifier"}}
+        if container_acl is not None:
+            _content = self._serialize.body(
+                container_acl, "[SignedIdentifier]", is_xml=True, serialization_ctxt=serialization_ctxt
+            )
+        else:
+            _content = None
+
+        _request = build_set_access_policy_request(
+            url=self._config.url,
+            timeout=timeout,
+            lease_id=_lease_id,
+            access=access,
+            if_modified_since=_if_modified_since,
+            if_unmodified_since=_if_unmodified_since,
+            request_id_parameter=request_id_parameter,
+            restype=restype,
+            comp=comp,
+            content_type=content_type,
+            version=self._config.version,
+            content=_content,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
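+        # Illustrative usage sketch (not part of the generated code): grants
+        # public read access to blobs and stores one signed identifier. The
+        # policy values below are invented placeholders, not recommended
+        # defaults:
+        #
+        #     policy = _models.AccessPolicy(
+        #         permission="r", start="2025-01-01T00:00:00Z", expiry="2025-02-01T00:00:00Z"
+        #     )
+        #     acl = [_models.SignedIdentifier(id="read-only", access_policy=policy)]
+        #     container.set_access_policy(access="blob", container_acl=acl)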
+
+    @distributed_trace
+    def restore(  # pylint: disable=inconsistent-return-statements
+        self,
+        timeout: Optional[int] = None,
+        request_id_parameter: Optional[str] = None,
+        deleted_container_name: Optional[str] = None,
+        deleted_container_version: Optional[str] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """Restores a previously-deleted container.
+
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param deleted_container_name: Optional.  Version 2019-12-12 and later.  Specifies the name of
+         the deleted container to restore. Default value is None.
+        :type deleted_container_name: str
+        :param deleted_container_version: Optional.  Version 2019-12-12 and later.  Specifies the
+         version of the deleted container to restore. Default value is None.
+        :type deleted_container_version: str
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        restype: Literal["container"] = kwargs.pop("restype", _params.pop("restype", "container"))
+        comp: Literal["undelete"] = kwargs.pop("comp", _params.pop("comp", "undelete"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _request = build_restore_request(
+            url=self._config.url,
+            timeout=timeout,
+            request_id_parameter=request_id_parameter,
+            deleted_container_name=deleted_container_name,
+            deleted_container_version=deleted_container_version,
+            restype=restype,
+            comp=comp,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [201]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
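+        # Illustrative usage sketch (not part of the generated code): the name
+        # and version of a soft-deleted container typically come from a prior
+        # listing that includes deleted containers; both values below are
+        # placeholders:
+        #
+        #     container.restore(
+        #         deleted_container_name="old-logs",
+        #         deleted_container_version="01D60F8BB59A4652",
+        #     )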
+
+    @distributed_trace
+    def rename(  # pylint: disable=inconsistent-return-statements
+        self,
+        source_container_name: str,
+        timeout: Optional[int] = None,
+        request_id_parameter: Optional[str] = None,
+        source_lease_id: Optional[str] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """Renames an existing container.
+
+        :param source_container_name: Specifies the name of the container to rename. Required.
+        :type source_container_name: str
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param source_lease_id: A lease ID for the source path. If specified, the source path must have
+         an active lease and the lease ID must match. Default value is None.
+        :type source_lease_id: str
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        restype: Literal["container"] = kwargs.pop("restype", _params.pop("restype", "container"))
+        comp: Literal["rename"] = kwargs.pop("comp", _params.pop("comp", "rename"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _request = build_rename_request(
+            url=self._config.url,
+            source_container_name=source_container_name,
+            timeout=timeout,
+            request_id_parameter=request_id_parameter,
+            source_lease_id=source_lease_id,
+            restype=restype,
+            comp=comp,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
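+        # Illustrative usage sketch (not part of the generated code): renames
+        # the named source container to the container name this client's URL
+        # addresses; the source name below is a placeholder:
+        #
+        #     container.rename(source_container_name="staging-data")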
+
+    @distributed_trace
+    def submit_batch(
+        self,
+        content_length: int,
+        body: IO[bytes],
+        timeout: Optional[int] = None,
+        request_id_parameter: Optional[str] = None,
+        **kwargs: Any
+    ) -> Iterator[bytes]:
+        # pylint: disable=line-too-long
+        """The Batch operation allows multiple API calls to be embedded into a single HTTP request.
+
+        :param content_length: The length of the request. Required.
+        :type content_length: int
+        :param body: Initial data. Required.
+        :type body: IO[bytes]
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :return: Iterator[bytes] or the result of cls(response)
+        :rtype: Iterator[bytes]
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        restype: Literal["container"] = kwargs.pop("restype", _params.pop("restype", "container"))
+        comp: Literal["batch"] = kwargs.pop("comp", _params.pop("comp", "batch"))
+        multipart_content_type: str = kwargs.pop(
+            "multipart_content_type", _headers.pop("Content-Type", "application/xml")
+        )
+        cls: ClsType[Iterator[bytes]] = kwargs.pop("cls", None)
+
+        _content = body
+
+        _request = build_submit_batch_request(
+            url=self._config.url,
+            content_length=content_length,
+            timeout=timeout,
+            request_id_parameter=request_id_parameter,
+            restype=restype,
+            comp=comp,
+            multipart_content_type=multipart_content_type,
+            version=self._config.version,
+            content=_content,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _decompress = kwargs.pop("decompress", True)
+        _stream = True
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [202]:
+            try:
+                response.read()  # Load the body in memory and close the socket
+            except (StreamConsumedError, StreamClosedError):
+                pass
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["Content-Type"] = self._deserialize("str", response.headers.get("Content-Type"))
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+
+        deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
+
+        if cls:
+            return cls(pipeline_response, deserialized, response_headers)  # type: ignore
+
+        return deserialized  # type: ignore
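+        # Illustrative usage sketch (not part of the generated code): `payload`
+        # is a placeholder for a pre-encoded multipart/mixed batch body, which
+        # callers normally build through the SDK's higher-level batch helpers
+        # rather than by hand:
+        #
+        #     from io import BytesIO
+        #     stream = container.submit_batch(content_length=len(payload), body=BytesIO(payload))
+        #     raw = b"".join(stream)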
+
+    @distributed_trace
+    def filter_blobs(
+        self,
+        timeout: Optional[int] = None,
+        request_id_parameter: Optional[str] = None,
+        where: Optional[str] = None,
+        marker: Optional[str] = None,
+        maxresults: Optional[int] = None,
+        include: Optional[List[Union[str, _models.FilterBlobsIncludeItem]]] = None,
+        **kwargs: Any
+    ) -> _models.FilterBlobSegment:
+        # pylint: disable=line-too-long
+        """The Filter Blobs operation enables callers to list blobs in a container whose tags match a
+        given search expression.  Filter blobs searches within the given container.
+
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param where: Filters the results to return only blobs whose tags match the specified
+         expression. Default value is None.
+        :type where: str
+        :param marker: A string value that identifies the portion of the result set to be returned
+         with the next operation. The operation returns the NextMarker value within the response body
+         if the operation did not return all results remaining to be listed with the current page. The
+         NextMarker value can be used as the value for the marker parameter in a subsequent call to
+         request the next page of results. The marker value is opaque to the client. Default value is
+         None.
+        :type marker: str
+        :param maxresults: Specifies the maximum number of blobs to return. If the request does not
+         specify maxresults, or specifies a value greater than 5000, the server will return up to 5000
+         items. Note that if the operation crosses a partition boundary, then the service will return
+         a continuation token for retrieving the remainder of the results. For this reason, it is
+         possible that the service will return fewer results than specified by maxresults, or than the
+         default of 5000. Default value is None.
+        :type maxresults: int
+        :param include: Include this parameter to specify one or more datasets to include in the
+         response. Default value is None.
+        :type include: list[str or ~azure.storage.blob.models.FilterBlobsIncludeItem]
+        :return: FilterBlobSegment or the result of cls(response)
+        :rtype: ~azure.storage.blob.models.FilterBlobSegment
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        restype: Literal["container"] = kwargs.pop("restype", _params.pop("restype", "container"))
+        comp: Literal["blobs"] = kwargs.pop("comp", _params.pop("comp", "blobs"))
+        cls: ClsType[_models.FilterBlobSegment] = kwargs.pop("cls", None)
+
+        _request = build_filter_blobs_request(
+            url=self._config.url,
+            timeout=timeout,
+            request_id_parameter=request_id_parameter,
+            where=where,
+            marker=marker,
+            maxresults=maxresults,
+            include=include,
+            restype=restype,
+            comp=comp,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+
+        deserialized = self._deserialize("FilterBlobSegment", pipeline_response.http_response)
+
+        if cls:
+            return cls(pipeline_response, deserialized, response_headers)  # type: ignore
+
+        return deserialized  # type: ignore
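+        # Illustrative usage sketch (not part of the generated code): the
+        # `where` expression uses the Find Blobs by Tags filter syntax; the tag
+        # name and value below are placeholders:
+        #
+        #     segment = container.filter_blobs(where="\"status\"='archived'", maxresults=100)
+        #     for blob in segment.blobs:
+        #         print(blob.name)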
+
+    @distributed_trace
+    def acquire_lease(  # pylint: disable=inconsistent-return-statements
+        self,
+        timeout: Optional[int] = None,
+        duration: Optional[int] = None,
+        proposed_lease_id: Optional[str] = None,
+        request_id_parameter: Optional[str] = None,
+        modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """[Update] establishes and manages a lock on a container for delete operations. The lock duration
+        can be 15 to 60 seconds, or can be infinite.
+
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param duration: Specifies the duration of the lease, in seconds, or negative one (-1) for a
+         lease that never expires. A non-infinite lease can be between 15 and 60 seconds. A lease
+         duration cannot be changed using renew or change. Default value is None.
+        :type duration: int
+        :param proposed_lease_id: Proposed lease ID, in a GUID string format. The Blob service returns
+         400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid
+         Constructor (String) for a list of valid GUID string formats. Default value is None.
+        :type proposed_lease_id: str
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param modified_access_conditions: Parameter group. Default value is None.
+        :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        comp: Literal["lease"] = kwargs.pop("comp", _params.pop("comp", "lease"))
+        restype: Literal["container"] = kwargs.pop("restype", _params.pop("restype", "container"))
+        action: Literal["acquire"] = kwargs.pop("action", _headers.pop("x-ms-lease-action", "acquire"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _if_modified_since = None
+        _if_unmodified_since = None
+        if modified_access_conditions is not None:
+            _if_modified_since = modified_access_conditions.if_modified_since
+            _if_unmodified_since = modified_access_conditions.if_unmodified_since
+
+        _request = build_acquire_lease_request(
+            url=self._config.url,
+            timeout=timeout,
+            duration=duration,
+            proposed_lease_id=proposed_lease_id,
+            if_modified_since=_if_modified_since,
+            if_unmodified_since=_if_unmodified_since,
+            request_id_parameter=request_id_parameter,
+            comp=comp,
+            restype=restype,
+            action=action,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [201]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["x-ms-lease-id"] = self._deserialize("str", response.headers.get("x-ms-lease-id"))
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
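+        # Illustrative usage sketch (not part of the generated code): acquires
+        # a 15-second lease and reads the lease ID from the response headers
+        # through a `cls` callback, since the operation itself returns None:
+        #
+        #     headers = container.acquire_lease(
+        #         duration=15, cls=lambda resp, body, hdrs: hdrs
+        #     )
+        #     lease_id = headers["x-ms-lease-id"]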
+
+    @distributed_trace
+    def release_lease(  # pylint: disable=inconsistent-return-statements
+        self,
+        lease_id: str,
+        timeout: Optional[int] = None,
+        request_id_parameter: Optional[str] = None,
+        modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """[Update] establishes and manages a lock on a container for delete operations. The lock duration
+        can be 15 to 60 seconds, or can be infinite.
+
+        :param lease_id: Specifies the current lease ID on the resource. Required.
+        :type lease_id: str
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param modified_access_conditions: Parameter group. Default value is None.
+        :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        comp: Literal["lease"] = kwargs.pop("comp", _params.pop("comp", "lease"))
+        restype: Literal["container"] = kwargs.pop("restype", _params.pop("restype", "container"))
+        action: Literal["release"] = kwargs.pop("action", _headers.pop("x-ms-lease-action", "release"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _if_modified_since = None
+        _if_unmodified_since = None
+        if modified_access_conditions is not None:
+            _if_modified_since = modified_access_conditions.if_modified_since
+            _if_unmodified_since = modified_access_conditions.if_unmodified_since
+
+        _request = build_release_lease_request(
+            url=self._config.url,
+            lease_id=lease_id,
+            timeout=timeout,
+            if_modified_since=_if_modified_since,
+            if_unmodified_since=_if_unmodified_since,
+            request_id_parameter=request_id_parameter,
+            comp=comp,
+            restype=restype,
+            action=action,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
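+        # Illustrative usage sketch (not part of the generated code): releases
+        # an active lease; `lease_id` is assumed to hold the value returned in
+        # the x-ms-lease-id header when the lease was acquired:
+        #
+        #     container.release_lease(lease_id=lease_id)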
+
+    @distributed_trace
+    def renew_lease(  # pylint: disable=inconsistent-return-statements
+        self,
+        lease_id: str,
+        timeout: Optional[int] = None,
+        request_id_parameter: Optional[str] = None,
+        modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """[Update] establishes and manages a lock on a container for delete operations. The lock duration
+        can be 15 to 60 seconds, or can be infinite.
+
+        :param lease_id: Specifies the current lease ID on the resource. Required.
+        :type lease_id: str
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param modified_access_conditions: Parameter group. Default value is None.
+        :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        comp: Literal["lease"] = kwargs.pop("comp", _params.pop("comp", "lease"))
+        restype: Literal["container"] = kwargs.pop("restype", _params.pop("restype", "container"))
+        action: Literal["renew"] = kwargs.pop("action", _headers.pop("x-ms-lease-action", "renew"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _if_modified_since = None
+        _if_unmodified_since = None
+        if modified_access_conditions is not None:
+            _if_modified_since = modified_access_conditions.if_modified_since
+            _if_unmodified_since = modified_access_conditions.if_unmodified_since
+
+        _request = build_renew_lease_request(
+            url=self._config.url,
+            lease_id=lease_id,
+            timeout=timeout,
+            if_modified_since=_if_modified_since,
+            if_unmodified_since=_if_unmodified_since,
+            request_id_parameter=request_id_parameter,
+            comp=comp,
+            restype=restype,
+            action=action,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["x-ms-lease-id"] = self._deserialize("str", response.headers.get("x-ms-lease-id"))
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
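+        # Illustrative usage sketch (not part of the generated code): renewal
+        # restarts the original lease duration, and succeeds even after expiry
+        # as long as the container has not been leased again since:
+        #
+        #     container.renew_lease(lease_id=lease_id)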
+
+    @distributed_trace
+    def break_lease(  # pylint: disable=inconsistent-return-statements
+        self,
+        timeout: Optional[int] = None,
+        break_period: Optional[int] = None,
+        request_id_parameter: Optional[str] = None,
+        modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """[Update] establishes and manages a lock on a container for delete operations. The lock duration
+        can be 15 to 60 seconds, or can be infinite.
+
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param break_period: For a break operation, proposed duration the lease should continue before
+         it is broken, in seconds, between 0 and 60. This break period is only used if it is shorter
+         than the time remaining on the lease. If longer, the time remaining on the lease is used. A new
+         lease will not be available before the break period has expired, but the lease may be held for
+         longer than the break period. If this header does not appear with a break operation, a
+         fixed-duration lease breaks after the remaining lease period elapses, and an infinite lease
+         breaks immediately. Default value is None.
+        :type break_period: int
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param modified_access_conditions: Parameter group. Default value is None.
+        :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        comp: Literal["lease"] = kwargs.pop("comp", _params.pop("comp", "lease"))
+        restype: Literal["container"] = kwargs.pop("restype", _params.pop("restype", "container"))
+        action: Literal["break"] = kwargs.pop("action", _headers.pop("x-ms-lease-action", "break"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _if_modified_since = None
+        _if_unmodified_since = None
+        if modified_access_conditions is not None:
+            _if_modified_since = modified_access_conditions.if_modified_since
+            _if_unmodified_since = modified_access_conditions.if_unmodified_since
+
+        _request = build_break_lease_request(
+            url=self._config.url,
+            timeout=timeout,
+            break_period=break_period,
+            if_modified_since=_if_modified_since,
+            if_unmodified_since=_if_unmodified_since,
+            request_id_parameter=request_id_parameter,
+            comp=comp,
+            restype=restype,
+            action=action,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [202]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["x-ms-lease-time"] = self._deserialize("int", response.headers.get("x-ms-lease-time"))
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
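+        # Illustrative usage sketch (not part of the generated code): breaks
+        # the current lease without needing its ID; the x-ms-lease-time
+        # response header (deserialized above) reports the seconds remaining
+        # before the break completes:
+        #
+        #     container.break_lease(break_period=0)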
+
+    @distributed_trace
+    def change_lease(  # pylint: disable=inconsistent-return-statements
+        self,
+        lease_id: str,
+        proposed_lease_id: str,
+        timeout: Optional[int] = None,
+        request_id_parameter: Optional[str] = None,
+        modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """[Update] establishes and manages a lock on a container for delete operations. The lock duration
+        can be 15 to 60 seconds, or can be infinite.
+
+        :param lease_id: Specifies the current lease ID on the resource. Required.
+        :type lease_id: str
+        :param proposed_lease_id: Proposed lease ID, in a GUID string format. The Blob service returns
+         400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid
+         Constructor (String) for a list of valid GUID string formats. Required.
+        :type proposed_lease_id: str
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param modified_access_conditions: Parameter group. Default value is None.
+        :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        comp: Literal["lease"] = kwargs.pop("comp", _params.pop("comp", "lease"))
+        restype: Literal["container"] = kwargs.pop("restype", _params.pop("restype", "container"))
+        action: Literal["change"] = kwargs.pop("action", _headers.pop("x-ms-lease-action", "change"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _if_modified_since = None
+        _if_unmodified_since = None
+        if modified_access_conditions is not None:
+            _if_modified_since = modified_access_conditions.if_modified_since
+            _if_unmodified_since = modified_access_conditions.if_unmodified_since
+
+        _request = build_change_lease_request(
+            url=self._config.url,
+            lease_id=lease_id,
+            proposed_lease_id=proposed_lease_id,
+            timeout=timeout,
+            if_modified_since=_if_modified_since,
+            if_unmodified_since=_if_unmodified_since,
+            request_id_parameter=request_id_parameter,
+            comp=comp,
+            restype=restype,
+            action=action,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["x-ms-lease-id"] = self._deserialize("str", response.headers.get("x-ms-lease-id"))
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
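+    # Editor's note -- illustrative usage sketch, not generated code. ``container_ops``
+    # and ``current_lease_id`` are assumed to come from the caller; the optional ``cls``
+    # response hook (see the end of change_lease above) surfaces the response headers so
+    # the rotated lease id can be read back:
+    #
+    #     headers = container_ops.change_lease(
+    #         lease_id=current_lease_id,
+    #         proposed_lease_id=str(uuid.uuid4()),  # any well-formed GUID to move the lease to
+    #         cls=lambda resp, body, hdrs: hdrs,
+    #     )
+    #     new_lease_id = headers["x-ms-lease-id"]
+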
+    @distributed_trace
+    def list_blob_flat_segment(
+        self,
+        prefix: Optional[str] = None,
+        marker: Optional[str] = None,
+        maxresults: Optional[int] = None,
+        include: Optional[List[Union[str, _models.ListBlobsIncludeItem]]] = None,
+        timeout: Optional[int] = None,
+        request_id_parameter: Optional[str] = None,
+        **kwargs: Any
+    ) -> _models.ListBlobsFlatSegmentResponse:
+        # pylint: disable=line-too-long
+        """[Update] The List Blobs operation returns a list of the blobs under the specified container.
+
+        :param prefix: Filters the results to return only blobs whose names begin with the
+         specified prefix. Default value is None.
+        :type prefix: str
+        :param marker: A string value that identifies the portion of the list of blobs to be
+         returned with the next listing operation. The operation returns the NextMarker value within
+         the response body if the listing operation did not return all blobs remaining to be listed
+         with the current page. The NextMarker value can be used as the value for the marker
+         parameter in a subsequent call to request the next page of list items. The marker value is
+         opaque to the client. Default value is None.
+        :type marker: str
+        :param maxresults: Specifies the maximum number of blobs to return. If the request does not
+         specify maxresults, or specifies a value greater than 5000, the server will return up to
+         5000 items. Note that if the listing operation crosses a partition boundary, then the
+         service will return a continuation token for retrieving the remainder of the results. For
+         this reason, it is possible that the service will return fewer results than specified by
+         maxresults, or than the default of 5000. Default value is None.
+        :type maxresults: int
+        :param include: Include this parameter to specify one or more datasets to include in the
+         response. Default value is None.
+        :type include: list[str or ~azure.storage.blob.models.ListBlobsIncludeItem]
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :return: ListBlobsFlatSegmentResponse or the result of cls(response)
+        :rtype: ~azure.storage.blob.models.ListBlobsFlatSegmentResponse
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        restype: Literal["container"] = kwargs.pop("restype", _params.pop("restype", "container"))
+        comp: Literal["list"] = kwargs.pop("comp", _params.pop("comp", "list"))
+        cls: ClsType[_models.ListBlobsFlatSegmentResponse] = kwargs.pop("cls", None)
+
+        _request = build_list_blob_flat_segment_request(
+            url=self._config.url,
+            prefix=prefix,
+            marker=marker,
+            maxresults=maxresults,
+            include=include,
+            timeout=timeout,
+            request_id_parameter=request_id_parameter,
+            restype=restype,
+            comp=comp,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["Content-Type"] = self._deserialize("str", response.headers.get("Content-Type"))
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+
+        deserialized = self._deserialize("ListBlobsFlatSegmentResponse", pipeline_response.http_response)
+
+        if cls:
+            return cls(pipeline_response, deserialized, response_headers)  # type: ignore
+
+        return deserialized  # type: ignore
+
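+    # Editor's note -- illustrative pagination sketch, not generated code. It follows
+    # NextMarker until the listing is exhausted; ``container_ops`` is assumed to be an
+    # already-constructed operations object:
+    #
+    #     blobs, marker = [], None
+    #     while True:
+    #         page = container_ops.list_blob_flat_segment(marker=marker, maxresults=1000)
+    #         blobs.extend(page.segment.blob_items)
+    #         marker = page.next_marker
+    #         if not marker:  # an empty NextMarker means this was the final page
+    #             break
+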
+    @distributed_trace
+    def list_blob_hierarchy_segment(
+        self,
+        delimiter: str,
+        prefix: Optional[str] = None,
+        marker: Optional[str] = None,
+        maxresults: Optional[int] = None,
+        include: Optional[List[Union[str, _models.ListBlobsIncludeItem]]] = None,
+        timeout: Optional[int] = None,
+        request_id_parameter: Optional[str] = None,
+        **kwargs: Any
+    ) -> _models.ListBlobsHierarchySegmentResponse:
+        # pylint: disable=line-too-long
+        """[Update] The List Blobs operation returns a list of the blobs under the specified container.
+
+        :param delimiter: When the request includes this parameter, the operation returns a BlobPrefix
+         element in the response body that acts as a placeholder for all blobs whose names begin with
+         the same substring up to the appearance of the delimiter character. The delimiter may be a
+         single character or a string. Required.
+        :type delimiter: str
+        :param prefix: Filters the results to return only blobs whose names begin with the
+         specified prefix. Default value is None.
+        :type prefix: str
+        :param marker: A string value that identifies the portion of the list of blobs to be
+         returned with the next listing operation. The operation returns the NextMarker value within
+         the response body if the listing operation did not return all blobs remaining to be listed
+         with the current page. The NextMarker value can be used as the value for the marker
+         parameter in a subsequent call to request the next page of list items. The marker value is
+         opaque to the client. Default value is None.
+        :type marker: str
+        :param maxresults: Specifies the maximum number of blobs to return. If the request does not
+         specify maxresults, or specifies a value greater than 5000, the server will return up to
+         5000 items. Note that if the listing operation crosses a partition boundary, then the
+         service will return a continuation token for retrieving the remainder of the results. For
+         this reason, it is possible that the service will return fewer results than specified by
+         maxresults, or than the default of 5000. Default value is None.
+        :type maxresults: int
+        :param include: Include this parameter to specify one or more datasets to include in the
+         response. Default value is None.
+        :type include: list[str or ~azure.storage.blob.models.ListBlobsIncludeItem]
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :return: ListBlobsHierarchySegmentResponse or the result of cls(response)
+        :rtype: ~azure.storage.blob.models.ListBlobsHierarchySegmentResponse
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        restype: Literal["container"] = kwargs.pop("restype", _params.pop("restype", "container"))
+        comp: Literal["list"] = kwargs.pop("comp", _params.pop("comp", "list"))
+        cls: ClsType[_models.ListBlobsHierarchySegmentResponse] = kwargs.pop("cls", None)
+
+        _request = build_list_blob_hierarchy_segment_request(
+            url=self._config.url,
+            delimiter=delimiter,
+            prefix=prefix,
+            marker=marker,
+            maxresults=maxresults,
+            include=include,
+            timeout=timeout,
+            request_id_parameter=request_id_parameter,
+            restype=restype,
+            comp=comp,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["Content-Type"] = self._deserialize("str", response.headers.get("Content-Type"))
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+
+        deserialized = self._deserialize("ListBlobsHierarchySegmentResponse", pipeline_response.http_response)
+
+        if cls:
+            return cls(pipeline_response, deserialized, response_headers)  # type: ignore
+
+        return deserialized  # type: ignore
+
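+    # Editor's note -- illustrative sketch, not generated code. With a delimiter of "/",
+    # the response separates "virtual directories" (BlobPrefix elements) from the blobs
+    # at the current level; ``container_ops`` and the prefix are assumptions:
+    #
+    #     page = container_ops.list_blob_hierarchy_segment(delimiter="/", prefix="photos/")
+    #     subdirs = page.segment.blob_prefixes  # e.g. photos/2024/, photos/raw/
+    #     files = page.segment.blob_items       # blobs directly under photos/
+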
+    @distributed_trace
+    def get_account_info(  # pylint: disable=inconsistent-return-statements
+        self, timeout: Optional[int] = None, request_id_parameter: Optional[str] = None, **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """Returns the sku name and account kind.
+
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        restype: Literal["account"] = kwargs.pop("restype", _params.pop("restype", "account"))
+        comp: Literal["properties"] = kwargs.pop("comp", _params.pop("comp", "properties"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _request = build_get_account_info_request(
+            url=self._config.url,
+            timeout=timeout,
+            request_id_parameter=request_id_parameter,
+            restype=restype,
+            comp=comp,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+        response_headers["x-ms-sku-name"] = self._deserialize("str", response.headers.get("x-ms-sku-name"))
+        response_headers["x-ms-account-kind"] = self._deserialize("str", response.headers.get("x-ms-account-kind"))
+        response_headers["x-ms-is-hns-enabled"] = self._deserialize("bool", response.headers.get("x-ms-is-hns-enabled"))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
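+
+    # Editor's note -- illustrative sketch, not generated code. get_account_info returns
+    # None unless a ``cls`` hook is supplied, so the SKU and account kind are read from
+    # the deserialized response headers; ``container_ops`` is assumed:
+    #
+    #     hdrs = container_ops.get_account_info(cls=lambda resp, body, hdrs: hdrs)
+    #     sku, kind = hdrs["x-ms-sku-name"], hdrs["x-ms-account-kind"]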
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/blob/_generated/operations/_page_blob_operations.py b/.venv/lib/python3.12/site-packages/azure/storage/blob/_generated/operations/_page_blob_operations.py
new file mode 100644
index 00000000..747cfbd8
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/blob/_generated/operations/_page_blob_operations.py
@@ -0,0 +1,2218 @@
+# pylint: disable=too-many-lines
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+import datetime
+import sys
+from typing import Any, Callable, Dict, IO, Literal, Optional, TypeVar, Union
+
+from azure.core import PipelineClient
+from azure.core.exceptions import (
+    ClientAuthenticationError,
+    HttpResponseError,
+    ResourceExistsError,
+    ResourceNotFoundError,
+    ResourceNotModifiedError,
+    map_error,
+)
+from azure.core.pipeline import PipelineResponse
+from azure.core.rest import HttpRequest, HttpResponse
+from azure.core.tracing.decorator import distributed_trace
+from azure.core.utils import case_insensitive_dict
+
+from .. import models as _models
+from .._configuration import AzureBlobStorageConfiguration
+from .._serialization import Deserializer, Serializer
+
+if sys.version_info >= (3, 9):
+    from collections.abc import MutableMapping
+else:
+    from typing import MutableMapping  # type: ignore
+T = TypeVar("T")
+ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
+
+_SERIALIZER = Serializer()
+_SERIALIZER.client_side_validation = False
+
+
+def build_create_request(
+    url: str,
+    *,
+    content_length: int,
+    blob_content_length: int,
+    timeout: Optional[int] = None,
+    tier: Optional[Union[str, _models.PremiumPageBlobAccessTier]] = None,
+    blob_content_type: Optional[str] = None,
+    blob_content_encoding: Optional[str] = None,
+    blob_content_language: Optional[str] = None,
+    blob_content_md5: Optional[bytes] = None,
+    blob_cache_control: Optional[str] = None,
+    metadata: Optional[Dict[str, str]] = None,
+    lease_id: Optional[str] = None,
+    blob_content_disposition: Optional[str] = None,
+    encryption_key: Optional[str] = None,
+    encryption_key_sha256: Optional[str] = None,
+    encryption_algorithm: Optional[Union[str, _models.EncryptionAlgorithmType]] = None,
+    encryption_scope: Optional[str] = None,
+    if_modified_since: Optional[datetime.datetime] = None,
+    if_unmodified_since: Optional[datetime.datetime] = None,
+    if_match: Optional[str] = None,
+    if_none_match: Optional[str] = None,
+    if_tags: Optional[str] = None,
+    blob_sequence_number: int = 0,
+    request_id_parameter: Optional[str] = None,
+    blob_tags_string: Optional[str] = None,
+    immutability_policy_expiry: Optional[datetime.datetime] = None,
+    immutability_policy_mode: Optional[Union[str, _models.BlobImmutabilityPolicyMode]] = None,
+    legal_hold: Optional[bool] = None,
+    **kwargs: Any
+) -> HttpRequest:
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    blob_type: Literal["PageBlob"] = kwargs.pop("blob_type", _headers.pop("x-ms-blob-type", "PageBlob"))
+    version: Literal["2025-01-05"] = kwargs.pop("version", _headers.pop("x-ms-version", "2025-01-05"))
+    accept = _headers.pop("Accept", "application/xml")
+
+    # Construct URL
+    _url = kwargs.pop("template_url", "{url}")
+    path_format_arguments = {
+        "url": _SERIALIZER.url("url", url, "str", skip_quote=True),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    if timeout is not None:
+        _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0)
+
+    # Construct headers
+    _headers["x-ms-blob-type"] = _SERIALIZER.header("blob_type", blob_type, "str")
+    _headers["Content-Length"] = _SERIALIZER.header("content_length", content_length, "int")
+    if tier is not None:
+        _headers["x-ms-access-tier"] = _SERIALIZER.header("tier", tier, "str")
+    if blob_content_type is not None:
+        _headers["x-ms-blob-content-type"] = _SERIALIZER.header("blob_content_type", blob_content_type, "str")
+    if blob_content_encoding is not None:
+        _headers["x-ms-blob-content-encoding"] = _SERIALIZER.header(
+            "blob_content_encoding", blob_content_encoding, "str"
+        )
+    if blob_content_language is not None:
+        _headers["x-ms-blob-content-language"] = _SERIALIZER.header(
+            "blob_content_language", blob_content_language, "str"
+        )
+    if blob_content_md5 is not None:
+        _headers["x-ms-blob-content-md5"] = _SERIALIZER.header("blob_content_md5", blob_content_md5, "bytearray")
+    if blob_cache_control is not None:
+        _headers["x-ms-blob-cache-control"] = _SERIALIZER.header("blob_cache_control", blob_cache_control, "str")
+    if metadata is not None:
+        _headers["x-ms-meta"] = _SERIALIZER.header("metadata", metadata, "{str}")
+    if lease_id is not None:
+        _headers["x-ms-lease-id"] = _SERIALIZER.header("lease_id", lease_id, "str")
+    if blob_content_disposition is not None:
+        _headers["x-ms-blob-content-disposition"] = _SERIALIZER.header(
+            "blob_content_disposition", blob_content_disposition, "str"
+        )
+    if encryption_key is not None:
+        _headers["x-ms-encryption-key"] = _SERIALIZER.header("encryption_key", encryption_key, "str")
+    if encryption_key_sha256 is not None:
+        _headers["x-ms-encryption-key-sha256"] = _SERIALIZER.header(
+            "encryption_key_sha256", encryption_key_sha256, "str"
+        )
+    if encryption_algorithm is not None:
+        _headers["x-ms-encryption-algorithm"] = _SERIALIZER.header("encryption_algorithm", encryption_algorithm, "str")
+    if encryption_scope is not None:
+        _headers["x-ms-encryption-scope"] = _SERIALIZER.header("encryption_scope", encryption_scope, "str")
+    if if_modified_since is not None:
+        _headers["If-Modified-Since"] = _SERIALIZER.header("if_modified_since", if_modified_since, "rfc-1123")
+    if if_unmodified_since is not None:
+        _headers["If-Unmodified-Since"] = _SERIALIZER.header("if_unmodified_since", if_unmodified_since, "rfc-1123")
+    if if_match is not None:
+        _headers["If-Match"] = _SERIALIZER.header("if_match", if_match, "str")
+    if if_none_match is not None:
+        _headers["If-None-Match"] = _SERIALIZER.header("if_none_match", if_none_match, "str")
+    if if_tags is not None:
+        _headers["x-ms-if-tags"] = _SERIALIZER.header("if_tags", if_tags, "str")
+    _headers["x-ms-blob-content-length"] = _SERIALIZER.header("blob_content_length", blob_content_length, "int")
+    if blob_sequence_number is not None:
+        _headers["x-ms-blob-sequence-number"] = _SERIALIZER.header("blob_sequence_number", blob_sequence_number, "int")
+    _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str")
+    if request_id_parameter is not None:
+        _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str")
+    if blob_tags_string is not None:
+        _headers["x-ms-tags"] = _SERIALIZER.header("blob_tags_string", blob_tags_string, "str")
+    if immutability_policy_expiry is not None:
+        _headers["x-ms-immutability-policy-until-date"] = _SERIALIZER.header(
+            "immutability_policy_expiry", immutability_policy_expiry, "rfc-1123"
+        )
+    if immutability_policy_mode is not None:
+        _headers["x-ms-immutability-policy-mode"] = _SERIALIZER.header(
+            "immutability_policy_mode", immutability_policy_mode, "str"
+        )
+    if legal_hold is not None:
+        _headers["x-ms-legal-hold"] = _SERIALIZER.header("legal_hold", legal_hold, "bool")
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs)
+
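+
+# Editor's note -- an illustrative sketch, not generated code: building (without sending)
+# a Put Blob request for an empty 1 MiB page blob. The account URL is a placeholder.
+def _example_build_create() -> HttpRequest:
+    return build_create_request(
+        url="https://myaccount.blob.core.windows.net/mycontainer/myblob",  # placeholder
+        content_length=0,  # Create carries no body, so the request length is zero
+        blob_content_length=1024 * 1024,  # total blob size; must be 512-byte aligned
+    )
+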
+
+def build_upload_pages_request(
+    url: str,
+    *,
+    content_length: int,
+    content: IO[bytes],
+    transactional_content_md5: Optional[bytes] = None,
+    transactional_content_crc64: Optional[bytes] = None,
+    timeout: Optional[int] = None,
+    range: Optional[str] = None,
+    lease_id: Optional[str] = None,
+    encryption_key: Optional[str] = None,
+    encryption_key_sha256: Optional[str] = None,
+    encryption_algorithm: Optional[Union[str, _models.EncryptionAlgorithmType]] = None,
+    encryption_scope: Optional[str] = None,
+    if_sequence_number_less_than_or_equal_to: Optional[int] = None,
+    if_sequence_number_less_than: Optional[int] = None,
+    if_sequence_number_equal_to: Optional[int] = None,
+    if_modified_since: Optional[datetime.datetime] = None,
+    if_unmodified_since: Optional[datetime.datetime] = None,
+    if_match: Optional[str] = None,
+    if_none_match: Optional[str] = None,
+    if_tags: Optional[str] = None,
+    request_id_parameter: Optional[str] = None,
+    structured_body_type: Optional[str] = None,
+    structured_content_length: Optional[int] = None,
+    **kwargs: Any
+) -> HttpRequest:
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    comp: Literal["page"] = kwargs.pop("comp", _params.pop("comp", "page"))
+    page_write: Literal["update"] = kwargs.pop("page_write", _headers.pop("x-ms-page-write", "update"))
+    content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+    version: Literal["2025-01-05"] = kwargs.pop("version", _headers.pop("x-ms-version", "2025-01-05"))
+    accept = _headers.pop("Accept", "application/xml")
+
+    # Construct URL
+    _url = kwargs.pop("template_url", "{url}")
+    path_format_arguments = {
+        "url": _SERIALIZER.url("url", url, "str", skip_quote=True),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    _params["comp"] = _SERIALIZER.query("comp", comp, "str")
+    if timeout is not None:
+        _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0)
+
+    # Construct headers
+    _headers["x-ms-page-write"] = _SERIALIZER.header("page_write", page_write, "str")
+    _headers["Content-Length"] = _SERIALIZER.header("content_length", content_length, "int")
+    if transactional_content_md5 is not None:
+        _headers["Content-MD5"] = _SERIALIZER.header(
+            "transactional_content_md5", transactional_content_md5, "bytearray"
+        )
+    if transactional_content_crc64 is not None:
+        _headers["x-ms-content-crc64"] = _SERIALIZER.header(
+            "transactional_content_crc64", transactional_content_crc64, "bytearray"
+        )
+    if range is not None:
+        _headers["x-ms-range"] = _SERIALIZER.header("range", range, "str")
+    if lease_id is not None:
+        _headers["x-ms-lease-id"] = _SERIALIZER.header("lease_id", lease_id, "str")
+    if encryption_key is not None:
+        _headers["x-ms-encryption-key"] = _SERIALIZER.header("encryption_key", encryption_key, "str")
+    if encryption_key_sha256 is not None:
+        _headers["x-ms-encryption-key-sha256"] = _SERIALIZER.header(
+            "encryption_key_sha256", encryption_key_sha256, "str"
+        )
+    if encryption_algorithm is not None:
+        _headers["x-ms-encryption-algorithm"] = _SERIALIZER.header("encryption_algorithm", encryption_algorithm, "str")
+    if encryption_scope is not None:
+        _headers["x-ms-encryption-scope"] = _SERIALIZER.header("encryption_scope", encryption_scope, "str")
+    if if_sequence_number_less_than_or_equal_to is not None:
+        _headers["x-ms-if-sequence-number-le"] = _SERIALIZER.header(
+            "if_sequence_number_less_than_or_equal_to", if_sequence_number_less_than_or_equal_to, "int"
+        )
+    if if_sequence_number_less_than is not None:
+        _headers["x-ms-if-sequence-number-lt"] = _SERIALIZER.header(
+            "if_sequence_number_less_than", if_sequence_number_less_than, "int"
+        )
+    if if_sequence_number_equal_to is not None:
+        _headers["x-ms-if-sequence-number-eq"] = _SERIALIZER.header(
+            "if_sequence_number_equal_to", if_sequence_number_equal_to, "int"
+        )
+    if if_modified_since is not None:
+        _headers["If-Modified-Since"] = _SERIALIZER.header("if_modified_since", if_modified_since, "rfc-1123")
+    if if_unmodified_since is not None:
+        _headers["If-Unmodified-Since"] = _SERIALIZER.header("if_unmodified_since", if_unmodified_since, "rfc-1123")
+    if if_match is not None:
+        _headers["If-Match"] = _SERIALIZER.header("if_match", if_match, "str")
+    if if_none_match is not None:
+        _headers["If-None-Match"] = _SERIALIZER.header("if_none_match", if_none_match, "str")
+    if if_tags is not None:
+        _headers["x-ms-if-tags"] = _SERIALIZER.header("if_tags", if_tags, "str")
+    _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str")
+    if request_id_parameter is not None:
+        _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str")
+    if structured_body_type is not None:
+        _headers["x-ms-structured-body"] = _SERIALIZER.header("structured_body_type", structured_body_type, "str")
+    if structured_content_length is not None:
+        _headers["x-ms-structured-content-length"] = _SERIALIZER.header(
+            "structured_content_length", structured_content_length, "int"
+        )
+    if content_type is not None:
+        _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, content=content, **kwargs)
+
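+
+# Editor's note -- an illustrative sketch, not generated code: writing the first page.
+# Ranges are inclusive and must start and end on 512-byte page boundaries. The generated
+# signature types ``content`` as IO[bytes], but azure-core's HttpRequest also accepts raw
+# bytes, and a BytesIO wrapper works equally well.
+def _example_build_upload_first_page(data: bytes) -> HttpRequest:
+    assert len(data) == 512, "this example writes exactly one 512-byte page"
+    return build_upload_pages_request(
+        url="https://myaccount.blob.core.windows.net/mycontainer/myblob",  # placeholder
+        content_length=len(data),
+        content=data,  # type: ignore[arg-type]
+        range="bytes=0-511",  # inclusive, 512-byte aligned
+    )
+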
+
+def build_clear_pages_request(
+    url: str,
+    *,
+    content_length: int,
+    timeout: Optional[int] = None,
+    range: Optional[str] = None,
+    lease_id: Optional[str] = None,
+    encryption_key: Optional[str] = None,
+    encryption_key_sha256: Optional[str] = None,
+    encryption_algorithm: Optional[Union[str, _models.EncryptionAlgorithmType]] = None,
+    encryption_scope: Optional[str] = None,
+    if_sequence_number_less_than_or_equal_to: Optional[int] = None,
+    if_sequence_number_less_than: Optional[int] = None,
+    if_sequence_number_equal_to: Optional[int] = None,
+    if_modified_since: Optional[datetime.datetime] = None,
+    if_unmodified_since: Optional[datetime.datetime] = None,
+    if_match: Optional[str] = None,
+    if_none_match: Optional[str] = None,
+    if_tags: Optional[str] = None,
+    request_id_parameter: Optional[str] = None,
+    **kwargs: Any
+) -> HttpRequest:
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    comp: Literal["page"] = kwargs.pop("comp", _params.pop("comp", "page"))
+    page_write: Literal["clear"] = kwargs.pop("page_write", _headers.pop("x-ms-page-write", "clear"))
+    version: Literal["2025-01-05"] = kwargs.pop("version", _headers.pop("x-ms-version", "2025-01-05"))
+    accept = _headers.pop("Accept", "application/xml")
+
+    # Construct URL
+    _url = kwargs.pop("template_url", "{url}")
+    path_format_arguments = {
+        "url": _SERIALIZER.url("url", url, "str", skip_quote=True),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    _params["comp"] = _SERIALIZER.query("comp", comp, "str")
+    if timeout is not None:
+        _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0)
+
+    # Construct headers
+    _headers["x-ms-page-write"] = _SERIALIZER.header("page_write", page_write, "str")
+    _headers["Content-Length"] = _SERIALIZER.header("content_length", content_length, "int")
+    if range is not None:
+        _headers["x-ms-range"] = _SERIALIZER.header("range", range, "str")
+    if lease_id is not None:
+        _headers["x-ms-lease-id"] = _SERIALIZER.header("lease_id", lease_id, "str")
+    if encryption_key is not None:
+        _headers["x-ms-encryption-key"] = _SERIALIZER.header("encryption_key", encryption_key, "str")
+    if encryption_key_sha256 is not None:
+        _headers["x-ms-encryption-key-sha256"] = _SERIALIZER.header(
+            "encryption_key_sha256", encryption_key_sha256, "str"
+        )
+    if encryption_algorithm is not None:
+        _headers["x-ms-encryption-algorithm"] = _SERIALIZER.header("encryption_algorithm", encryption_algorithm, "str")
+    if encryption_scope is not None:
+        _headers["x-ms-encryption-scope"] = _SERIALIZER.header("encryption_scope", encryption_scope, "str")
+    if if_sequence_number_less_than_or_equal_to is not None:
+        _headers["x-ms-if-sequence-number-le"] = _SERIALIZER.header(
+            "if_sequence_number_less_than_or_equal_to", if_sequence_number_less_than_or_equal_to, "int"
+        )
+    if if_sequence_number_less_than is not None:
+        _headers["x-ms-if-sequence-number-lt"] = _SERIALIZER.header(
+            "if_sequence_number_less_than", if_sequence_number_less_than, "int"
+        )
+    if if_sequence_number_equal_to is not None:
+        _headers["x-ms-if-sequence-number-eq"] = _SERIALIZER.header(
+            "if_sequence_number_equal_to", if_sequence_number_equal_to, "int"
+        )
+    if if_modified_since is not None:
+        _headers["If-Modified-Since"] = _SERIALIZER.header("if_modified_since", if_modified_since, "rfc-1123")
+    if if_unmodified_since is not None:
+        _headers["If-Unmodified-Since"] = _SERIALIZER.header("if_unmodified_since", if_unmodified_since, "rfc-1123")
+    if if_match is not None:
+        _headers["If-Match"] = _SERIALIZER.header("if_match", if_match, "str")
+    if if_none_match is not None:
+        _headers["If-None-Match"] = _SERIALIZER.header("if_none_match", if_none_match, "str")
+    if if_tags is not None:
+        _headers["x-ms-if-tags"] = _SERIALIZER.header("if_tags", if_tags, "str")
+    _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str")
+    if request_id_parameter is not None:
+        _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str")
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs)
+
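+
+# Editor's note -- an illustrative sketch, not generated code: clearing (zeroing) the
+# first page. A clear request carries no body, so Content-Length must be zero.
+def _example_build_clear_first_page() -> HttpRequest:
+    return build_clear_pages_request(
+        url="https://myaccount.blob.core.windows.net/mycontainer/myblob",  # placeholder
+        content_length=0,
+        range="bytes=0-511",
+    )
+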
+
+def build_upload_pages_from_url_request(
+    url: str,
+    *,
+    source_url: str,
+    source_range: str,
+    content_length: int,
+    range: str,
+    source_content_md5: Optional[bytes] = None,
+    source_contentcrc64: Optional[bytes] = None,
+    timeout: Optional[int] = None,
+    encryption_key: Optional[str] = None,
+    encryption_key_sha256: Optional[str] = None,
+    encryption_algorithm: Optional[Union[str, _models.EncryptionAlgorithmType]] = None,
+    encryption_scope: Optional[str] = None,
+    lease_id: Optional[str] = None,
+    if_sequence_number_less_than_or_equal_to: Optional[int] = None,
+    if_sequence_number_less_than: Optional[int] = None,
+    if_sequence_number_equal_to: Optional[int] = None,
+    if_modified_since: Optional[datetime.datetime] = None,
+    if_unmodified_since: Optional[datetime.datetime] = None,
+    if_match: Optional[str] = None,
+    if_none_match: Optional[str] = None,
+    if_tags: Optional[str] = None,
+    source_if_modified_since: Optional[datetime.datetime] = None,
+    source_if_unmodified_since: Optional[datetime.datetime] = None,
+    source_if_match: Optional[str] = None,
+    source_if_none_match: Optional[str] = None,
+    request_id_parameter: Optional[str] = None,
+    copy_source_authorization: Optional[str] = None,
+    **kwargs: Any
+) -> HttpRequest:
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    comp: Literal["page"] = kwargs.pop("comp", _params.pop("comp", "page"))
+    page_write: Literal["update"] = kwargs.pop("page_write", _headers.pop("x-ms-page-write", "update"))
+    version: Literal["2025-01-05"] = kwargs.pop("version", _headers.pop("x-ms-version", "2025-01-05"))
+    accept = _headers.pop("Accept", "application/xml")
+
+    # Construct URL
+    _url = kwargs.pop("template_url", "{url}")
+    path_format_arguments = {
+        "url": _SERIALIZER.url("url", url, "str", skip_quote=True),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    _params["comp"] = _SERIALIZER.query("comp", comp, "str")
+    if timeout is not None:
+        _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0)
+
+    # Construct headers
+    _headers["x-ms-page-write"] = _SERIALIZER.header("page_write", page_write, "str")
+    _headers["x-ms-copy-source"] = _SERIALIZER.header("source_url", source_url, "str")
+    _headers["x-ms-source-range"] = _SERIALIZER.header("source_range", source_range, "str")
+    if source_content_md5 is not None:
+        _headers["x-ms-source-content-md5"] = _SERIALIZER.header("source_content_md5", source_content_md5, "bytearray")
+    if source_contentcrc64 is not None:
+        _headers["x-ms-source-content-crc64"] = _SERIALIZER.header(
+            "source_contentcrc64", source_contentcrc64, "bytearray"
+        )
+    _headers["Content-Length"] = _SERIALIZER.header("content_length", content_length, "int")
+    _headers["x-ms-range"] = _SERIALIZER.header("range", range, "str")
+    if encryption_key is not None:
+        _headers["x-ms-encryption-key"] = _SERIALIZER.header("encryption_key", encryption_key, "str")
+    if encryption_key_sha256 is not None:
+        _headers["x-ms-encryption-key-sha256"] = _SERIALIZER.header(
+            "encryption_key_sha256", encryption_key_sha256, "str"
+        )
+    if encryption_algorithm is not None:
+        _headers["x-ms-encryption-algorithm"] = _SERIALIZER.header("encryption_algorithm", encryption_algorithm, "str")
+    if encryption_scope is not None:
+        _headers["x-ms-encryption-scope"] = _SERIALIZER.header("encryption_scope", encryption_scope, "str")
+    if lease_id is not None:
+        _headers["x-ms-lease-id"] = _SERIALIZER.header("lease_id", lease_id, "str")
+    if if_sequence_number_less_than_or_equal_to is not None:
+        _headers["x-ms-if-sequence-number-le"] = _SERIALIZER.header(
+            "if_sequence_number_less_than_or_equal_to", if_sequence_number_less_than_or_equal_to, "int"
+        )
+    if if_sequence_number_less_than is not None:
+        _headers["x-ms-if-sequence-number-lt"] = _SERIALIZER.header(
+            "if_sequence_number_less_than", if_sequence_number_less_than, "int"
+        )
+    if if_sequence_number_equal_to is not None:
+        _headers["x-ms-if-sequence-number-eq"] = _SERIALIZER.header(
+            "if_sequence_number_equal_to", if_sequence_number_equal_to, "int"
+        )
+    if if_modified_since is not None:
+        _headers["If-Modified-Since"] = _SERIALIZER.header("if_modified_since", if_modified_since, "rfc-1123")
+    if if_unmodified_since is not None:
+        _headers["If-Unmodified-Since"] = _SERIALIZER.header("if_unmodified_since", if_unmodified_since, "rfc-1123")
+    if if_match is not None:
+        _headers["If-Match"] = _SERIALIZER.header("if_match", if_match, "str")
+    if if_none_match is not None:
+        _headers["If-None-Match"] = _SERIALIZER.header("if_none_match", if_none_match, "str")
+    if if_tags is not None:
+        _headers["x-ms-if-tags"] = _SERIALIZER.header("if_tags", if_tags, "str")
+    if source_if_modified_since is not None:
+        _headers["x-ms-source-if-modified-since"] = _SERIALIZER.header(
+            "source_if_modified_since", source_if_modified_since, "rfc-1123"
+        )
+    if source_if_unmodified_since is not None:
+        _headers["x-ms-source-if-unmodified-since"] = _SERIALIZER.header(
+            "source_if_unmodified_since", source_if_unmodified_since, "rfc-1123"
+        )
+    if source_if_match is not None:
+        _headers["x-ms-source-if-match"] = _SERIALIZER.header("source_if_match", source_if_match, "str")
+    if source_if_none_match is not None:
+        _headers["x-ms-source-if-none-match"] = _SERIALIZER.header("source_if_none_match", source_if_none_match, "str")
+    _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str")
+    if request_id_parameter is not None:
+        _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str")
+    if copy_source_authorization is not None:
+        _headers["x-ms-copy-source-authorization"] = _SERIALIZER.header(
+            "copy_source_authorization", copy_source_authorization, "str"
+        )
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs)
+
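+
+# Editor's note -- an illustrative sketch, not generated code: a server-side copy of one
+# page range. The request has no body (the service pulls from source_url), and the source
+# and destination ranges must be the same length. Both URLs are placeholders; the source
+# typically needs a SAS token or public read access.
+def _example_build_upload_pages_from_url(source_url_with_sas: str) -> HttpRequest:
+    return build_upload_pages_from_url_request(
+        url="https://myaccount.blob.core.windows.net/mycontainer/dest",  # placeholder
+        source_url=source_url_with_sas,
+        source_range="bytes=0-511",
+        range="bytes=512-1023",  # same length as source_range, 512-byte aligned
+        content_length=0,
+    )
+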
+
+def build_get_page_ranges_request(
+    url: str,
+    *,
+    snapshot: Optional[str] = None,
+    timeout: Optional[int] = None,
+    range: Optional[str] = None,
+    lease_id: Optional[str] = None,
+    if_modified_since: Optional[datetime.datetime] = None,
+    if_unmodified_since: Optional[datetime.datetime] = None,
+    if_match: Optional[str] = None,
+    if_none_match: Optional[str] = None,
+    if_tags: Optional[str] = None,
+    request_id_parameter: Optional[str] = None,
+    marker: Optional[str] = None,
+    maxresults: Optional[int] = None,
+    **kwargs: Any
+) -> HttpRequest:
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    comp: Literal["pagelist"] = kwargs.pop("comp", _params.pop("comp", "pagelist"))
+    version: Literal["2025-01-05"] = kwargs.pop("version", _headers.pop("x-ms-version", "2025-01-05"))
+    accept = _headers.pop("Accept", "application/xml")
+
+    # Construct URL
+    _url = kwargs.pop("template_url", "{url}")
+    path_format_arguments = {
+        "url": _SERIALIZER.url("url", url, "str", skip_quote=True),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    _params["comp"] = _SERIALIZER.query("comp", comp, "str")
+    if snapshot is not None:
+        _params["snapshot"] = _SERIALIZER.query("snapshot", snapshot, "str")
+    if timeout is not None:
+        _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0)
+    if marker is not None:
+        _params["marker"] = _SERIALIZER.query("marker", marker, "str")
+    if maxresults is not None:
+        _params["maxresults"] = _SERIALIZER.query("maxresults", maxresults, "int", minimum=1)
+
+    # Construct headers
+    if range is not None:
+        _headers["x-ms-range"] = _SERIALIZER.header("range", range, "str")
+    if lease_id is not None:
+        _headers["x-ms-lease-id"] = _SERIALIZER.header("lease_id", lease_id, "str")
+    if if_modified_since is not None:
+        _headers["If-Modified-Since"] = _SERIALIZER.header("if_modified_since", if_modified_since, "rfc-1123")
+    if if_unmodified_since is not None:
+        _headers["If-Unmodified-Since"] = _SERIALIZER.header("if_unmodified_since", if_unmodified_since, "rfc-1123")
+    if if_match is not None:
+        _headers["If-Match"] = _SERIALIZER.header("if_match", if_match, "str")
+    if if_none_match is not None:
+        _headers["If-None-Match"] = _SERIALIZER.header("if_none_match", if_none_match, "str")
+    if if_tags is not None:
+        _headers["x-ms-if-tags"] = _SERIALIZER.header("if_tags", if_tags, "str")
+    _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str")
+    if request_id_parameter is not None:
+        _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str")
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_get_page_ranges_diff_request(
+    url: str,
+    *,
+    snapshot: Optional[str] = None,
+    timeout: Optional[int] = None,
+    prevsnapshot: Optional[str] = None,
+    prev_snapshot_url: Optional[str] = None,
+    range: Optional[str] = None,
+    lease_id: Optional[str] = None,
+    if_modified_since: Optional[datetime.datetime] = None,
+    if_unmodified_since: Optional[datetime.datetime] = None,
+    if_match: Optional[str] = None,
+    if_none_match: Optional[str] = None,
+    if_tags: Optional[str] = None,
+    request_id_parameter: Optional[str] = None,
+    marker: Optional[str] = None,
+    maxresults: Optional[int] = None,
+    **kwargs: Any
+) -> HttpRequest:
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    comp: Literal["pagelist"] = kwargs.pop("comp", _params.pop("comp", "pagelist"))
+    version: Literal["2025-01-05"] = kwargs.pop("version", _headers.pop("x-ms-version", "2025-01-05"))
+    accept = _headers.pop("Accept", "application/xml")
+
+    # Construct URL
+    _url = kwargs.pop("template_url", "{url}")
+    path_format_arguments = {
+        "url": _SERIALIZER.url("url", url, "str", skip_quote=True),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    _params["comp"] = _SERIALIZER.query("comp", comp, "str")
+    if snapshot is not None:
+        _params["snapshot"] = _SERIALIZER.query("snapshot", snapshot, "str")
+    if timeout is not None:
+        _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0)
+    if prevsnapshot is not None:
+        _params["prevsnapshot"] = _SERIALIZER.query("prevsnapshot", prevsnapshot, "str")
+    if marker is not None:
+        _params["marker"] = _SERIALIZER.query("marker", marker, "str")
+    if maxresults is not None:
+        _params["maxresults"] = _SERIALIZER.query("maxresults", maxresults, "int", minimum=1)
+
+    # Construct headers
+    if prev_snapshot_url is not None:
+        _headers["x-ms-previous-snapshot-url"] = _SERIALIZER.header("prev_snapshot_url", prev_snapshot_url, "str")
+    if range is not None:
+        _headers["x-ms-range"] = _SERIALIZER.header("range", range, "str")
+    if lease_id is not None:
+        _headers["x-ms-lease-id"] = _SERIALIZER.header("lease_id", lease_id, "str")
+    if if_modified_since is not None:
+        _headers["If-Modified-Since"] = _SERIALIZER.header("if_modified_since", if_modified_since, "rfc-1123")
+    if if_unmodified_since is not None:
+        _headers["If-Unmodified-Since"] = _SERIALIZER.header("if_unmodified_since", if_unmodified_since, "rfc-1123")
+    if if_match is not None:
+        _headers["If-Match"] = _SERIALIZER.header("if_match", if_match, "str")
+    if if_none_match is not None:
+        _headers["If-None-Match"] = _SERIALIZER.header("if_none_match", if_none_match, "str")
+    if if_tags is not None:
+        _headers["x-ms-if-tags"] = _SERIALIZER.header("if_tags", if_tags, "str")
+    _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str")
+    if request_id_parameter is not None:
+        _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str")
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
+
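+
+# Editor's note -- an illustrative sketch, not generated code: listing the page ranges
+# changed since an earlier snapshot of the same blob. ``prevsnapshot`` (a snapshot
+# DateTime value) and ``prev_snapshot_url`` are alternatives; the URL form is used with
+# managed-disk accounts where the baseline snapshot lives under a different URL.
+def _example_build_page_ranges_diff(snapshot_time: str) -> HttpRequest:
+    return build_get_page_ranges_diff_request(
+        url="https://myaccount.blob.core.windows.net/mycontainer/myblob",  # placeholder
+        prevsnapshot=snapshot_time,
+    )
+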
+
+def build_resize_request(
+    url: str,
+    *,
+    blob_content_length: int,
+    timeout: Optional[int] = None,
+    lease_id: Optional[str] = None,
+    encryption_key: Optional[str] = None,
+    encryption_key_sha256: Optional[str] = None,
+    encryption_algorithm: Optional[Union[str, _models.EncryptionAlgorithmType]] = None,
+    encryption_scope: Optional[str] = None,
+    if_modified_since: Optional[datetime.datetime] = None,
+    if_unmodified_since: Optional[datetime.datetime] = None,
+    if_match: Optional[str] = None,
+    if_none_match: Optional[str] = None,
+    if_tags: Optional[str] = None,
+    request_id_parameter: Optional[str] = None,
+    **kwargs: Any
+) -> HttpRequest:
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    comp: Literal["properties"] = kwargs.pop("comp", _params.pop("comp", "properties"))
+    version: Literal["2025-01-05"] = kwargs.pop("version", _headers.pop("x-ms-version", "2025-01-05"))
+    accept = _headers.pop("Accept", "application/xml")
+
+    # Construct URL
+    _url = kwargs.pop("template_url", "{url}")
+    path_format_arguments = {
+        "url": _SERIALIZER.url("url", url, "str", skip_quote=True),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    _params["comp"] = _SERIALIZER.query("comp", comp, "str")
+    if timeout is not None:
+        _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0)
+
+    # Construct headers
+    if lease_id is not None:
+        _headers["x-ms-lease-id"] = _SERIALIZER.header("lease_id", lease_id, "str")
+    if encryption_key is not None:
+        _headers["x-ms-encryption-key"] = _SERIALIZER.header("encryption_key", encryption_key, "str")
+    if encryption_key_sha256 is not None:
+        _headers["x-ms-encryption-key-sha256"] = _SERIALIZER.header(
+            "encryption_key_sha256", encryption_key_sha256, "str"
+        )
+    if encryption_algorithm is not None:
+        _headers["x-ms-encryption-algorithm"] = _SERIALIZER.header("encryption_algorithm", encryption_algorithm, "str")
+    if encryption_scope is not None:
+        _headers["x-ms-encryption-scope"] = _SERIALIZER.header("encryption_scope", encryption_scope, "str")
+    if if_modified_since is not None:
+        _headers["If-Modified-Since"] = _SERIALIZER.header("if_modified_since", if_modified_since, "rfc-1123")
+    if if_unmodified_since is not None:
+        _headers["If-Unmodified-Since"] = _SERIALIZER.header("if_unmodified_since", if_unmodified_since, "rfc-1123")
+    if if_match is not None:
+        _headers["If-Match"] = _SERIALIZER.header("if_match", if_match, "str")
+    if if_none_match is not None:
+        _headers["If-None-Match"] = _SERIALIZER.header("if_none_match", if_none_match, "str")
+    if if_tags is not None:
+        _headers["x-ms-if-tags"] = _SERIALIZER.header("if_tags", if_tags, "str")
+    _headers["x-ms-blob-content-length"] = _SERIALIZER.header("blob_content_length", blob_content_length, "int")
+    _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str")
+    if request_id_parameter is not None:
+        _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str")
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_update_sequence_number_request(
+    url: str,
+    *,
+    sequence_number_action: Union[str, _models.SequenceNumberActionType],
+    timeout: Optional[int] = None,
+    lease_id: Optional[str] = None,
+    if_modified_since: Optional[datetime.datetime] = None,
+    if_unmodified_since: Optional[datetime.datetime] = None,
+    if_match: Optional[str] = None,
+    if_none_match: Optional[str] = None,
+    if_tags: Optional[str] = None,
+    blob_sequence_number: int = 0,
+    request_id_parameter: Optional[str] = None,
+    **kwargs: Any
+) -> HttpRequest:
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    comp: Literal["properties"] = kwargs.pop("comp", _params.pop("comp", "properties"))
+    version: Literal["2025-01-05"] = kwargs.pop("version", _headers.pop("x-ms-version", "2025-01-05"))
+    accept = _headers.pop("Accept", "application/xml")
+
+    # Construct URL
+    _url = kwargs.pop("template_url", "{url}")
+    path_format_arguments = {
+        "url": _SERIALIZER.url("url", url, "str", skip_quote=True),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    _params["comp"] = _SERIALIZER.query("comp", comp, "str")
+    if timeout is not None:
+        _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0)
+
+    # Construct headers
+    if lease_id is not None:
+        _headers["x-ms-lease-id"] = _SERIALIZER.header("lease_id", lease_id, "str")
+    if if_modified_since is not None:
+        _headers["If-Modified-Since"] = _SERIALIZER.header("if_modified_since", if_modified_since, "rfc-1123")
+    if if_unmodified_since is not None:
+        _headers["If-Unmodified-Since"] = _SERIALIZER.header("if_unmodified_since", if_unmodified_since, "rfc-1123")
+    if if_match is not None:
+        _headers["If-Match"] = _SERIALIZER.header("if_match", if_match, "str")
+    if if_none_match is not None:
+        _headers["If-None-Match"] = _SERIALIZER.header("if_none_match", if_none_match, "str")
+    if if_tags is not None:
+        _headers["x-ms-if-tags"] = _SERIALIZER.header("if_tags", if_tags, "str")
+    _headers["x-ms-sequence-number-action"] = _SERIALIZER.header(
+        "sequence_number_action", sequence_number_action, "str"
+    )
+    if blob_sequence_number is not None:
+        _headers["x-ms-blob-sequence-number"] = _SERIALIZER.header("blob_sequence_number", blob_sequence_number, "int")
+    _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str")
+    if request_id_parameter is not None:
+        _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str")
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs)
+
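+
+# Editor's note -- an illustrative sketch, not generated code. With the "max" action the
+# blob's sequence number becomes the larger of its current value and the supplied
+# operand; "update" sets it outright, while "increment" adds one and takes no operand.
+def _example_build_bump_sequence_number() -> HttpRequest:
+    return build_update_sequence_number_request(
+        url="https://myaccount.blob.core.windows.net/mycontainer/myblob",  # placeholder
+        sequence_number_action="max",
+        blob_sequence_number=7,  # operand for "max"/"update"
+    )
+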
+
+def build_copy_incremental_request(
+    url: str,
+    *,
+    copy_source: str,
+    timeout: Optional[int] = None,
+    if_modified_since: Optional[datetime.datetime] = None,
+    if_unmodified_since: Optional[datetime.datetime] = None,
+    if_match: Optional[str] = None,
+    if_none_match: Optional[str] = None,
+    if_tags: Optional[str] = None,
+    request_id_parameter: Optional[str] = None,
+    **kwargs: Any
+) -> HttpRequest:
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    comp: Literal["incrementalcopy"] = kwargs.pop("comp", _params.pop("comp", "incrementalcopy"))
+    version: Literal["2025-01-05"] = kwargs.pop("version", _headers.pop("x-ms-version", "2025-01-05"))
+    accept = _headers.pop("Accept", "application/xml")
+
+    # Construct URL
+    _url = kwargs.pop("template_url", "{url}")
+    path_format_arguments = {
+        "url": _SERIALIZER.url("url", url, "str", skip_quote=True),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    _params["comp"] = _SERIALIZER.query("comp", comp, "str")
+    if timeout is not None:
+        _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0)
+
+    # Construct headers
+    if if_modified_since is not None:
+        _headers["If-Modified-Since"] = _SERIALIZER.header("if_modified_since", if_modified_since, "rfc-1123")
+    if if_unmodified_since is not None:
+        _headers["If-Unmodified-Since"] = _SERIALIZER.header("if_unmodified_since", if_unmodified_since, "rfc-1123")
+    if if_match is not None:
+        _headers["If-Match"] = _SERIALIZER.header("if_match", if_match, "str")
+    if if_none_match is not None:
+        _headers["If-None-Match"] = _SERIALIZER.header("if_none_match", if_none_match, "str")
+    if if_tags is not None:
+        _headers["x-ms-if-tags"] = _SERIALIZER.header("if_tags", if_tags, "str")
+    _headers["x-ms-copy-source"] = _SERIALIZER.header("copy_source", copy_source, "str")
+    _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str")
+    if request_id_parameter is not None:
+        _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str")
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+class PageBlobOperations:
+    """
+    .. warning::
+        **DO NOT** instantiate this class directly.
+
+        Instead, you should access the following operations through
+        :class:`~azure.storage.blob.AzureBlobStorage`'s
+        :attr:`page_blob` attribute.
+    """
+
+    models = _models
+
+    def __init__(self, *args, **kwargs):
+        input_args = list(args)
+        self._client: PipelineClient = input_args.pop(0) if input_args else kwargs.pop("client")
+        self._config: AzureBlobStorageConfiguration = input_args.pop(0) if input_args else kwargs.pop("config")
+        self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer")
+        self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer")
+
+    @distributed_trace
+    def create(  # pylint: disable=inconsistent-return-statements
+        self,
+        content_length: int,
+        blob_content_length: int,
+        timeout: Optional[int] = None,
+        tier: Optional[Union[str, _models.PremiumPageBlobAccessTier]] = None,
+        metadata: Optional[Dict[str, str]] = None,
+        blob_sequence_number: int = 0,
+        request_id_parameter: Optional[str] = None,
+        blob_tags_string: Optional[str] = None,
+        immutability_policy_expiry: Optional[datetime.datetime] = None,
+        immutability_policy_mode: Optional[Union[str, _models.BlobImmutabilityPolicyMode]] = None,
+        legal_hold: Optional[bool] = None,
+        blob_http_headers: Optional[_models.BlobHTTPHeaders] = None,
+        lease_access_conditions: Optional[_models.LeaseAccessConditions] = None,
+        cpk_info: Optional[_models.CpkInfo] = None,
+        cpk_scope_info: Optional[_models.CpkScopeInfo] = None,
+        modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """The Create operation creates a new page blob.
+
+        :param content_length: The length of the request. Required.
+        :type content_length: int
+        :param blob_content_length: This header specifies the maximum size for the page blob, up to 8
+         TiB. The page blob size must be aligned to a 512-byte boundary. Required.
+        :type blob_content_length: int
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param tier: Optional. Indicates the tier to be set on the page blob. Known values are: "P4",
+         "P6", "P10", "P15", "P20", "P30", "P40", "P50", "P60", "P70", and "P80". Default value is None.
+        :type tier: str or ~azure.storage.blob.models.PremiumPageBlobAccessTier
+        :param metadata: Optional. Specifies user-defined name-value pairs associated with the blob.
+         Note that beginning with version 2009-09-19, metadata names must adhere to the naming rules
+         for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more
+         information. Default value is None.
+        :type metadata: dict[str, str]
+        :param blob_sequence_number: Set for page blobs only. The sequence number is a user-controlled
+         value that you can use to track requests. The value of the sequence number must be between 0
+         and 2^63 - 1. Default value is 0.
+        :type blob_sequence_number: int
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param blob_tags_string: Optional. Used to set blob tags in various blob operations. Default
+         value is None.
+        :type blob_tags_string: str
+        :param immutability_policy_expiry: Specifies the date time when the blob's immutability policy
+         is set to expire. Default value is None.
+        :type immutability_policy_expiry: ~datetime.datetime
+        :param immutability_policy_mode: Specifies the immutability policy mode to set on the blob.
+         Known values are: "Mutable", "Unlocked", and "Locked". Default value is None.
+        :type immutability_policy_mode: str or ~azure.storage.blob.models.BlobImmutabilityPolicyMode
+        :param legal_hold: Specifies whether a legal hold should be set on the blob. Default value is None.
+        :type legal_hold: bool
+        :param blob_http_headers: Parameter group. Default value is None.
+        :type blob_http_headers: ~azure.storage.blob.models.BlobHTTPHeaders
+        :param lease_access_conditions: Parameter group. Default value is None.
+        :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions
+        :param cpk_info: Parameter group. Default value is None.
+        :type cpk_info: ~azure.storage.blob.models.CpkInfo
+        :param cpk_scope_info: Parameter group. Default value is None.
+        :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo
+        :param modified_access_conditions: Parameter group. Default value is None.
+        :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+        _params = kwargs.pop("params", {}) or {}
+
+        blob_type: Literal["PageBlob"] = kwargs.pop("blob_type", _headers.pop("x-ms-blob-type", "PageBlob"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _blob_content_type = None
+        _blob_content_encoding = None
+        _blob_content_language = None
+        _blob_content_md5 = None
+        _blob_cache_control = None
+        _lease_id = None
+        _blob_content_disposition = None
+        _encryption_key = None
+        _encryption_key_sha256 = None
+        _encryption_algorithm = None
+        _encryption_scope = None
+        _if_modified_since = None
+        _if_unmodified_since = None
+        _if_match = None
+        _if_none_match = None
+        _if_tags = None
+        if blob_http_headers is not None:
+            _blob_cache_control = blob_http_headers.blob_cache_control
+            _blob_content_disposition = blob_http_headers.blob_content_disposition
+            _blob_content_encoding = blob_http_headers.blob_content_encoding
+            _blob_content_language = blob_http_headers.blob_content_language
+            _blob_content_md5 = blob_http_headers.blob_content_md5
+            _blob_content_type = blob_http_headers.blob_content_type
+        if lease_access_conditions is not None:
+            _lease_id = lease_access_conditions.lease_id
+        if cpk_info is not None:
+            _encryption_algorithm = cpk_info.encryption_algorithm
+            _encryption_key = cpk_info.encryption_key
+            _encryption_key_sha256 = cpk_info.encryption_key_sha256
+        if cpk_scope_info is not None:
+            _encryption_scope = cpk_scope_info.encryption_scope
+        if modified_access_conditions is not None:
+            _if_match = modified_access_conditions.if_match
+            _if_modified_since = modified_access_conditions.if_modified_since
+            _if_none_match = modified_access_conditions.if_none_match
+            _if_tags = modified_access_conditions.if_tags
+            _if_unmodified_since = modified_access_conditions.if_unmodified_since
+
+        _request = build_create_request(
+            url=self._config.url,
+            content_length=content_length,
+            blob_content_length=blob_content_length,
+            timeout=timeout,
+            tier=tier,
+            blob_content_type=_blob_content_type,
+            blob_content_encoding=_blob_content_encoding,
+            blob_content_language=_blob_content_language,
+            blob_content_md5=_blob_content_md5,
+            blob_cache_control=_blob_cache_control,
+            metadata=metadata,
+            lease_id=_lease_id,
+            blob_content_disposition=_blob_content_disposition,
+            encryption_key=_encryption_key,
+            encryption_key_sha256=_encryption_key_sha256,
+            encryption_algorithm=_encryption_algorithm,
+            encryption_scope=_encryption_scope,
+            if_modified_since=_if_modified_since,
+            if_unmodified_since=_if_unmodified_since,
+            if_match=_if_match,
+            if_none_match=_if_none_match,
+            if_tags=_if_tags,
+            blob_sequence_number=blob_sequence_number,
+            request_id_parameter=request_id_parameter,
+            blob_tags_string=blob_tags_string,
+            immutability_policy_expiry=immutability_policy_expiry,
+            immutability_policy_mode=immutability_policy_mode,
+            legal_hold=legal_hold,
+            blob_type=blob_type,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [201]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["Content-MD5"] = self._deserialize("bytearray", response.headers.get("Content-MD5"))
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["x-ms-version-id"] = self._deserialize("str", response.headers.get("x-ms-version-id"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+        response_headers["x-ms-request-server-encrypted"] = self._deserialize(
+            "bool", response.headers.get("x-ms-request-server-encrypted")
+        )
+        response_headers["x-ms-encryption-key-sha256"] = self._deserialize(
+            "str", response.headers.get("x-ms-encryption-key-sha256")
+        )
+        response_headers["x-ms-encryption-scope"] = self._deserialize(
+            "str", response.headers.get("x-ms-encryption-scope")
+        )
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
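+
+    # Hedged usage sketch (not part of the generated surface): this operation is
+    # normally reached through the AzureBlobStorage client's page_blob attribute.
+    # The endpoint below is illustrative and assumes auth is carried in the URL
+    # (e.g. a SAS token); blob_content_length must be 512-byte aligned.
+    #
+    #     from azure.storage.blob._generated import AzureBlobStorage
+    #
+    #     client = AzureBlobStorage(url="https://myaccount.blob.core.windows.net/c/myblob?<sas>")
+    #     client.page_blob.create(content_length=0, blob_content_length=8 * 1024 * 1024)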
+
+    @distributed_trace
+    def upload_pages(  # pylint: disable=inconsistent-return-statements
+        self,
+        content_length: int,
+        body: IO[bytes],
+        transactional_content_md5: Optional[bytes] = None,
+        transactional_content_crc64: Optional[bytes] = None,
+        timeout: Optional[int] = None,
+        range: Optional[str] = None,
+        request_id_parameter: Optional[str] = None,
+        structured_body_type: Optional[str] = None,
+        structured_content_length: Optional[int] = None,
+        lease_access_conditions: Optional[_models.LeaseAccessConditions] = None,
+        cpk_info: Optional[_models.CpkInfo] = None,
+        cpk_scope_info: Optional[_models.CpkScopeInfo] = None,
+        sequence_number_access_conditions: Optional[_models.SequenceNumberAccessConditions] = None,
+        modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """The Upload Pages operation writes a range of pages to a page blob.
+
+        :param content_length: The length of the request. Required.
+        :type content_length: int
+        :param body: Initial data. Required.
+        :type body: IO[bytes]
+        :param transactional_content_md5: Specify the transactional md5 for the body, to be validated
+         by the service. Default value is None.
+        :type transactional_content_md5: bytes
+        :param transactional_content_crc64: Specify the transactional crc64 for the body, to be
+         validated by the service. Default value is None.
+        :type transactional_content_crc64: bytes
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param range: Specifies the range of bytes to be written as a page. Default value is None.
+        :type range: str
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param structured_body_type: Required if the request body is a structured message. Specifies
+         the message schema version and properties. Default value is None.
+        :type structured_body_type: str
+        :param structured_content_length: Required if the request body is a structured message.
+         Specifies the length of the blob/file content inside the message body. Will always be smaller
+         than Content-Length. Default value is None.
+        :type structured_content_length: int
+        :param lease_access_conditions: Parameter group. Default value is None.
+        :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions
+        :param cpk_info: Parameter group. Default value is None.
+        :type cpk_info: ~azure.storage.blob.models.CpkInfo
+        :param cpk_scope_info: Parameter group. Default value is None.
+        :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo
+        :param sequence_number_access_conditions: Parameter group. Default value is None.
+        :type sequence_number_access_conditions:
+         ~azure.storage.blob.models.SequenceNumberAccessConditions
+        :param modified_access_conditions: Parameter group. Default value is None.
+        :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        comp: Literal["page"] = kwargs.pop("comp", _params.pop("comp", "page"))
+        page_write: Literal["update"] = kwargs.pop("page_write", _headers.pop("x-ms-page-write", "update"))
+        content_type: str = kwargs.pop("content_type", _headers.pop("Content-Type", "application/octet-stream"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _lease_id = None
+        _encryption_key = None
+        _encryption_key_sha256 = None
+        _encryption_algorithm = None
+        _encryption_scope = None
+        _if_sequence_number_less_than_or_equal_to = None
+        _if_sequence_number_less_than = None
+        _if_sequence_number_equal_to = None
+        _if_modified_since = None
+        _if_unmodified_since = None
+        _if_match = None
+        _if_none_match = None
+        _if_tags = None
+        if lease_access_conditions is not None:
+            _lease_id = lease_access_conditions.lease_id
+        if cpk_info is not None:
+            _encryption_algorithm = cpk_info.encryption_algorithm
+            _encryption_key = cpk_info.encryption_key
+            _encryption_key_sha256 = cpk_info.encryption_key_sha256
+        if cpk_scope_info is not None:
+            _encryption_scope = cpk_scope_info.encryption_scope
+        if sequence_number_access_conditions is not None:
+            _if_sequence_number_equal_to = sequence_number_access_conditions.if_sequence_number_equal_to
+            _if_sequence_number_less_than = sequence_number_access_conditions.if_sequence_number_less_than
+            _if_sequence_number_less_than_or_equal_to = (
+                sequence_number_access_conditions.if_sequence_number_less_than_or_equal_to
+            )
+        if modified_access_conditions is not None:
+            _if_match = modified_access_conditions.if_match
+            _if_modified_since = modified_access_conditions.if_modified_since
+            _if_none_match = modified_access_conditions.if_none_match
+            _if_tags = modified_access_conditions.if_tags
+            _if_unmodified_since = modified_access_conditions.if_unmodified_since
+        _content = body
+
+        _request = build_upload_pages_request(
+            url=self._config.url,
+            content_length=content_length,
+            transactional_content_md5=transactional_content_md5,
+            transactional_content_crc64=transactional_content_crc64,
+            timeout=timeout,
+            range=range,
+            lease_id=_lease_id,
+            encryption_key=_encryption_key,
+            encryption_key_sha256=_encryption_key_sha256,
+            encryption_algorithm=_encryption_algorithm,
+            encryption_scope=_encryption_scope,
+            if_sequence_number_less_than_or_equal_to=_if_sequence_number_less_than_or_equal_to,
+            if_sequence_number_less_than=_if_sequence_number_less_than,
+            if_sequence_number_equal_to=_if_sequence_number_equal_to,
+            if_modified_since=_if_modified_since,
+            if_unmodified_since=_if_unmodified_since,
+            if_match=_if_match,
+            if_none_match=_if_none_match,
+            if_tags=_if_tags,
+            request_id_parameter=request_id_parameter,
+            structured_body_type=structured_body_type,
+            structured_content_length=structured_content_length,
+            comp=comp,
+            page_write=page_write,
+            content_type=content_type,
+            version=self._config.version,
+            content=_content,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [201]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["Content-MD5"] = self._deserialize("bytearray", response.headers.get("Content-MD5"))
+        response_headers["x-ms-content-crc64"] = self._deserialize(
+            "bytearray", response.headers.get("x-ms-content-crc64")
+        )
+        response_headers["x-ms-blob-sequence-number"] = self._deserialize(
+            "int", response.headers.get("x-ms-blob-sequence-number")
+        )
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+        response_headers["x-ms-request-server-encrypted"] = self._deserialize(
+            "bool", response.headers.get("x-ms-request-server-encrypted")
+        )
+        response_headers["x-ms-encryption-key-sha256"] = self._deserialize(
+            "str", response.headers.get("x-ms-encryption-key-sha256")
+        )
+        response_headers["x-ms-encryption-scope"] = self._deserialize(
+            "str", response.headers.get("x-ms-encryption-scope")
+        )
+        response_headers["x-ms-structured-body"] = self._deserialize(
+            "str", response.headers.get("x-ms-structured-body")
+        )
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
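+
+    # Hedged sketch: writing a single 512-byte-aligned page. The body stream and
+    # range are illustrative; content_length must equal the number of bytes sent,
+    # and the inclusive range must be 512-aligned on both ends.
+    #
+    #     import io
+    #     data = b"\x00" * 512
+    #     client.page_blob.upload_pages(
+    #         content_length=len(data),
+    #         body=io.BytesIO(data),
+    #         range="bytes=0-511",
+    #     )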
+
+    @distributed_trace
+    def clear_pages(  # pylint: disable=inconsistent-return-statements
+        self,
+        content_length: int,
+        timeout: Optional[int] = None,
+        range: Optional[str] = None,
+        request_id_parameter: Optional[str] = None,
+        lease_access_conditions: Optional[_models.LeaseAccessConditions] = None,
+        cpk_info: Optional[_models.CpkInfo] = None,
+        cpk_scope_info: Optional[_models.CpkScopeInfo] = None,
+        sequence_number_access_conditions: Optional[_models.SequenceNumberAccessConditions] = None,
+        modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """The Clear Pages operation clears a set of pages from a page blob.
+
+        :param content_length: The length of the request. Required.
+        :type content_length: int
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param range: Specifies the range of bytes to be cleared. Default value is None.
+        :type range: str
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param lease_access_conditions: Parameter group. Default value is None.
+        :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions
+        :param cpk_info: Parameter group. Default value is None.
+        :type cpk_info: ~azure.storage.blob.models.CpkInfo
+        :param cpk_scope_info: Parameter group. Default value is None.
+        :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo
+        :param sequence_number_access_conditions: Parameter group. Default value is None.
+        :type sequence_number_access_conditions:
+         ~azure.storage.blob.models.SequenceNumberAccessConditions
+        :param modified_access_conditions: Parameter group. Default value is None.
+        :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        comp: Literal["page"] = kwargs.pop("comp", _params.pop("comp", "page"))
+        page_write: Literal["clear"] = kwargs.pop("page_write", _headers.pop("x-ms-page-write", "clear"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _lease_id = None
+        _encryption_key = None
+        _encryption_key_sha256 = None
+        _encryption_algorithm = None
+        _encryption_scope = None
+        _if_sequence_number_less_than_or_equal_to = None
+        _if_sequence_number_less_than = None
+        _if_sequence_number_equal_to = None
+        _if_modified_since = None
+        _if_unmodified_since = None
+        _if_match = None
+        _if_none_match = None
+        _if_tags = None
+        if lease_access_conditions is not None:
+            _lease_id = lease_access_conditions.lease_id
+        if cpk_info is not None:
+            _encryption_algorithm = cpk_info.encryption_algorithm
+            _encryption_key = cpk_info.encryption_key
+            _encryption_key_sha256 = cpk_info.encryption_key_sha256
+        if cpk_scope_info is not None:
+            _encryption_scope = cpk_scope_info.encryption_scope
+        if sequence_number_access_conditions is not None:
+            _if_sequence_number_equal_to = sequence_number_access_conditions.if_sequence_number_equal_to
+            _if_sequence_number_less_than = sequence_number_access_conditions.if_sequence_number_less_than
+            _if_sequence_number_less_than_or_equal_to = (
+                sequence_number_access_conditions.if_sequence_number_less_than_or_equal_to
+            )
+        if modified_access_conditions is not None:
+            _if_match = modified_access_conditions.if_match
+            _if_modified_since = modified_access_conditions.if_modified_since
+            _if_none_match = modified_access_conditions.if_none_match
+            _if_tags = modified_access_conditions.if_tags
+            _if_unmodified_since = modified_access_conditions.if_unmodified_since
+
+        _request = build_clear_pages_request(
+            url=self._config.url,
+            content_length=content_length,
+            timeout=timeout,
+            range=range,
+            lease_id=_lease_id,
+            encryption_key=_encryption_key,
+            encryption_key_sha256=_encryption_key_sha256,
+            encryption_algorithm=_encryption_algorithm,
+            encryption_scope=_encryption_scope,
+            if_sequence_number_less_than_or_equal_to=_if_sequence_number_less_than_or_equal_to,
+            if_sequence_number_less_than=_if_sequence_number_less_than,
+            if_sequence_number_equal_to=_if_sequence_number_equal_to,
+            if_modified_since=_if_modified_since,
+            if_unmodified_since=_if_unmodified_since,
+            if_match=_if_match,
+            if_none_match=_if_none_match,
+            if_tags=_if_tags,
+            request_id_parameter=request_id_parameter,
+            comp=comp,
+            page_write=page_write,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [201]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["Content-MD5"] = self._deserialize("bytearray", response.headers.get("Content-MD5"))
+        response_headers["x-ms-content-crc64"] = self._deserialize(
+            "bytearray", response.headers.get("x-ms-content-crc64")
+        )
+        response_headers["x-ms-blob-sequence-number"] = self._deserialize(
+            "int", response.headers.get("x-ms-blob-sequence-number")
+        )
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
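+
+    # Hedged sketch: clearing the page written above. Clear Pages sends no body,
+    # so content_length is 0; the range value is illustrative.
+    #
+    #     client.page_blob.clear_pages(content_length=0, range="bytes=0-511")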
+
+    @distributed_trace
+    def upload_pages_from_url(  # pylint: disable=inconsistent-return-statements
+        self,
+        source_url: str,
+        source_range: str,
+        content_length: int,
+        range: str,
+        source_content_md5: Optional[bytes] = None,
+        source_contentcrc64: Optional[bytes] = None,
+        timeout: Optional[int] = None,
+        request_id_parameter: Optional[str] = None,
+        copy_source_authorization: Optional[str] = None,
+        cpk_info: Optional[_models.CpkInfo] = None,
+        cpk_scope_info: Optional[_models.CpkScopeInfo] = None,
+        lease_access_conditions: Optional[_models.LeaseAccessConditions] = None,
+        sequence_number_access_conditions: Optional[_models.SequenceNumberAccessConditions] = None,
+        modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None,
+        source_modified_access_conditions: Optional[_models.SourceModifiedAccessConditions] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """The Upload Pages operation writes a range of pages to a page blob where the contents are read
+        from a URL.
+
+        :param source_url: Specify a URL to the copy source. Required.
+        :type source_url: str
+        :param source_range: Bytes of source data in the specified range. The length of this range
+         must match the Content-Length header and the destination range given in the x-ms-range/Range
+         header. Required.
+        :type source_range: str
+        :param content_length: The length of the request. Required.
+        :type content_length: int
+        :param range: The range of bytes to which the source range would be written. The range should
+         be 512 aligned and range-end is required. Required.
+        :type range: str
+        :param source_content_md5: Specify the md5 calculated for the range of bytes that must be read
+         from the copy source. Default value is None.
+        :type source_content_md5: bytes
+        :param source_contentcrc64: Specify the crc64 calculated for the range of bytes that must be
+         read from the copy source. Default value is None.
+        :type source_contentcrc64: bytes
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param copy_source_authorization: Only Bearer type is supported. Credentials should be a valid
+         OAuth access token to copy source. Default value is None.
+        :type copy_source_authorization: str
+        :param cpk_info: Parameter group. Default value is None.
+        :type cpk_info: ~azure.storage.blob.models.CpkInfo
+        :param cpk_scope_info: Parameter group. Default value is None.
+        :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo
+        :param lease_access_conditions: Parameter group. Default value is None.
+        :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions
+        :param sequence_number_access_conditions: Parameter group. Default value is None.
+        :type sequence_number_access_conditions:
+         ~azure.storage.blob.models.SequenceNumberAccessConditions
+        :param modified_access_conditions: Parameter group. Default value is None.
+        :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions
+        :param source_modified_access_conditions: Parameter group. Default value is None.
+        :type source_modified_access_conditions:
+         ~azure.storage.blob.models.SourceModifiedAccessConditions
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        comp: Literal["page"] = kwargs.pop("comp", _params.pop("comp", "page"))
+        page_write: Literal["update"] = kwargs.pop("page_write", _headers.pop("x-ms-page-write", "update"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _encryption_key = None
+        _encryption_key_sha256 = None
+        _encryption_algorithm = None
+        _encryption_scope = None
+        _lease_id = None
+        _if_sequence_number_less_than_or_equal_to = None
+        _if_sequence_number_less_than = None
+        _if_sequence_number_equal_to = None
+        _if_modified_since = None
+        _if_unmodified_since = None
+        _if_match = None
+        _if_none_match = None
+        _if_tags = None
+        _source_if_modified_since = None
+        _source_if_unmodified_since = None
+        _source_if_match = None
+        _source_if_none_match = None
+        if cpk_info is not None:
+            _encryption_algorithm = cpk_info.encryption_algorithm
+            _encryption_key = cpk_info.encryption_key
+            _encryption_key_sha256 = cpk_info.encryption_key_sha256
+        if cpk_scope_info is not None:
+            _encryption_scope = cpk_scope_info.encryption_scope
+        if lease_access_conditions is not None:
+            _lease_id = lease_access_conditions.lease_id
+        if sequence_number_access_conditions is not None:
+            _if_sequence_number_equal_to = sequence_number_access_conditions.if_sequence_number_equal_to
+            _if_sequence_number_less_than = sequence_number_access_conditions.if_sequence_number_less_than
+            _if_sequence_number_less_than_or_equal_to = (
+                sequence_number_access_conditions.if_sequence_number_less_than_or_equal_to
+            )
+        if modified_access_conditions is not None:
+            _if_match = modified_access_conditions.if_match
+            _if_modified_since = modified_access_conditions.if_modified_since
+            _if_none_match = modified_access_conditions.if_none_match
+            _if_tags = modified_access_conditions.if_tags
+            _if_unmodified_since = modified_access_conditions.if_unmodified_since
+        if source_modified_access_conditions is not None:
+            _source_if_match = source_modified_access_conditions.source_if_match
+            _source_if_modified_since = source_modified_access_conditions.source_if_modified_since
+            _source_if_none_match = source_modified_access_conditions.source_if_none_match
+            _source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since
+
+        _request = build_upload_pages_from_url_request(
+            url=self._config.url,
+            source_url=source_url,
+            source_range=source_range,
+            content_length=content_length,
+            range=range,
+            source_content_md5=source_content_md5,
+            source_contentcrc64=source_contentcrc64,
+            timeout=timeout,
+            encryption_key=_encryption_key,
+            encryption_key_sha256=_encryption_key_sha256,
+            encryption_algorithm=_encryption_algorithm,
+            encryption_scope=_encryption_scope,
+            lease_id=_lease_id,
+            if_sequence_number_less_than_or_equal_to=_if_sequence_number_less_than_or_equal_to,
+            if_sequence_number_less_than=_if_sequence_number_less_than,
+            if_sequence_number_equal_to=_if_sequence_number_equal_to,
+            if_modified_since=_if_modified_since,
+            if_unmodified_since=_if_unmodified_since,
+            if_match=_if_match,
+            if_none_match=_if_none_match,
+            if_tags=_if_tags,
+            source_if_modified_since=_source_if_modified_since,
+            source_if_unmodified_since=_source_if_unmodified_since,
+            source_if_match=_source_if_match,
+            source_if_none_match=_source_if_none_match,
+            request_id_parameter=request_id_parameter,
+            copy_source_authorization=copy_source_authorization,
+            comp=comp,
+            page_write=page_write,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [201]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["Content-MD5"] = self._deserialize("bytearray", response.headers.get("Content-MD5"))
+        response_headers["x-ms-content-crc64"] = self._deserialize(
+            "bytearray", response.headers.get("x-ms-content-crc64")
+        )
+        response_headers["x-ms-blob-sequence-number"] = self._deserialize(
+            "int", response.headers.get("x-ms-blob-sequence-number")
+        )
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+        response_headers["x-ms-request-server-encrypted"] = self._deserialize(
+            "bool", response.headers.get("x-ms-request-server-encrypted")
+        )
+        response_headers["x-ms-encryption-key-sha256"] = self._deserialize(
+            "str", response.headers.get("x-ms-encryption-key-sha256")
+        )
+        response_headers["x-ms-encryption-scope"] = self._deserialize(
+            "str", response.headers.get("x-ms-encryption-scope")
+        )
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
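+
+    # Hedged sketch: a server-side pull of one page from another blob. The source
+    # URL is illustrative and must be readable by the service (public or SAS);
+    # content_length is 0 because no body accompanies the request.
+    #
+    #     client.page_blob.upload_pages_from_url(
+    #         source_url="https://srcaccount.blob.core.windows.net/c/srcblob?<sas>",
+    #         source_range="bytes=0-511",
+    #         content_length=0,
+    #         range="bytes=0-511",
+    #     )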
+
+    @distributed_trace
+    def get_page_ranges(
+        self,
+        snapshot: Optional[str] = None,
+        timeout: Optional[int] = None,
+        range: Optional[str] = None,
+        request_id_parameter: Optional[str] = None,
+        marker: Optional[str] = None,
+        maxresults: Optional[int] = None,
+        lease_access_conditions: Optional[_models.LeaseAccessConditions] = None,
+        modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None,
+        **kwargs: Any
+    ) -> _models.PageList:
+        # pylint: disable=line-too-long
+        """The Get Page Ranges operation returns the list of valid page ranges for a page blob or snapshot
+        of a page blob.
+
+        :param snapshot: The snapshot parameter is an opaque DateTime value that, when present,
+         specifies the blob snapshot to retrieve. For more information on working with blob snapshots,
+         see :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob">Creating
+         a Snapshot of a Blob.</a>`. Default value is None.
+        :type snapshot: str
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param range: Specifies the range of bytes over which to list page ranges, inclusively.
+         Default value is None.
+        :type range: str
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param marker: A string value that identifies the portion of the list of page ranges to be
+         returned with the next listing operation. The operation returns the NextMarker value within the
+         response body if the listing operation did not return all page ranges remaining to be listed
+         with the current page. The NextMarker value can be used as the value for the marker parameter
+         in a subsequent call to request the next page of list items. The marker value is opaque to the
+         client. Default value is None.
+        :type marker: str
+        :param maxresults: Specifies the maximum number of page ranges to return. If the request does
+         not specify maxresults, or specifies a value greater than 5000, the server will return up to
+         5000 items. Note that if the listing operation crosses a partition boundary, then the service
+         will return a continuation token for retrieving the remainder of the results. For this reason,
+         it is possible that the service will return fewer results than specified by maxresults, or than
+         the default of 5000. Default value is None.
+        :type maxresults: int
+        :param lease_access_conditions: Parameter group. Default value is None.
+        :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions
+        :param modified_access_conditions: Parameter group. Default value is None.
+        :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions
+        :return: PageList or the result of cls(response)
+        :rtype: ~azure.storage.blob.models.PageList
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        comp: Literal["pagelist"] = kwargs.pop("comp", _params.pop("comp", "pagelist"))
+        cls: ClsType[_models.PageList] = kwargs.pop("cls", None)
+
+        _lease_id = None
+        _if_modified_since = None
+        _if_unmodified_since = None
+        _if_match = None
+        _if_none_match = None
+        _if_tags = None
+        if lease_access_conditions is not None:
+            _lease_id = lease_access_conditions.lease_id
+        if modified_access_conditions is not None:
+            _if_match = modified_access_conditions.if_match
+            _if_modified_since = modified_access_conditions.if_modified_since
+            _if_none_match = modified_access_conditions.if_none_match
+            _if_tags = modified_access_conditions.if_tags
+            _if_unmodified_since = modified_access_conditions.if_unmodified_since
+
+        _request = build_get_page_ranges_request(
+            url=self._config.url,
+            snapshot=snapshot,
+            timeout=timeout,
+            range=range,
+            lease_id=_lease_id,
+            if_modified_since=_if_modified_since,
+            if_unmodified_since=_if_unmodified_since,
+            if_match=_if_match,
+            if_none_match=_if_none_match,
+            if_tags=_if_tags,
+            request_id_parameter=request_id_parameter,
+            marker=marker,
+            maxresults=maxresults,
+            comp=comp,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["x-ms-blob-content-length"] = self._deserialize(
+            "int", response.headers.get("x-ms-blob-content-length")
+        )
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+
+        deserialized = self._deserialize("PageList", pipeline_response.http_response)
+
+        if cls:
+            return cls(pipeline_response, deserialized, response_headers)  # type: ignore
+
+        return deserialized  # type: ignore
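+
+    # Hedged sketch: summing the populated bytes from the returned PageList model
+    # (page_range holds inclusive start/end offsets, per the generated _models).
+    #
+    #     page_list = client.page_blob.get_page_ranges()
+    #     used_bytes = sum(r.end - r.start + 1 for r in (page_list.page_range or []))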
+
+    @distributed_trace
+    def get_page_ranges_diff(
+        self,
+        snapshot: Optional[str] = None,
+        timeout: Optional[int] = None,
+        prevsnapshot: Optional[str] = None,
+        prev_snapshot_url: Optional[str] = None,
+        range: Optional[str] = None,
+        request_id_parameter: Optional[str] = None,
+        marker: Optional[str] = None,
+        maxresults: Optional[int] = None,
+        lease_access_conditions: Optional[_models.LeaseAccessConditions] = None,
+        modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None,
+        **kwargs: Any
+    ) -> _models.PageList:
+        # pylint: disable=line-too-long
+        """The Get Page Ranges Diff operation returns the list of valid page ranges for a page blob that
+        were changed between target blob and previous snapshot.
+
+        :param snapshot: The snapshot parameter is an opaque DateTime value that, when present,
+         specifies the blob snapshot to retrieve. For more information on working with blob snapshots,
+         see :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob">Creating
+         a Snapshot of a Blob.</a>`. Default value is None.
+        :type snapshot: str
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param prevsnapshot: Optional in version 2015-07-08 and newer. The prevsnapshot parameter is a
+         DateTime value that specifies that the response will contain only pages that were changed
+         between the target blob and the previous snapshot. Changed pages include both updated and cleared
+         pages. The target blob may be a snapshot, as long as the snapshot specified by prevsnapshot is
+         the older of the two. Note that incremental snapshots are currently supported only for blobs
+         created on or after January 1, 2016. Default value is None.
+        :type prevsnapshot: str
+        :param prev_snapshot_url: Optional. This header is only supported in service versions
+         2019-04-19 and after and specifies the URL of a previous snapshot of the target blob. The
+         response will only contain pages that were changed between the target blob and its previous
+         snapshot. Default value is None.
+        :type prev_snapshot_url: str
+        :param range: Specifies the range of bytes over which to list page ranges, inclusively.
+         Default value is None.
+        :type range: str
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param marker: A string value that identifies the portion of the list of page ranges to be
+         returned with the next listing operation. The operation returns the NextMarker value within the
+         response body if the listing operation did not return all page ranges remaining to be listed
+         with the current page. The NextMarker value can be used as the value for the marker parameter
+         in a subsequent call to request the next page of list items. The marker value is opaque to the
+         client. Default value is None.
+        :type marker: str
+        :param maxresults: Specifies the maximum number of page ranges to return. If the request does
+         not specify maxresults, or specifies a value greater than 5000, the server will return up to
+         5000 items. Note that if the listing operation crosses a partition boundary, then the service
+         will return a continuation token for retrieving the remainder of the results. For this reason,
+         it is possible that the service will return fewer results than specified by maxresults, or than
+         the default of 5000. Default value is None.
+        :type maxresults: int
+        :param lease_access_conditions: Parameter group. Default value is None.
+        :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions
+        :param modified_access_conditions: Parameter group. Default value is None.
+        :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions
+        :return: PageList or the result of cls(response)
+        :rtype: ~azure.storage.blob.models.PageList
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        comp: Literal["pagelist"] = kwargs.pop("comp", _params.pop("comp", "pagelist"))
+        cls: ClsType[_models.PageList] = kwargs.pop("cls", None)
+
+        _lease_id = None
+        _if_modified_since = None
+        _if_unmodified_since = None
+        _if_match = None
+        _if_none_match = None
+        _if_tags = None
+        if lease_access_conditions is not None:
+            _lease_id = lease_access_conditions.lease_id
+        if modified_access_conditions is not None:
+            _if_match = modified_access_conditions.if_match
+            _if_modified_since = modified_access_conditions.if_modified_since
+            _if_none_match = modified_access_conditions.if_none_match
+            _if_tags = modified_access_conditions.if_tags
+            _if_unmodified_since = modified_access_conditions.if_unmodified_since
+
+        _request = build_get_page_ranges_diff_request(
+            url=self._config.url,
+            snapshot=snapshot,
+            timeout=timeout,
+            prevsnapshot=prevsnapshot,
+            prev_snapshot_url=prev_snapshot_url,
+            range=range,
+            lease_id=_lease_id,
+            if_modified_since=_if_modified_since,
+            if_unmodified_since=_if_unmodified_since,
+            if_match=_if_match,
+            if_none_match=_if_none_match,
+            if_tags=_if_tags,
+            request_id_parameter=request_id_parameter,
+            marker=marker,
+            maxresults=maxresults,
+            comp=comp,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["x-ms-blob-content-length"] = self._deserialize(
+            "int", response.headers.get("x-ms-blob-content-length")
+        )
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+
+        deserialized = self._deserialize("PageList", pipeline_response.http_response)
+
+        if cls:
+            return cls(pipeline_response, deserialized, response_headers)  # type: ignore
+
+        return deserialized  # type: ignore
+
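+    # A minimal usage sketch, not part of the generated code: calling the
+    # operation above through the low-level client. The import path, account
+    # URL, SAS token, and snapshot timestamp are placeholders/assumptions.
+    #
+    #     from azure.storage.blob._generated import AzureBlobStorage
+    #
+    #     raw = AzureBlobStorage(url="https://acct.blob.core.windows.net/c/pb?<sas>")
+    #     diff = raw.page_blob.get_page_ranges_diff(
+    #         prevsnapshot="2025-01-01T00:00:00.0000000Z",  # earlier snapshot
+    #         range="bytes=0-1048575",                      # inspect first 1 MiB
+    #     )
+    #     for pr in diff.page_range or []:                  # pages changed since
+    #         print(pr.start, pr.end)
+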
+    @distributed_trace
+    def resize(  # pylint: disable=inconsistent-return-statements
+        self,
+        blob_content_length: int,
+        timeout: Optional[int] = None,
+        request_id_parameter: Optional[str] = None,
+        lease_access_conditions: Optional[_models.LeaseAccessConditions] = None,
+        cpk_info: Optional[_models.CpkInfo] = None,
+        cpk_scope_info: Optional[_models.CpkScopeInfo] = None,
+        modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """Resize the Blob.
+
+        :param blob_content_length: This header specifies the maximum size for the page blob, up to 1
+         TB. The page blob size must be aligned to a 512-byte boundary. Required.
+        :type blob_content_length: int
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param lease_access_conditions: Parameter group. Default value is None.
+        :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions
+        :param cpk_info: Parameter group. Default value is None.
+        :type cpk_info: ~azure.storage.blob.models.CpkInfo
+        :param cpk_scope_info: Parameter group. Default value is None.
+        :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo
+        :param modified_access_conditions: Parameter group. Default value is None.
+        :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        comp: Literal["properties"] = kwargs.pop("comp", _params.pop("comp", "properties"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _lease_id = None
+        _encryption_key = None
+        _encryption_key_sha256 = None
+        _encryption_algorithm = None
+        _encryption_scope = None
+        _if_modified_since = None
+        _if_unmodified_since = None
+        _if_match = None
+        _if_none_match = None
+        _if_tags = None
+        if lease_access_conditions is not None:
+            _lease_id = lease_access_conditions.lease_id
+        if cpk_info is not None:
+            _encryption_algorithm = cpk_info.encryption_algorithm
+            _encryption_key = cpk_info.encryption_key
+            _encryption_key_sha256 = cpk_info.encryption_key_sha256
+        if cpk_scope_info is not None:
+            _encryption_scope = cpk_scope_info.encryption_scope
+        if modified_access_conditions is not None:
+            _if_match = modified_access_conditions.if_match
+            _if_modified_since = modified_access_conditions.if_modified_since
+            _if_none_match = modified_access_conditions.if_none_match
+            _if_tags = modified_access_conditions.if_tags
+            _if_unmodified_since = modified_access_conditions.if_unmodified_since
+
+        _request = build_resize_request(
+            url=self._config.url,
+            blob_content_length=blob_content_length,
+            timeout=timeout,
+            lease_id=_lease_id,
+            encryption_key=_encryption_key,
+            encryption_key_sha256=_encryption_key_sha256,
+            encryption_algorithm=_encryption_algorithm,
+            encryption_scope=_encryption_scope,
+            if_modified_since=_if_modified_since,
+            if_unmodified_since=_if_unmodified_since,
+            if_match=_if_match,
+            if_none_match=_if_none_match,
+            if_tags=_if_tags,
+            request_id_parameter=request_id_parameter,
+            comp=comp,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["x-ms-blob-sequence-number"] = self._deserialize(
+            "int", response.headers.get("x-ms-blob-sequence-number")
+        )
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
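+    # A minimal usage sketch, not part of the generated code, reusing the
+    # hypothetical `raw` client from the sketch above. 1 MiB is a multiple of
+    # 512, as the service requires.
+    #
+    #     raw.page_blob.resize(blob_content_length=1024 * 1024)
+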
+    @distributed_trace
+    def update_sequence_number(  # pylint: disable=inconsistent-return-statements
+        self,
+        sequence_number_action: Union[str, _models.SequenceNumberActionType],
+        timeout: Optional[int] = None,
+        blob_sequence_number: int = 0,
+        request_id_parameter: Optional[str] = None,
+        lease_access_conditions: Optional[_models.LeaseAccessConditions] = None,
+        modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """Update the sequence number of the blob.
+
+        :param sequence_number_action: Required if the x-ms-blob-sequence-number header is set for the
+         request. This property applies to page blobs only. This property indicates how the service
+         should modify the blob's sequence number. Known values are: "max", "update", and "increment".
+         Required.
+        :type sequence_number_action: str or ~azure.storage.blob.models.SequenceNumberActionType
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param blob_sequence_number: Set for page blobs only. The sequence number is a user-controlled
+         value that you can use to track requests. The value of the sequence number must be between 0
+         and 2^63 - 1. Default value is 0.
+        :type blob_sequence_number: int
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param lease_access_conditions: Parameter group. Default value is None.
+        :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions
+        :param modified_access_conditions: Parameter group. Default value is None.
+        :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        comp: Literal["properties"] = kwargs.pop("comp", _params.pop("comp", "properties"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _lease_id = None
+        _if_modified_since = None
+        _if_unmodified_since = None
+        _if_match = None
+        _if_none_match = None
+        _if_tags = None
+        if lease_access_conditions is not None:
+            _lease_id = lease_access_conditions.lease_id
+        if modified_access_conditions is not None:
+            _if_match = modified_access_conditions.if_match
+            _if_modified_since = modified_access_conditions.if_modified_since
+            _if_none_match = modified_access_conditions.if_none_match
+            _if_tags = modified_access_conditions.if_tags
+            _if_unmodified_since = modified_access_conditions.if_unmodified_since
+
+        _request = build_update_sequence_number_request(
+            url=self._config.url,
+            sequence_number_action=sequence_number_action,
+            timeout=timeout,
+            lease_id=_lease_id,
+            if_modified_since=_if_modified_since,
+            if_unmodified_since=_if_unmodified_since,
+            if_match=_if_match,
+            if_none_match=_if_none_match,
+            if_tags=_if_tags,
+            blob_sequence_number=blob_sequence_number,
+            request_id_parameter=request_id_parameter,
+            comp=comp,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["x-ms-blob-sequence-number"] = self._deserialize(
+            "int", response.headers.get("x-ms-blob-sequence-number")
+        )
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
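+    # A minimal usage sketch, not part of the generated code, with the same
+    # hypothetical `raw` client. "update" pins an explicit value, while
+    # "increment" adds one and ignores blob_sequence_number.
+    #
+    #     raw.page_blob.update_sequence_number(
+    #         sequence_number_action="update",
+    #         blob_sequence_number=7,
+    #     )
+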
+    @distributed_trace
+    def copy_incremental(  # pylint: disable=inconsistent-return-statements
+        self,
+        copy_source: str,
+        timeout: Optional[int] = None,
+        request_id_parameter: Optional[str] = None,
+        modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """The Copy Incremental operation copies a snapshot of the source page blob to a destination page
+        blob. The snapshot is copied such that only the differential changes between the previously
+        copied snapshot are transferred to the destination. The copied snapshots are complete copies of
+        the original snapshot and can be read or copied from as usual. This API is supported since REST
+        version 2016-05-31.
+
+        :param copy_source: Specifies the name of the source page blob snapshot. This value is a URL of
+         up to 2 KB in length that specifies a page blob snapshot. The value should be URL-encoded as it
+         would appear in a request URI. The source blob must either be public or must be authenticated
+         via a shared access signature. Required.
+        :type copy_source: str
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param modified_access_conditions: Parameter group. Default value is None.
+        :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        comp: Literal["incrementalcopy"] = kwargs.pop("comp", _params.pop("comp", "incrementalcopy"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _if_modified_since = None
+        _if_unmodified_since = None
+        _if_match = None
+        _if_none_match = None
+        _if_tags = None
+        if modified_access_conditions is not None:
+            _if_match = modified_access_conditions.if_match
+            _if_modified_since = modified_access_conditions.if_modified_since
+            _if_none_match = modified_access_conditions.if_none_match
+            _if_tags = modified_access_conditions.if_tags
+            _if_unmodified_since = modified_access_conditions.if_unmodified_since
+
+        _request = build_copy_incremental_request(
+            url=self._config.url,
+            copy_source=copy_source,
+            timeout=timeout,
+            if_modified_since=_if_modified_since,
+            if_unmodified_since=_if_unmodified_since,
+            if_match=_if_match,
+            if_none_match=_if_none_match,
+            if_tags=_if_tags,
+            request_id_parameter=request_id_parameter,
+            comp=comp,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [202]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+        response_headers["x-ms-copy-id"] = self._deserialize("str", response.headers.get("x-ms-copy-id"))
+        response_headers["x-ms-copy-status"] = self._deserialize("str", response.headers.get("x-ms-copy-status"))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
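+
+    # A minimal usage sketch, not part of the generated code: scheduling an
+    # incremental copy onto the destination page blob behind the hypothetical
+    # `raw` client. The source snapshot URL is a placeholder and must be
+    # readable (public or SAS). HTTP 202 means the copy was scheduled, not
+    # finished; progress is reported via the x-ms-copy-status header.
+    #
+    #     raw.page_blob.copy_incremental(
+    #         copy_source="https://acct.blob.core.windows.net/c/src?snapshot=...&<sas>"
+    #     )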
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/blob/_generated/operations/_patch.py b/.venv/lib/python3.12/site-packages/azure/storage/blob/_generated/operations/_patch.py
new file mode 100644
index 00000000..f7dd3251
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/blob/_generated/operations/_patch.py
@@ -0,0 +1,20 @@
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------
+"""Customize generated code here.
+
+Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize
+"""
+from typing import List
+
+__all__: List[str] = []  # Add all objects you want publicly available to users at this package level
+
+
+def patch_sdk():
+    """Do not remove from this file.
+
+    `patch_sdk` is a last resort escape hatch that allows you to do customizations
+    you can't accomplish using the techniques described in
+    https://aka.ms/azsdk/python/dpcodegen/python/customize
+    """
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/blob/_generated/operations/_service_operations.py b/.venv/lib/python3.12/site-packages/azure/storage/blob/_generated/operations/_service_operations.py
new file mode 100644
index 00000000..e6c164ef
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/blob/_generated/operations/_service_operations.py
@@ -0,0 +1,1063 @@
+# pylint: disable=too-many-lines
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+import sys
+from typing import Any, Callable, Dict, IO, Iterator, List, Literal, Optional, TypeVar, Union
+
+from azure.core import PipelineClient
+from azure.core.exceptions import (
+    ClientAuthenticationError,
+    HttpResponseError,
+    ResourceExistsError,
+    ResourceNotFoundError,
+    ResourceNotModifiedError,
+    StreamClosedError,
+    StreamConsumedError,
+    map_error,
+)
+from azure.core.pipeline import PipelineResponse
+from azure.core.rest import HttpRequest, HttpResponse
+from azure.core.tracing.decorator import distributed_trace
+from azure.core.utils import case_insensitive_dict
+
+from .. import models as _models
+from .._configuration import AzureBlobStorageConfiguration
+from .._serialization import Deserializer, Serializer
+
+if sys.version_info >= (3, 9):
+    from collections.abc import MutableMapping
+else:
+    from typing import MutableMapping  # type: ignore
+T = TypeVar("T")
+ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
+
+_SERIALIZER = Serializer()
+_SERIALIZER.client_side_validation = False
+
+
+def build_set_properties_request(
+    url: str, *, content: Any, timeout: Optional[int] = None, request_id_parameter: Optional[str] = None, **kwargs: Any
+) -> HttpRequest:
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    restype: Literal["service"] = kwargs.pop("restype", _params.pop("restype", "service"))
+    comp: Literal["properties"] = kwargs.pop("comp", _params.pop("comp", "properties"))
+    content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+    version: Literal["2025-01-05"] = kwargs.pop("version", _headers.pop("x-ms-version", "2025-01-05"))
+    accept = _headers.pop("Accept", "application/xml")
+
+    # Construct URL
+    _url = kwargs.pop("template_url", "{url}")
+    path_format_arguments = {
+        "url": _SERIALIZER.url("url", url, "str", skip_quote=True),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    _params["restype"] = _SERIALIZER.query("restype", restype, "str")
+    _params["comp"] = _SERIALIZER.query("comp", comp, "str")
+    if timeout is not None:
+        _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0)
+
+    # Construct headers
+    _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str")
+    if request_id_parameter is not None:
+        _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str")
+    if content_type is not None:
+        _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, content=content, **kwargs)
+
+
+def build_get_properties_request(
+    url: str, *, timeout: Optional[int] = None, request_id_parameter: Optional[str] = None, **kwargs: Any
+) -> HttpRequest:
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    restype: Literal["service"] = kwargs.pop("restype", _params.pop("restype", "service"))
+    comp: Literal["properties"] = kwargs.pop("comp", _params.pop("comp", "properties"))
+    version: Literal["2025-01-05"] = kwargs.pop("version", _headers.pop("x-ms-version", "2025-01-05"))
+    accept = _headers.pop("Accept", "application/xml")
+
+    # Construct URL
+    _url = kwargs.pop("template_url", "{url}")
+    path_format_arguments = {
+        "url": _SERIALIZER.url("url", url, "str", skip_quote=True),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    _params["restype"] = _SERIALIZER.query("restype", restype, "str")
+    _params["comp"] = _SERIALIZER.query("comp", comp, "str")
+    if timeout is not None:
+        _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0)
+
+    # Construct headers
+    _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str")
+    if request_id_parameter is not None:
+        _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str")
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
+
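+# A minimal sketch, not part of the generated code: these build_* helpers do
+# no I/O; they return an azure.core.rest.HttpRequest with the restype/comp
+# query parameters and x-ms-version header already serialized. The account
+# URL is a placeholder.
+#
+#     req = build_get_properties_request("https://acct.blob.core.windows.net", timeout=30)
+#     assert req.method == "GET"
+#     assert "restype=service" in req.url and "comp=properties" in req.url
+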
+
+def build_get_statistics_request(
+    url: str, *, timeout: Optional[int] = None, request_id_parameter: Optional[str] = None, **kwargs: Any
+) -> HttpRequest:
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    restype: Literal["service"] = kwargs.pop("restype", _params.pop("restype", "service"))
+    comp: Literal["stats"] = kwargs.pop("comp", _params.pop("comp", "stats"))
+    version: Literal["2025-01-05"] = kwargs.pop("version", _headers.pop("x-ms-version", "2025-01-05"))
+    accept = _headers.pop("Accept", "application/xml")
+
+    # Construct URL
+    _url = kwargs.pop("template_url", "{url}")
+    path_format_arguments = {
+        "url": _SERIALIZER.url("url", url, "str", skip_quote=True),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    _params["restype"] = _SERIALIZER.query("restype", restype, "str")
+    _params["comp"] = _SERIALIZER.query("comp", comp, "str")
+    if timeout is not None:
+        _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0)
+
+    # Construct headers
+    _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str")
+    if request_id_parameter is not None:
+        _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str")
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_list_containers_segment_request(
+    url: str,
+    *,
+    prefix: Optional[str] = None,
+    marker: Optional[str] = None,
+    maxresults: Optional[int] = None,
+    include: Optional[List[Union[str, _models.ListContainersIncludeType]]] = None,
+    timeout: Optional[int] = None,
+    request_id_parameter: Optional[str] = None,
+    **kwargs: Any
+) -> HttpRequest:
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    comp: Literal["list"] = kwargs.pop("comp", _params.pop("comp", "list"))
+    version: Literal["2025-01-05"] = kwargs.pop("version", _headers.pop("x-ms-version", "2025-01-05"))
+    accept = _headers.pop("Accept", "application/xml")
+
+    # Construct URL
+    _url = kwargs.pop("template_url", "{url}")
+    path_format_arguments = {
+        "url": _SERIALIZER.url("url", url, "str", skip_quote=True),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    _params["comp"] = _SERIALIZER.query("comp", comp, "str")
+    if prefix is not None:
+        _params["prefix"] = _SERIALIZER.query("prefix", prefix, "str")
+    if marker is not None:
+        _params["marker"] = _SERIALIZER.query("marker", marker, "str")
+    if maxresults is not None:
+        _params["maxresults"] = _SERIALIZER.query("maxresults", maxresults, "int", minimum=1)
+    if include is not None:
+        _params["include"] = _SERIALIZER.query("include", include, "[str]", div=",")
+    if timeout is not None:
+        _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0)
+
+    # Construct headers
+    _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str")
+    if request_id_parameter is not None:
+        _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str")
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_get_user_delegation_key_request(
+    url: str, *, content: Any, timeout: Optional[int] = None, request_id_parameter: Optional[str] = None, **kwargs: Any
+) -> HttpRequest:
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    restype: Literal["service"] = kwargs.pop("restype", _params.pop("restype", "service"))
+    comp: Literal["userdelegationkey"] = kwargs.pop("comp", _params.pop("comp", "userdelegationkey"))
+    content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+    version: Literal["2025-01-05"] = kwargs.pop("version", _headers.pop("x-ms-version", "2025-01-05"))
+    accept = _headers.pop("Accept", "application/xml")
+
+    # Construct URL
+    _url = kwargs.pop("template_url", "{url}")
+    path_format_arguments = {
+        "url": _SERIALIZER.url("url", url, "str", skip_quote=True),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    _params["restype"] = _SERIALIZER.query("restype", restype, "str")
+    _params["comp"] = _SERIALIZER.query("comp", comp, "str")
+    if timeout is not None:
+        _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0)
+
+    # Construct headers
+    _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str")
+    if request_id_parameter is not None:
+        _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str")
+    if content_type is not None:
+        _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, content=content, **kwargs)
+
+
+def build_get_account_info_request(
+    url: str, *, timeout: Optional[int] = None, request_id_parameter: Optional[str] = None, **kwargs: Any
+) -> HttpRequest:
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    restype: Literal["account"] = kwargs.pop("restype", _params.pop("restype", "account"))
+    comp: Literal["properties"] = kwargs.pop("comp", _params.pop("comp", "properties"))
+    version: Literal["2025-01-05"] = kwargs.pop("version", _headers.pop("x-ms-version", "2025-01-05"))
+    accept = _headers.pop("Accept", "application/xml")
+
+    # Construct URL
+    _url = kwargs.pop("template_url", "{url}")
+    path_format_arguments = {
+        "url": _SERIALIZER.url("url", url, "str", skip_quote=True),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    _params["restype"] = _SERIALIZER.query("restype", restype, "str")
+    _params["comp"] = _SERIALIZER.query("comp", comp, "str")
+    if timeout is not None:
+        _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0)
+
+    # Construct headers
+    _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str")
+    if request_id_parameter is not None:
+        _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str")
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_submit_batch_request(
+    url: str,
+    *,
+    content_length: int,
+    content: IO[bytes],
+    timeout: Optional[int] = None,
+    request_id_parameter: Optional[str] = None,
+    **kwargs: Any
+) -> HttpRequest:
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    comp: Literal["batch"] = kwargs.pop("comp", _params.pop("comp", "batch"))
+    multipart_content_type: Optional[str] = kwargs.pop("multipart_content_type", _headers.pop("Content-Type", None))
+    version: Literal["2025-01-05"] = kwargs.pop("version", _headers.pop("x-ms-version", "2025-01-05"))
+    accept = _headers.pop("Accept", "application/xml")
+
+    # Construct URL
+    _url = kwargs.pop("template_url", "{url}")
+    path_format_arguments = {
+        "url": _SERIALIZER.url("url", url, "str", skip_quote=True),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    _params["comp"] = _SERIALIZER.query("comp", comp, "str")
+    if timeout is not None:
+        _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0)
+
+    # Construct headers
+    _headers["Content-Length"] = _SERIALIZER.header("content_length", content_length, "int")
+    if multipart_content_type is not None:
+        _headers["Content-Type"] = _SERIALIZER.header("multipart_content_type", multipart_content_type, "str")
+    _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str")
+    if request_id_parameter is not None:
+        _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str")
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, content=content, **kwargs)
+
+
+def build_filter_blobs_request(
+    url: str,
+    *,
+    timeout: Optional[int] = None,
+    request_id_parameter: Optional[str] = None,
+    where: Optional[str] = None,
+    marker: Optional[str] = None,
+    maxresults: Optional[int] = None,
+    include: Optional[List[Union[str, _models.FilterBlobsIncludeItem]]] = None,
+    **kwargs: Any
+) -> HttpRequest:
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    comp: Literal["blobs"] = kwargs.pop("comp", _params.pop("comp", "blobs"))
+    version: Literal["2025-01-05"] = kwargs.pop("version", _headers.pop("x-ms-version", "2025-01-05"))
+    accept = _headers.pop("Accept", "application/xml")
+
+    # Construct URL
+    _url = kwargs.pop("template_url", "{url}")
+    path_format_arguments = {
+        "url": _SERIALIZER.url("url", url, "str", skip_quote=True),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    _params["comp"] = _SERIALIZER.query("comp", comp, "str")
+    if timeout is not None:
+        _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0)
+    if where is not None:
+        _params["where"] = _SERIALIZER.query("where", where, "str")
+    if marker is not None:
+        _params["marker"] = _SERIALIZER.query("marker", marker, "str")
+    if maxresults is not None:
+        _params["maxresults"] = _SERIALIZER.query("maxresults", maxresults, "int", minimum=1)
+    if include is not None:
+        _params["include"] = _SERIALIZER.query("include", include, "[str]", div=",")
+
+    # Construct headers
+    _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str")
+    if request_id_parameter is not None:
+        _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str")
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+class ServiceOperations:
+    """
+    .. warning::
+        **DO NOT** instantiate this class directly.
+
+        Instead, you should access the following operations through
+        :class:`~azure.storage.blob.AzureBlobStorage`'s
+        :attr:`service` attribute.
+    """
+
+    models = _models
+
+    def __init__(self, *args, **kwargs):
+        input_args = list(args)
+        self._client: PipelineClient = input_args.pop(0) if input_args else kwargs.pop("client")
+        self._config: AzureBlobStorageConfiguration = input_args.pop(0) if input_args else kwargs.pop("config")
+        self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer")
+        self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer")
+
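+    # A minimal sketch, not part of the generated code: the sanctioned route to
+    # these operations per the class docstring above. Import path, account URL,
+    # and SAS are placeholders/assumptions.
+    #
+    #     from azure.storage.blob._generated import AzureBlobStorage
+    #
+    #     raw = AzureBlobStorage(url="https://acct.blob.core.windows.net?<sas>")
+    #     svc = raw.service  # a ServiceOperations instance
+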
+    @distributed_trace
+    def set_properties(  # pylint: disable=inconsistent-return-statements
+        self,
+        storage_service_properties: _models.StorageServiceProperties,
+        timeout: Optional[int] = None,
+        request_id_parameter: Optional[str] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """Sets properties for a storage account's Blob service endpoint, including properties for Storage
+        Analytics and CORS (Cross-Origin Resource Sharing) rules.
+
+        :param storage_service_properties: The StorageService properties. Required.
+        :type storage_service_properties: ~azure.storage.blob.models.StorageServiceProperties
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        restype: Literal["service"] = kwargs.pop("restype", _params.pop("restype", "service"))
+        comp: Literal["properties"] = kwargs.pop("comp", _params.pop("comp", "properties"))
+        content_type: str = kwargs.pop("content_type", _headers.pop("Content-Type", "application/xml"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _content = self._serialize.body(storage_service_properties, "StorageServiceProperties", is_xml=True)
+
+        _request = build_set_properties_request(
+            url=self._config.url,
+            timeout=timeout,
+            request_id_parameter=request_id_parameter,
+            restype=restype,
+            comp=comp,
+            content_type=content_type,
+            version=self._config.version,
+            content=_content,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [202]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
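+    # A minimal sketch, not part of the generated code: turning on a 7-day
+    # delete retention policy through the hypothetical `svc` handle from the
+    # sketch above. Field names follow azure.storage.blob.models.
+    #
+    #     props = _models.StorageServiceProperties(
+    #         delete_retention_policy=_models.RetentionPolicy(enabled=True, days=7),
+    #     )
+    #     svc.set_properties(props)  # service answers 202 Accepted
+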
+    @distributed_trace
+    def get_properties(
+        self, timeout: Optional[int] = None, request_id_parameter: Optional[str] = None, **kwargs: Any
+    ) -> _models.StorageServiceProperties:
+        # pylint: disable=line-too-long
+        """gets the properties of a storage account's Blob service, including properties for Storage
+        Analytics and CORS (Cross-Origin Resource Sharing) rules.
+
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :return: StorageServiceProperties or the result of cls(response)
+        :rtype: ~azure.storage.blob.models.StorageServiceProperties
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        restype: Literal["service"] = kwargs.pop("restype", _params.pop("restype", "service"))
+        comp: Literal["properties"] = kwargs.pop("comp", _params.pop("comp", "properties"))
+        cls: ClsType[_models.StorageServiceProperties] = kwargs.pop("cls", None)
+
+        _request = build_get_properties_request(
+            url=self._config.url,
+            timeout=timeout,
+            request_id_parameter=request_id_parameter,
+            restype=restype,
+            comp=comp,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+
+        deserialized = self._deserialize("StorageServiceProperties", pipeline_response.http_response)
+
+        if cls:
+            return cls(pipeline_response, deserialized, response_headers)  # type: ignore
+
+        return deserialized  # type: ignore
+
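+    # A minimal sketch, not part of the generated code: reading the settings
+    # back with the hypothetical `svc` handle.
+    #
+    #     current = svc.get_properties()
+    #     drp = current.delete_retention_policy
+    #     if drp is not None:
+    #         print(drp.enabled, drp.days)
+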
+    @distributed_trace
+    def get_statistics(
+        self, timeout: Optional[int] = None, request_id_parameter: Optional[str] = None, **kwargs: Any
+    ) -> _models.StorageServiceStats:
+        # pylint: disable=line-too-long
+        """Retrieves statistics related to replication for the Blob service. It is only available on the
+        secondary location endpoint when read-access geo-redundant replication is enabled for the
+        storage account.
+
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :return: StorageServiceStats or the result of cls(response)
+        :rtype: ~azure.storage.blob.models.StorageServiceStats
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        restype: Literal["service"] = kwargs.pop("restype", _params.pop("restype", "service"))
+        comp: Literal["stats"] = kwargs.pop("comp", _params.pop("comp", "stats"))
+        cls: ClsType[_models.StorageServiceStats] = kwargs.pop("cls", None)
+
+        _request = build_get_statistics_request(
+            url=self._config.url,
+            timeout=timeout,
+            request_id_parameter=request_id_parameter,
+            restype=restype,
+            comp=comp,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+
+        deserialized = self._deserialize("StorageServiceStats", pipeline_response.http_response)
+
+        if cls:
+            return cls(pipeline_response, deserialized, response_headers)  # type: ignore
+
+        return deserialized  # type: ignore
+
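+    # A minimal sketch, not part of the generated code: as the docstring notes,
+    # statistics are served only from the secondary endpoint of an RA-GRS
+    # account, so the client must target that host (placeholder URL below).
+    #
+    #     sec = AzureBlobStorage(url="https://acct-secondary.blob.core.windows.net?<sas>")
+    #     stats = sec.service.get_statistics()
+    #     print(stats.geo_replication.status, stats.geo_replication.last_sync_time)
+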
+    @distributed_trace
+    def list_containers_segment(
+        self,
+        prefix: Optional[str] = None,
+        marker: Optional[str] = None,
+        maxresults: Optional[int] = None,
+        include: Optional[List[Union[str, _models.ListContainersIncludeType]]] = None,
+        timeout: Optional[int] = None,
+        request_id_parameter: Optional[str] = None,
+        **kwargs: Any
+    ) -> _models.ListContainersSegmentResponse:
+        # pylint: disable=line-too-long
+        """The List Containers Segment operation returns a list of the containers under the specified
+        account.
+
+        :param prefix: Filters the results to return only containers whose name begins with the
+         specified prefix. Default value is None.
+        :type prefix: str
+        :param marker: A string value that identifies the portion of the list of containers to be
+         returned with the next listing operation. The operation returns the NextMarker value within the
+         response body if the listing operation did not return all containers remaining to be listed
+         with the current page. The NextMarker value can be used as the value for the marker parameter
+         in a subsequent call to request the next page of list items. The marker value is opaque to the
+         client. Default value is None.
+        :type marker: str
+        :param maxresults: Specifies the maximum number of containers to return. If the request does
+         not specify maxresults, or specifies a value greater than 5000, the server will return up to
+         5000 items. Note that if the listing operation crosses a partition boundary, then the service
+         will return a continuation token for retrieving the remainder of the results. For this reason,
+         it is possible that the service will return fewer results than specified by maxresults, or than
+         the default of 5000. Default value is None.
+        :type maxresults: int
+        :param include: Include this parameter to specify that the containers' metadata be returned as
+         part of the response body. Default value is None.
+        :type include: list[str or ~azure.storage.blob.models.ListContainersIncludeType]
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :return: ListContainersSegmentResponse or the result of cls(response)
+        :rtype: ~azure.storage.blob.models.ListContainersSegmentResponse
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        comp: Literal["list"] = kwargs.pop("comp", _params.pop("comp", "list"))
+        cls: ClsType[_models.ListContainersSegmentResponse] = kwargs.pop("cls", None)
+
+        _request = build_list_containers_segment_request(
+            url=self._config.url,
+            prefix=prefix,
+            marker=marker,
+            maxresults=maxresults,
+            include=include,
+            timeout=timeout,
+            request_id_parameter=request_id_parameter,
+            comp=comp,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+
+        deserialized = self._deserialize("ListContainersSegmentResponse", pipeline_response.http_response)
+
+        if cls:
+            return cls(pipeline_response, deserialized, response_headers)  # type: ignore
+
+        return deserialized  # type: ignore
+
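+    # A minimal sketch, not part of the generated code: draining every page by
+    # feeding next_marker back in, exactly as the `marker` docstring above
+    # describes, using the hypothetical `svc` handle.
+    #
+    #     marker = None
+    #     while True:
+    #         page = svc.list_containers_segment(marker=marker, maxresults=100)
+    #         for item in page.container_items:
+    #             print(item.name)
+    #         marker = page.next_marker
+    #         if not marker:
+    #             break
+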
+    @distributed_trace
+    def get_user_delegation_key(
+        self,
+        key_info: _models.KeyInfo,
+        timeout: Optional[int] = None,
+        request_id_parameter: Optional[str] = None,
+        **kwargs: Any
+    ) -> _models.UserDelegationKey:
+        # pylint: disable=line-too-long
+        """Retrieves a user delegation key for the Blob service. This is only a valid operation when using
+        bearer token authentication.
+
+        :param key_info: Key information. Required.
+        :type key_info: ~azure.storage.blob.models.KeyInfo
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :return: UserDelegationKey or the result of cls(response)
+        :rtype: ~azure.storage.blob.models.UserDelegationKey
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        restype: Literal["service"] = kwargs.pop("restype", _params.pop("restype", "service"))
+        comp: Literal["userdelegationkey"] = kwargs.pop("comp", _params.pop("comp", "userdelegationkey"))
+        content_type: str = kwargs.pop("content_type", _headers.pop("Content-Type", "application/xml"))
+        cls: ClsType[_models.UserDelegationKey] = kwargs.pop("cls", None)
+
+        _content = self._serialize.body(key_info, "KeyInfo", is_xml=True)
+
+        _request = build_get_user_delegation_key_request(
+            url=self._config.url,
+            timeout=timeout,
+            request_id_parameter=request_id_parameter,
+            restype=restype,
+            comp=comp,
+            content_type=content_type,
+            version=self._config.version,
+            content=_content,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+
+        deserialized = self._deserialize("UserDelegationKey", pipeline_response.http_response)
+
+        if cls:
+            return cls(pipeline_response, deserialized, response_headers)  # type: ignore
+
+        return deserialized  # type: ignore
+
+    @distributed_trace
+    def get_account_info(  # pylint: disable=inconsistent-return-statements
+        self, timeout: Optional[int] = None, request_id_parameter: Optional[str] = None, **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """Returns the sku name and account kind.
+
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
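+
+        Example, as a minimal sketch (``service_op`` is an assumed instance of
+        this operations class; the operation returns ``None``, so the ``cls``
+        callback is used to surface the response headers):
+
+        .. code-block:: python
+
+            def on_response(pipeline_response, deserialized, headers):
+                return headers
+
+            info = service_op.get_account_info(cls=on_response)
+            print(info["x-ms-sku-name"], info["x-ms-account-kind"])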
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        restype: Literal["account"] = kwargs.pop("restype", _params.pop("restype", "account"))
+        comp: Literal["properties"] = kwargs.pop("comp", _params.pop("comp", "properties"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _request = build_get_account_info_request(
+            url=self._config.url,
+            timeout=timeout,
+            request_id_parameter=request_id_parameter,
+            restype=restype,
+            comp=comp,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+        response_headers["x-ms-sku-name"] = self._deserialize("str", response.headers.get("x-ms-sku-name"))
+        response_headers["x-ms-account-kind"] = self._deserialize("str", response.headers.get("x-ms-account-kind"))
+        response_headers["x-ms-is-hns-enabled"] = self._deserialize("bool", response.headers.get("x-ms-is-hns-enabled"))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
+    @distributed_trace
+    def submit_batch(
+        self,
+        content_length: int,
+        body: IO[bytes],
+        timeout: Optional[int] = None,
+        request_id_parameter: Optional[str] = None,
+        **kwargs: Any
+    ) -> Iterator[bytes]:
+        # pylint: disable=line-too-long
+        """The Batch operation allows multiple API calls to be embedded into a single HTTP request.
+
+        :param content_length: The length of the request. Required.
+        :type content_length: int
+        :param body: Initial data. Required.
+        :type body: IO[bytes]
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :return: Iterator[bytes] or the result of cls(response)
+        :rtype: Iterator[bytes]
+        :raises ~azure.core.exceptions.HttpResponseError:
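+
+        Example, as a minimal sketch (``service_op`` and the pre-built
+        multipart/mixed payload ``batch_body`` are assumed; the response is
+        streamed back as raw bytes):
+
+        .. code-block:: python
+
+            import io
+
+            chunks = service_op.submit_batch(
+                content_length=len(batch_body),
+                body=io.BytesIO(batch_body),
+            )
+            raw_multipart_response = b"".join(chunks)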
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        comp: Literal["batch"] = kwargs.pop("comp", _params.pop("comp", "batch"))
+        multipart_content_type: str = kwargs.pop(
+            "multipart_content_type", _headers.pop("Content-Type", "application/xml")
+        )
+        cls: ClsType[Iterator[bytes]] = kwargs.pop("cls", None)
+
+        _content = body
+
+        _request = build_submit_batch_request(
+            url=self._config.url,
+            content_length=content_length,
+            timeout=timeout,
+            request_id_parameter=request_id_parameter,
+            comp=comp,
+            multipart_content_type=multipart_content_type,
+            version=self._config.version,
+            content=_content,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _decompress = kwargs.pop("decompress", True)
+        _stream = True
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            try:
+                response.read()  # Load the body in memory and close the socket
+            except (StreamConsumedError, StreamClosedError):
+                pass
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["Content-Type"] = self._deserialize("str", response.headers.get("Content-Type"))
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+
+        deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
+
+        if cls:
+            return cls(pipeline_response, deserialized, response_headers)  # type: ignore
+
+        return deserialized  # type: ignore
+
+    @distributed_trace
+    def filter_blobs(
+        self,
+        timeout: Optional[int] = None,
+        request_id_parameter: Optional[str] = None,
+        where: Optional[str] = None,
+        marker: Optional[str] = None,
+        maxresults: Optional[int] = None,
+        include: Optional[List[Union[str, _models.FilterBlobsIncludeItem]]] = None,
+        **kwargs: Any
+    ) -> _models.FilterBlobSegment:
+        # pylint: disable=line-too-long
+        """The Filter Blobs operation enables callers to list blobs across all containers whose tags match
+        a given search expression.  Filter blobs searches across all containers within a storage
+        account but can be scoped within the expression to a single container.
+
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param where: Filters the results to return only blobs whose tags match the
+         specified expression. Default value is None.
+        :type where: str
+        :param marker: A string value that identifies the portion of the list of blobs to be
+         returned with the next listing operation. The operation returns the NextMarker value within the
+         response body if the listing operation did not return all blobs remaining to be listed
+         with the current page. The NextMarker value can be used as the value for the marker parameter
+         in a subsequent call to request the next page of list items. The marker value is opaque to the
+         client. Default value is None.
+        :type marker: str
+        :param maxresults: Specifies the maximum number of blobs to return. If the request does
+         not specify maxresults, or specifies a value greater than 5000, the server will return up to
+         5000 items. Note that if the listing operation crosses a partition boundary, then the service
+         will return a continuation token for retrieving the remainder of the results. For this reason,
+         it is possible that the service will return fewer results than specified by maxresults, or than
+         the default of 5000. Default value is None.
+        :type maxresults: int
+        :param include: Include this parameter to specify one or more datasets to include in the
+         response. Default value is None.
+        :type include: list[str or ~azure.storage.blob.models.FilterBlobsIncludeItem]
+        :return: FilterBlobSegment or the result of cls(response)
+        :rtype: ~azure.storage.blob.models.FilterBlobSegment
+        :raises ~azure.core.exceptions.HttpResponseError:
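+
+        Example, as a minimal sketch (``service_op`` is an assumed instance of
+        this operations class; the ``where`` expression uses the blob tag
+        filter syntax):
+
+        .. code-block:: python
+
+            segment = service_op.filter_blobs(where="\"env\"='prod'", maxresults=10)
+            for item in segment.blobs:
+                print(item.container_name, item.name)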
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        comp: Literal["blobs"] = kwargs.pop("comp", _params.pop("comp", "blobs"))
+        cls: ClsType[_models.FilterBlobSegment] = kwargs.pop("cls", None)
+
+        _request = build_filter_blobs_request(
+            url=self._config.url,
+            timeout=timeout,
+            request_id_parameter=request_id_parameter,
+            where=where,
+            marker=marker,
+            maxresults=maxresults,
+            include=include,
+            comp=comp,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+
+        deserialized = self._deserialize("FilterBlobSegment", pipeline_response.http_response)
+
+        if cls:
+            return cls(pipeline_response, deserialized, response_headers)  # type: ignore
+
+        return deserialized  # type: ignore
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/blob/_generated/py.typed b/.venv/lib/python3.12/site-packages/azure/storage/blob/_generated/py.typed
new file mode 100644
index 00000000..e5aff4f8
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/blob/_generated/py.typed
@@ -0,0 +1 @@
+# Marker file for PEP 561.
\ No newline at end of file
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/blob/_lease.py b/.venv/lib/python3.12/site-packages/azure/storage/blob/_lease.py
new file mode 100644
index 00000000..b8b5684d
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/blob/_lease.py
@@ -0,0 +1,341 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+# pylint: disable=docstring-keyword-should-match-keyword-only
+
+import uuid
+
+from typing import Any, Optional, Union, TYPE_CHECKING
+
+from azure.core.exceptions import HttpResponseError
+from azure.core.tracing.decorator import distributed_trace
+
+from ._shared.response_handlers import process_storage_error, return_response_headers
+from ._serialize import get_modify_conditions
+
+if TYPE_CHECKING:
+    from azure.storage.blob import BlobClient, ContainerClient
+    from datetime import datetime
+
+
+class BlobLeaseClient(): # pylint: disable=client-accepts-api-version-keyword
+    """Creates a new BlobLeaseClient.
+
+    This client provides lease operations on a BlobClient or ContainerClient.
+
+    :param client: The client of the blob or container to lease.
+    :type client: Union[BlobClient, ContainerClient]
+    :param lease_id: A string representing the lease ID of an existing lease. This value does not need to be
+    specified in order to acquire a new lease, or break one.
+    :type lease_id: Optional[str]
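+
+    Example, as a minimal sketch (``blob_client`` is an assumed
+    ~azure.storage.blob.BlobClient instance):
+
+    .. code-block:: python
+
+        lease = BlobLeaseClient(blob_client)
+        lease.acquire(lease_duration=15)
+        try:
+            ...  # operate on the blob while the lease is held
+        finally:
+            lease.release()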
+    """
+
+    id: str
+    """The ID of the lease currently being maintained. This will be `None` if no
+    lease has yet been acquired."""
+    etag: Optional[str]
+    """The ETag of the lease currently being maintained. This will be `None` if no
+    lease has yet been acquired or modified."""
+    last_modified: Optional["datetime"]
+    """The last modified timestamp of the lease currently being maintained.
+    This will be `None` if no lease has yet been acquired or modified."""
+
+    def __init__( # pylint: disable=missing-client-constructor-parameter-credential, missing-client-constructor-parameter-kwargs
+        self, client: Union["BlobClient", "ContainerClient"],
+        lease_id: Optional[str] = None
+    ) -> None:
+        self.id = lease_id or str(uuid.uuid4())
+        self.last_modified = None
+        self.etag = None
+        if hasattr(client, 'blob_name'):
+            self._client = client._client.blob
+        elif hasattr(client, 'container_name'):
+            self._client = client._client.container
+        else:
+            raise TypeError("Lease must use either BlobClient or ContainerClient.")
+
+    def __enter__(self):
+        return self
+
+    def __exit__(self, *args):
+        self.release()
+
+    @distributed_trace
+    def acquire(self, lease_duration: int = -1, **kwargs: Any) -> None:
+        """Requests a new lease.
+
+        If the container or blob does not have an active lease, the Blob service
+        creates a lease on it and returns a new lease ID.
+
+        :param int lease_duration:
+            Specifies the duration of the lease, in seconds, or negative one
+            (-1) for a lease that never expires. A non-infinite lease can be
+            between 15 and 60 seconds. A lease duration cannot be changed
+            using renew or change. Default is -1 (infinite lease).
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword str if_tags_match_condition:
+            Specify a SQL where clause on blob tags to operate only on blob with a matching value.
+            eg. ``\"\\\"tagname\\\"='my tag'\"``
+
+            .. versionadded:: 12.4.0
+
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :rtype: None
+        """
+        mod_conditions = get_modify_conditions(kwargs)
+        try:
+            response: Any = self._client.acquire_lease(
+                timeout=kwargs.pop('timeout', None),
+                duration=lease_duration,
+                proposed_lease_id=self.id,
+                modified_access_conditions=mod_conditions,
+                cls=return_response_headers,
+                **kwargs)
+        except HttpResponseError as error:
+            process_storage_error(error)
+        self.id = response.get('lease_id')
+        self.last_modified = response.get('last_modified')
+        self.etag = response.get('etag')
+
+    @distributed_trace
+    def renew(self, **kwargs: Any) -> None:
+        """Renews the lease.
+
+        The lease can be renewed if the lease ID specified in the
+        lease client matches that associated with the container or blob. Note that
+        the lease may be renewed even if it has expired as long as the container
+        or blob has not been leased again since the expiration of that lease. When you
+        renew a lease, the lease duration clock resets.
+
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword str if_tags_match_condition:
+            Specify a SQL where clause on blob tags to operate only on blob with a matching value.
+            eg. ``\"\\\"tagname\\\"='my tag'\"``
+
+            .. versionadded:: 12.4.0
+
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :return: None
+        """
+        mod_conditions = get_modify_conditions(kwargs)
+        try:
+            response: Any = self._client.renew_lease(
+                lease_id=self.id,
+                timeout=kwargs.pop('timeout', None),
+                modified_access_conditions=mod_conditions,
+                cls=return_response_headers,
+                **kwargs)
+        except HttpResponseError as error:
+            process_storage_error(error)
+        self.etag = response.get('etag')
+        self.id = response.get('lease_id')
+        self.last_modified = response.get('last_modified')
+
+    @distributed_trace
+    def release(self, **kwargs: Any) -> None:
+        """Release the lease.
+
+        The lease may be released if the lease ID specified by the client matches
+        that associated with the container or blob. Releasing the lease allows another
+        client to acquire it as soon as the release is complete.
+
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword str if_tags_match_condition:
+            Specify a SQL where clause on blob tags to operate only on blob with a matching value.
+            eg. ``\"\\\"tagname\\\"='my tag'\"``
+
+            .. versionadded:: 12.4.0
+
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :return: None
+        """
+        mod_conditions = get_modify_conditions(kwargs)
+        try:
+            response: Any = self._client.release_lease(
+                lease_id=self.id,
+                timeout=kwargs.pop('timeout', None),
+                modified_access_conditions=mod_conditions,
+                cls=return_response_headers,
+                **kwargs)
+        except HttpResponseError as error:
+            process_storage_error(error)
+        self.etag = response.get('etag')
+        self.id = response.get('lease_id')
+        self.last_modified = response.get('last_modified')
+
+    @distributed_trace
+    def change(self, proposed_lease_id: str, **kwargs: Any) -> None:
+        """Change the lease ID of an active lease.
+
+        :param str proposed_lease_id:
+            Proposed lease ID, in a GUID string format. The Blob service returns 400
+            (Invalid request) if the proposed lease ID is not in the correct format.
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword str if_tags_match_condition:
+            Specify a SQL where clause on blob tags to operate only on blob with a matching value.
+            eg. ``\"\\\"tagname\\\"='my tag'\"``
+
+            .. versionadded:: 12.4.0
+
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :return: None
+        """
+        mod_conditions = get_modify_conditions(kwargs)
+        try:
+            response: Any = self._client.change_lease(
+                lease_id=self.id,
+                proposed_lease_id=proposed_lease_id,
+                timeout=kwargs.pop('timeout', None),
+                modified_access_conditions=mod_conditions,
+                cls=return_response_headers,
+                **kwargs)
+        except HttpResponseError as error:
+            process_storage_error(error)
+        self.etag = response.get('etag')
+        self.id = response.get('lease_id')
+        self.last_modified = response.get('last_modified')
+
+    @distributed_trace
+    def break_lease(self, lease_break_period: Optional[int] = None, **kwargs: Any) -> int:
+        """Break the lease, if the container or blob has an active lease.
+
+        Once a lease is broken, it cannot be renewed. Any authorized request can break the lease;
+        the request is not required to specify a matching lease ID. When a lease
+        is broken, the lease break period is allowed to elapse, during which time
+        no lease operation except break and release can be performed on the container or blob.
+        When a lease is successfully broken, the response indicates the interval
+        in seconds until a new lease can be acquired.
+
+        :param int lease_break_period:
+            This is the proposed duration of seconds that the lease
+            should continue before it is broken, between 0 and 60 seconds. This
+            break period is only used if it is shorter than the time remaining
+            on the lease. If longer, the time remaining on the lease is used.
+            A new lease will not be available before the break period has
+            expired, but the lease may be held for longer than the break
+            period. If this header does not appear with a break
+            operation, a fixed-duration lease breaks after the remaining lease
+            period elapses, and an infinite lease breaks immediately.
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str if_tags_match_condition:
+            Specify a SQL where clause on blob tags to operate only on blob with a matching value.
+            eg. ``\"\\\"tagname\\\"='my tag'\"``
+
+            .. versionadded:: 12.4.0
+
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :return: Approximate time remaining in the lease period, in seconds.
+        :rtype: int
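+
+        Example, as a minimal sketch (``lease`` is an assumed BlobLeaseClient
+        holding an active lease):
+
+        .. code-block:: python
+
+            seconds_remaining = lease.break_lease(lease_break_period=10)
+            print(f"lease can be re-acquired in {seconds_remaining}s")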
+        """
+        mod_conditions = get_modify_conditions(kwargs)
+        try:
+            response = self._client.break_lease(
+                timeout=kwargs.pop('timeout', None),
+                break_period=lease_break_period,
+                modified_access_conditions=mod_conditions,
+                cls=return_response_headers,
+                **kwargs)
+        except HttpResponseError as error:
+            process_storage_error(error)
+        return response.get('lease_time') # type: ignore
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/blob/_list_blobs_helper.py b/.venv/lib/python3.12/site-packages/azure/storage/blob/_list_blobs_helper.py
new file mode 100644
index 00000000..5e357cea
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/blob/_list_blobs_helper.py
@@ -0,0 +1,328 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+
+from typing import Any, Callable, cast, List, Optional, Tuple, Union
+from urllib.parse import unquote
+
+from azure.core.exceptions import HttpResponseError
+from azure.core.paging import ItemPaged, PageIterator
+
+from ._deserialize import (
+    get_blob_properties_from_generated_code,
+    load_many_xml_nodes,
+    load_xml_int,
+    load_xml_string,
+    parse_tags
+)
+from ._generated.models import BlobItemInternal, BlobPrefix as GenBlobPrefix, FilterBlobItem
+from ._generated._serialization import Deserializer
+from ._models import BlobProperties, FilteredBlob
+from ._shared.models import DictMixin
+from ._shared.response_handlers import (
+    process_storage_error,
+    return_context_and_deserialized,
+    return_raw_deserialized
+)
+
+
+class IgnoreListBlobsDeserializer(Deserializer):
+    def __call__(self, target_obj, response_data, content_type=None):
+        if target_obj == "ListBlobsFlatSegmentResponse":
+            return None
+        return super().__call__(target_obj, response_data, content_type)
+
+
+class BlobPropertiesPaged(PageIterator):
+    """An Iterable of Blob properties."""
+
+    service_endpoint: Optional[str]
+    """The service URL."""
+    prefix: Optional[str]
+    """A blob name prefix being used to filter the list."""
+    marker: Optional[str]
+    """The continuation token of the current page of results."""
+    results_per_page: Optional[int]
+    """The maximum number of results retrieved per API call."""
+    continuation_token: Optional[str]
+    """The continuation token to retrieve the next page of results."""
+    location_mode: Optional[str]
+    """The location mode being used to list results. The available
+    options include "primary" and "secondary"."""
+    current_page: Optional[List[BlobProperties]]
+    """The current page of listed results."""
+    container: Optional[str]
+    """The container that the blobs are listed from."""
+    delimiter: Optional[str]
+    """A delimiting character used for hierarchy listing."""
+    command: Callable
+    """Function to retrieve the next page of items."""
+
+    def __init__(
+        self, command: Callable,
+        container: str,
+        prefix: Optional[str] = None,
+        results_per_page: Optional[int] = None,
+        continuation_token: Optional[str] = None,
+        delimiter: Optional[str] = None,
+        location_mode: Optional[str] = None,
+    ) -> None:
+        super(BlobPropertiesPaged, self).__init__(
+            get_next=self._get_next_cb,
+            extract_data=self._extract_data_cb,
+            continuation_token=continuation_token or ""
+        )
+        self._command = command
+        self.service_endpoint = None
+        self.prefix = prefix
+        self.marker = None
+        self.results_per_page = results_per_page
+        self.container = container
+        self.delimiter = delimiter
+        self.current_page = None
+        self.location_mode = location_mode
+
+    def _get_next_cb(self, continuation_token):
+        try:
+            return self._command(
+                prefix=self.prefix,
+                marker=continuation_token or None,
+                maxresults=self.results_per_page,
+                cls=return_context_and_deserialized,
+                use_location=self.location_mode)
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    def _extract_data_cb(self, get_next_return):
+        self.location_mode, self._response = cast(Tuple[Optional[str], Any], get_next_return)
+        self.service_endpoint = self._response.service_endpoint
+        self.prefix = self._response.prefix
+        self.marker = self._response.marker
+        self.results_per_page = self._response.max_results
+        self.container = self._response.container_name
+        self.current_page = [self._build_item(item) for item in self._response.segment.blob_items]
+
+        return self._response.next_marker or None, self.current_page
+
+    def _build_item(self, item: Union[BlobItemInternal, BlobProperties]) -> BlobProperties:
+        if isinstance(item, BlobProperties):
+            return item
+        if isinstance(item, BlobItemInternal):
+            blob = get_blob_properties_from_generated_code(item)
+            blob.container = self.container  # type: ignore [assignment]
+            return blob
+        return item
+
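+# A usage sketch (``container_client`` is an assumed
+# ~azure.storage.blob.ContainerClient; its list_blobs() pager is backed by
+# this class, so pages can be walked via azure.core's by_page()):
+#
+#     pager = container_client.list_blobs(results_per_page=100)
+#     for page in pager.by_page():
+#         for blob in page:
+#             print(blob.name)
+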
+
+class BlobNamesPaged(PageIterator):
+    """An Iterable of Blob names."""
+
+    service_endpoint: Optional[str]
+    """The service URL."""
+    prefix: Optional[str]
+    """A blob name prefix being used to filter the list."""
+    marker: Optional[str]
+    """The continuation token of the current page of results."""
+    results_per_page: Optional[int]
+    """The maximum number of blobs to retrieve per call."""
+    continuation_token: Optional[str]
+    """The continuation token to retrieve the next page of results."""
+    location_mode: Optional[str]
+    """The location mode being used to list results. The available
+    options include "primary" and "secondary"."""
+    current_page: Optional[List[BlobProperties]]
+    """The current page of listed results."""
+    container: Optional[str]
+    """The container that the blobs are listed from."""
+    delimiter: Optional[str]
+    """A delimiting character used for hierarchy listing."""
+    command: Callable
+    """Function to retrieve the next page of items."""
+
+    def __init__(
+        self, command: Callable,
+        container: Optional[str] = None,
+        prefix: Optional[str] = None,
+        results_per_page: Optional[int] = None,
+        continuation_token: Optional[str] = None,
+        location_mode: Optional[str] = None
+    ) -> None:
+        super(BlobNamesPaged, self).__init__(
+            get_next=self._get_next_cb,
+            extract_data=self._extract_data_cb,
+            continuation_token=continuation_token or ""
+        )
+        self._command = command
+        self.service_endpoint = None
+        self.prefix = prefix
+        self.marker = None
+        self.results_per_page = results_per_page
+        self.container = container
+        self.current_page = None
+        self.location_mode = location_mode
+
+    def _get_next_cb(self, continuation_token):
+        try:
+            return self._command(
+                prefix=self.prefix,
+                marker=continuation_token or None,
+                maxresults=self.results_per_page,
+                cls=return_raw_deserialized,
+                use_location=self.location_mode)
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    def _extract_data_cb(self, get_next_return):
+        self.location_mode, self._response = get_next_return
+        self.service_endpoint = self._response.get('ServiceEndpoint')
+        self.prefix = load_xml_string(self._response, 'Prefix')
+        self.marker = load_xml_string(self._response, 'Marker')
+        self.results_per_page = load_xml_int(self._response, 'MaxResults')
+        self.container = self._response.get('ContainerName')
+
+        blobs = load_many_xml_nodes(self._response, 'Blob', wrapper='Blobs')
+        self.current_page = [load_xml_string(blob, 'Name') for blob in blobs]
+
+        next_marker = load_xml_string(self._response, 'NextMarker')
+        return next_marker or None, self.current_page
+
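+# A usage sketch (``container_client`` assumed): a names-only listing backed
+# by this pager avoids deserializing full BlobProperties objects:
+#
+#     for name in container_client.list_blob_names(name_starts_with="logs/"):
+#         print(name)
+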
+
+class BlobPrefixPaged(BlobPropertiesPaged):
+    def __init__(self, *args, **kwargs):
+        super(BlobPrefixPaged, self).__init__(*args, **kwargs)
+        self.name = self.prefix
+
+    def _extract_data_cb(self, get_next_return):
+        continuation_token, _ = super(BlobPrefixPaged, self)._extract_data_cb(get_next_return)
+        self.current_page = self._response.segment.blob_prefixes + self._response.segment.blob_items
+        self.current_page = [self._build_item(item) for item in self.current_page]
+        self.delimiter = self._response.delimiter
+
+        return continuation_token, self.current_page
+
+    def _build_item(self, item):
+        item = super(BlobPrefixPaged, self)._build_item(item)
+        if isinstance(item, GenBlobPrefix):
+            if item.name.encoded:
+                name = unquote(item.name.content)
+            else:
+                name = item.name.content
+            return BlobPrefix(
+                self._command,
+                container=self.container,
+                prefix=name,
+                results_per_page=self.results_per_page,
+                location_mode=self.location_mode)
+        return item
+
+
+class BlobPrefix(ItemPaged, DictMixin):
+    """An Iterable of Blob properties.
+
+    Returned from walk_blobs when a delimiter is used.
+    Can be thought of as a virtual blob directory."""
+
+    name: str
+    """The prefix, or "directory name" of the blob."""
+    service_endpoint: Optional[str]
+    """The service URL."""
+    prefix: str
+    """A blob name prefix being used to filter the list."""
+    marker: Optional[str]
+    """The continuation token of the current page of results."""
+    results_per_page: Optional[int]
+    """The maximum number of results retrieved per API call."""
+    next_marker: Optional[str]
+    """The continuation token to retrieve the next page of results."""
+    location_mode: str
+    """The location mode being used to list results. The available
+    options include "primary" and "secondary"."""
+    current_page: Optional[List[BlobProperties]]
+    """The current page of listed results."""
+    delimiter: str
+    """A delimiting character used for hierarchy listing."""
+    command: Callable
+    """Function to retrieve the next page of items."""
+    container: str
+    """The name of the container."""
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:
+        super(BlobPrefix, self).__init__(*args, page_iterator_class=BlobPrefixPaged, **kwargs)
+        self.name = kwargs.get('prefix')  # type: ignore [assignment]
+        self.prefix = kwargs.get('prefix')  # type: ignore [assignment]
+        self.results_per_page = kwargs.get('results_per_page')
+        self.container = kwargs.get('container')  # type: ignore [assignment]
+        self.delimiter = kwargs.get('delimiter')  # type: ignore [assignment]
+        self.location_mode = kwargs.get('location_mode')  # type: ignore [assignment]
+
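+# A usage sketch (``container_client`` assumed): walk_blobs() with a delimiter
+# yields a BlobPrefix for each virtual directory alongside ordinary blobs:
+#
+#     for item in container_client.walk_blobs(delimiter="/"):
+#         if isinstance(item, BlobPrefix):
+#             print("directory:", item.name)
+#         else:
+#             print("blob:", item.name)
+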
+
+class FilteredBlobPaged(PageIterator):
+    """An Iterable of Blob properties."""
+
+    service_endpoint: Optional[str]
+    """The service URL."""
+    prefix: Optional[str]
+    """A blob name prefix being used to filter the list."""
+    marker: Optional[str]
+    """The continuation token of the current page of results."""
+    results_per_page: Optional[int]
+    """The maximum number of results retrieved per API call."""
+    continuation_token: Optional[str]
+    """The continuation token to retrieve the next page of results."""
+    location_mode: Optional[str]
+    """The location mode being used to list results. The available
+    options include "primary" and "secondary"."""
+    current_page: Optional[List[BlobProperties]]
+    """The current page of listed results."""
+    command: Callable
+    """Function to retrieve the next page of items."""
+    container: Optional[str]
+    """The name of the container."""
+
+    def __init__(
+        self, command: Callable,
+        container: Optional[str] = None,
+        results_per_page: Optional[int] = None,
+        continuation_token: Optional[str] = None,
+        location_mode: Optional[str] = None
+    ) -> None:
+        super(FilteredBlobPaged, self).__init__(
+            get_next=self._get_next_cb,
+            extract_data=self._extract_data_cb,
+            continuation_token=continuation_token or ""
+        )
+        self._command = command
+        self.service_endpoint = None
+        self.marker = continuation_token
+        self.results_per_page = results_per_page
+        self.container = container
+        self.current_page = None
+        self.location_mode = location_mode
+
+    def _get_next_cb(self, continuation_token):
+        try:
+            return self._command(
+                marker=continuation_token or None,
+                maxresults=self.results_per_page,
+                cls=return_context_and_deserialized,
+                use_location=self.location_mode)
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    def _extract_data_cb(self, get_next_return):
+        self.location_mode, self._response = get_next_return
+        self.service_endpoint = self._response.service_endpoint
+        self.marker = self._response.next_marker
+        self.current_page = [self._build_item(item) for item in self._response.blobs]
+
+        return self._response.next_marker or None, self.current_page
+
+    @staticmethod
+    def _build_item(item):
+        if isinstance(item, FilterBlobItem):
+            tags = parse_tags(item.tags)
+            blob = FilteredBlob(name=item.name, container_name=item.container_name, tags=tags)
+            return blob
+        return item
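+
+
+# A usage sketch (``service_client`` is an assumed
+# ~azure.storage.blob.BlobServiceClient): its find_blobs_by_tags() pager is
+# backed by this class and yields FilteredBlob items with the tags parsed:
+#
+#     for blob in service_client.find_blobs_by_tags("\"env\"='prod'"):
+#         print(blob.container_name, blob.name, blob.tags)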
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/blob/_models.py b/.venv/lib/python3.12/site-packages/azure/storage/blob/_models.py
new file mode 100644
index 00000000..6cb1de76
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/blob/_models.py
@@ -0,0 +1,1507 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+# pylint: disable=too-few-public-methods, too-many-instance-attributes
+# pylint: disable=super-init-not-called, too-many-lines
+
+from enum import Enum
+from typing import Any, Callable, Dict, List, Optional, Union, TYPE_CHECKING
+
+from azure.core import CaseInsensitiveEnumMeta
+from azure.core.paging import PageIterator
+from azure.core.exceptions import HttpResponseError
+
+from ._shared import decode_base64_to_bytes
+from ._shared.response_handlers import return_context_and_deserialized, process_storage_error
+from ._shared.models import DictMixin, get_enum_value
+from ._generated.models import AccessPolicy as GenAccessPolicy
+from ._generated.models import ArrowField
+from ._generated.models import CorsRule as GeneratedCorsRule
+from ._generated.models import Logging as GeneratedLogging
+from ._generated.models import Metrics as GeneratedMetrics
+from ._generated.models import RetentionPolicy as GeneratedRetentionPolicy
+from ._generated.models import StaticWebsite as GeneratedStaticWebsite
+
+if TYPE_CHECKING:
+    from datetime import datetime
+    from ._generated.models import PageList
+
+# Parse a generated PageList into a single list of PageRange sorted by start.
+def parse_page_list(page_list: "PageList") -> List["PageRange"]:
+    page_ranges = page_list.page_range
+    clear_ranges = page_list.clear_range
+
+    if page_ranges is None:
+        raise ValueError("PageList's 'page_range' is malformed or None.")
+    if clear_ranges is None:
+        raise ValueError("PageList's 'clear_range' is malformed or None.")
+
+    ranges = []
+    p_i, c_i = 0, 0
+
+    # Combine page ranges and clear ranges into single list, sorted by start
+    while p_i < len(page_ranges) and c_i < len(clear_ranges):
+        p, c = page_ranges[p_i], clear_ranges[c_i]
+
+        if p.start < c.start:
+            ranges.append(
+                PageRange(start=p.start, end=p.end, cleared=False)
+            )
+            p_i += 1
+        else:
+            ranges.append(
+                PageRange(start=c.start, end=c.end, cleared=True)
+            )
+            c_i += 1
+
+    # Grab remaining elements in either list
+    ranges += [PageRange(start=r.start, end=r.end, cleared=False) for r in page_ranges[p_i:]]
+    ranges += [PageRange(start=r.start, end=r.end, cleared=True) for r in clear_ranges[c_i:]]
+
+    return ranges
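+
+# For example (illustrative values): with page_range covering (0, 511) and
+# (1024, 1535), and clear_range covering (512, 1023), the merge above yields
+# PageRange(0-511, cleared=False), PageRange(512-1023, cleared=True),
+# PageRange(1024-1535, cleared=False) -- a single list sorted by start.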
+
+
+class BlobType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+
+    BLOCKBLOB = "BlockBlob"
+    PAGEBLOB = "PageBlob"
+    APPENDBLOB = "AppendBlob"
+
+
+class BlockState(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+    """Block blob block types."""
+
+    COMMITTED = 'Committed'  #: Committed blocks.
+    LATEST = 'Latest'  #: Latest blocks.
+    UNCOMMITTED = 'Uncommitted'  #: Uncommitted blocks.
+
+
+class StandardBlobTier(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+    """
+    Specifies the blob tier to set the blob to. This is only applicable for
+    block blobs on standard storage accounts.
+    """
+
+    ARCHIVE = 'Archive'  #: Archive
+    COOL = 'Cool'  #: Cool
+    COLD = 'Cold'  #: Cold
+    HOT = 'Hot'  #: Hot
+
+
+class PremiumPageBlobTier(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+    """
+    Specifies the page blob tier to set the blob to. This is only applicable to page
+    blobs on premium storage accounts. Please take a look at:
+    https://learn.microsoft.com/azure/storage/storage-premium-storage#scalability-and-performance-targets
+    for detailed information on the corresponding IOPS and throughput per PageBlobTier.
+    """
+
+    P4 = 'P4'  #: P4 Tier
+    P6 = 'P6'  #: P6 Tier
+    P10 = 'P10'  #: P10 Tier
+    P15 = 'P15'  #: P15 Tier
+    P20 = 'P20'  #: P20 Tier
+    P30 = 'P30'  #: P30 Tier
+    P40 = 'P40'  #: P40 Tier
+    P50 = 'P50'  #: P50 Tier
+    P60 = 'P60'  #: P60 Tier
+
+
+class QuickQueryDialect(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+    """Specifies the quick query input/output dialect."""
+
+    DELIMITEDTEXT = 'DelimitedTextDialect'
+    DELIMITEDJSON = 'DelimitedJsonDialect'
+    PARQUET = 'ParquetDialect'
+
+
+class SequenceNumberAction(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+    """Sequence number actions."""
+
+    INCREMENT = 'increment'
+    """
+    Increments the value of the sequence number by 1. If specifying this option,
+    do not include the x-ms-blob-sequence-number header.
+    """
+
+    MAX = 'max'
+    """
+    Sets the sequence number to be the higher of the value included with the
+    request and the value currently stored for the blob.
+    """
+
+    UPDATE = 'update'
+    """Sets the sequence number to the value included with the request."""
+
+
+class PublicAccess(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+    """
+    Specifies whether data in the container may be accessed publicly and the level of access.
+    """
+
+    OFF = 'off'
+    """
+    Specifies that there is no public read access for either the container or the blobs within it.
+    Clients cannot enumerate the containers within the storage account, nor the blobs within the container.
+    """
+
+    BLOB = 'blob'
+    """
+    Specifies public read access for blobs. Blob data within this container can be read
+    via anonymous request, but container data is not available. Clients cannot enumerate
+    blobs within the container via anonymous request.
+    """
+
+    CONTAINER = 'container'
+    """
+    Specifies full public read access for container and blob data. Clients can enumerate
+    blobs within the container via anonymous request, but cannot enumerate containers
+    within the storage account.
+    """
+
+
+class BlobImmutabilityPolicyMode(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+    """
+    Specifies the immutability policy mode to set on the blob.
+    "Mutable" can only be returned by service, don't set to "Mutable".
+    """
+
+    UNLOCKED = "Unlocked"
+    LOCKED = "Locked"
+    MUTABLE = "Mutable"
+
+
+class RetentionPolicy(GeneratedRetentionPolicy):
+    """The retention policy which determines how long the associated data should
+    persist.
+
+    :param bool enabled:
+        Indicates whether a retention policy is enabled for the storage service.
+        The default value is False.
+    :param Optional[int] days:
+        Indicates the number of days that metrics, logging, or soft-deleted data
+        should be retained. All data older than this value will be deleted.
+        If enabled=True, the number of days must be specified.
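+
+    Example, as a minimal sketch:
+
+    .. code-block:: python
+
+        policy = RetentionPolicy(enabled=True, days=7)
+        RetentionPolicy(enabled=True)  # raises ValueError: 'days' must be specified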
+    """
+
+    enabled: bool = False
+    days: Optional[int] = None
+
+    def __init__(self, enabled: bool = False, days: Optional[int] = None) -> None:
+        super(RetentionPolicy, self).__init__(enabled=enabled, days=days, allow_permanent_delete=None)
+        if self.enabled and (self.days is None):
+            raise ValueError("If policy is enabled, 'days' must be specified.")
+
+    @classmethod
+    def _from_generated(cls, generated):
+        if not generated:
+            return cls()
+        return cls(
+            enabled=generated.enabled,
+            days=generated.days,
+        )
+
+
+class BlobAnalyticsLogging(GeneratedLogging):
+    """Azure Analytics Logging settings.
+
+    :keyword str version:
+        The version of Storage Analytics to configure. The default value is 1.0.
+    :keyword bool delete:
+        Indicates whether all delete requests should be logged. The default value is `False`.
+    :keyword bool read:
+        Indicates whether all read requests should be logged. The default value is `False`.
+    :keyword bool write:
+        Indicates whether all write requests should be logged. The default value is `False`.
+    :keyword ~azure.storage.blob.RetentionPolicy retention_policy:
+        Determines how long the associated data should persist. If not specified the retention
+        policy will be disabled by default.
+    """
+
+    version: str = '1.0'
+    """The version of Storage Analytics to configure."""
+    delete: bool = False
+    """Indicates whether all delete requests should be logged."""
+    read: bool = False
+    """Indicates whether all read requests should be logged."""
+    write: bool = False
+    """Indicates whether all write requests should be logged."""
+    retention_policy: RetentionPolicy = RetentionPolicy()
+    """Determines how long the associated data should persist."""
+
+    def __init__(self, **kwargs: Any) -> None:
+        self.version = kwargs.get('version', '1.0')
+        self.delete = kwargs.get('delete', False)
+        self.read = kwargs.get('read', False)
+        self.write = kwargs.get('write', False)
+        self.retention_policy = kwargs.get('retention_policy') or RetentionPolicy()
+
+    @classmethod
+    def _from_generated(cls, generated):
+        if not generated:
+            return cls()
+        return cls(
+            version=generated.version,
+            delete=generated.delete,
+            read=generated.read,
+            write=generated.write,
+            retention_policy=RetentionPolicy._from_generated(generated.retention_policy)  # pylint: disable=protected-access
+        )
+
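+
+# An illustrative sketch (hypothetical helper): logging settings that record
+# read, write, and delete requests and keep the logs for 5 days via the
+# RetentionPolicy model defined above.
+def _example_analytics_logging() -> BlobAnalyticsLogging:
+    return BlobAnalyticsLogging(
+        read=True,
+        write=True,
+        delete=True,
+        retention_policy=RetentionPolicy(enabled=True, days=5),
+    )
+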
+
+class Metrics(GeneratedMetrics):
+    """A summary of request statistics grouped by API in hour or minute aggregates
+    for blobs.
+
+    :keyword str version:
+        The version of Storage Analytics to configure. The default value is 1.0.
+    :keyword bool enabled:
+        Indicates whether metrics are enabled for the Blob service.
+        The default value is `False`.
+    :keyword bool include_apis:
+        Indicates whether metrics should generate summary statistics for called API operations.
+    :keyword ~azure.storage.blob.RetentionPolicy retention_policy:
+        Determines how long the associated data should persist. If not specified, the retention
+        policy will be disabled by default.
+    """
+
+    version: str = '1.0'
+    """The version of Storage Analytics to configure."""
+    enabled: bool = False
+    """Indicates whether metrics are enabled for the Blob service."""
+    include_apis: Optional[bool]
+    """Indicates whether metrics should generate summary statistics for called API operations."""
+    retention_policy: RetentionPolicy = RetentionPolicy()
+    """Determines how long the associated data should persist."""
+
+    def __init__(self, **kwargs: Any) -> None:
+        self.version = kwargs.get('version', '1.0')
+        self.enabled = kwargs.get('enabled', False)
+        self.include_apis = kwargs.get('include_apis')
+        self.retention_policy = kwargs.get('retention_policy') or RetentionPolicy()
+
+    @classmethod
+    def _from_generated(cls, generated):
+        if not generated:
+            return cls()
+        return cls(
+            version=generated.version,
+            enabled=generated.enabled,
+            include_apis=generated.include_apis,
+            retention_policy=RetentionPolicy._from_generated(generated.retention_policy)  # pylint: disable=protected-access
+        )
+
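+
+# An illustrative sketch of wiring Metrics into the service. It assumes a
+# BlobServiceClient instance named 'service_client' created elsewhere, whose
+# set_service_properties() accepts these models.
+def _example_set_hour_metrics(service_client) -> None:
+    hourly = Metrics(
+        enabled=True,
+        include_apis=True,
+        retention_policy=RetentionPolicy(enabled=True, days=5),
+    )
+    service_client.set_service_properties(hour_metrics=hourly)
+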
+
+class StaticWebsite(GeneratedStaticWebsite):
+    """The properties that enable an account to host a static website.
+
+    :keyword bool enabled:
+        Indicates whether this account is hosting a static website.
+        The default value is `False`.
+    :keyword str index_document:
+        The default name of the index page under each directory.
+    :keyword str error_document404_path:
+        The absolute path of the custom 404 page.
+    :keyword str default_index_document_path:
+        Absolute path of the default index page.
+    """
+
+    enabled: bool = False
+    """Indicates whether this account is hosting a static website."""
+    index_document: Optional[str]
+    """The default name of the index page under each directory."""
+    error_document404_path: Optional[str]
+    """The absolute path of the custom 404 page."""
+    default_index_document_path: Optional[str]
+    """Absolute path of the default index page."""
+
+    def __init__(self, **kwargs: Any) -> None:
+        self.enabled = kwargs.get('enabled', False)
+        if self.enabled:
+            self.index_document = kwargs.get('index_document')
+            self.error_document404_path = kwargs.get('error_document404_path')
+            self.default_index_document_path = kwargs.get('default_index_document_path')
+        else:
+            self.index_document = None
+            self.error_document404_path = None
+            self.default_index_document_path = None
+
+    @classmethod
+    def _from_generated(cls, generated):
+        if not generated:
+            return cls()
+        return cls(
+            enabled=generated.enabled,
+            index_document=generated.index_document,
+            error_document404_path=generated.error_document404_path,
+            default_index_document_path=generated.default_index_document_path
+        )
+
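+
+# An illustrative sketch (hypothetical helper): per __init__ above, document
+# paths are discarded when enabled=False, so only an enabled StaticWebsite
+# carries index/404 settings.
+def _example_static_website() -> StaticWebsite:
+    site = StaticWebsite(
+        enabled=True,
+        index_document="index.html",
+        error_document404_path="error/404.html",
+    )
+    disabled = StaticWebsite(enabled=False, index_document="index.html")
+    assert disabled.index_document is None  # dropped because enabled=False
+    return site
+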
+
+class CorsRule(GeneratedCorsRule):
+    """CORS is an HTTP feature that enables a web application running under one
+    domain to access resources in another domain. Web browsers implement a
+    security restriction known as same-origin policy that prevents a web page
+    from calling APIs in a different domain; CORS provides a secure way to
+    allow one domain (the origin domain) to call APIs in another domain.
+
+    :param list(str) allowed_origins:
+        A list of origin domains that will be allowed via CORS, or "*" to allow
+        all domains. The list must contain at least one entry. Limited to 64
+        origin domains. Each allowed origin can have up to 256 characters.
+    :param list(str) allowed_methods:
+        A list of HTTP methods that are allowed to be executed by the origin.
+        The list must contain at least one entry. For Azure Storage,
+        permitted methods are DELETE, GET, HEAD, MERGE, POST, OPTIONS or PUT.
+    :keyword list(str) allowed_headers:
+        Defaults to an empty list. A list of headers allowed to be part of
+        the cross-origin request. Limited to 64 defined headers and 2 prefixed
+        headers. Each header can be up to 256 characters.
+    :keyword list(str) exposed_headers:
+        Defaults to an empty list. A list of response headers to expose to CORS
+        clients. Limited to 64 defined headers and two prefixed headers. Each
+        header can be up to 256 characters.
+    :keyword int max_age_in_seconds:
+        The number of seconds that the client/browser should cache a
+        preflight response.
+    """
+
+    allowed_origins: str
+    """The comma-delimited string representation of the list of origin domains that will be allowed via
+        CORS, or "*" to allow all domains."""
+    allowed_methods: str
+    """The comma-delimited string representation of the list HTTP methods that are allowed to be executed
+        by the origin."""
+    exposed_headers: str
+    """The comma-delimited string representation of the list of response headers to expose to CORS clients."""
+    allowed_headers: str
+    """The comma-delimited string representation of the list of headers allowed to be part of the cross-origin
+        request."""
+    max_age_in_seconds: int
+    """The number of seconds that the client/browser should cache a pre-flight response."""
+
+    def __init__(self, allowed_origins: List[str], allowed_methods: List[str], **kwargs: Any) -> None:
+        self.allowed_origins = ','.join(allowed_origins)
+        self.allowed_methods = ','.join(allowed_methods)
+        self.allowed_headers = ','.join(kwargs.get('allowed_headers', []))
+        self.exposed_headers = ','.join(kwargs.get('exposed_headers', []))
+        self.max_age_in_seconds = kwargs.get('max_age_in_seconds', 0)
+
+    @staticmethod
+    def _to_generated(rules: Optional[List["CorsRule"]]) -> Optional[List[GeneratedCorsRule]]:
+        if rules is None:
+            return rules
+
+        generated_cors_list = []
+        for cors_rule in rules:
+            generated_cors = GeneratedCorsRule(
+                allowed_origins=cors_rule.allowed_origins,
+                allowed_methods=cors_rule.allowed_methods,
+                allowed_headers=cors_rule.allowed_headers,
+                exposed_headers=cors_rule.exposed_headers,
+                max_age_in_seconds=cors_rule.max_age_in_seconds
+            )
+            generated_cors_list.append(generated_cors)
+
+        return generated_cors_list
+
+    @classmethod
+    def _from_generated(cls, generated):
+        return cls(
+            [generated.allowed_origins],
+            [generated.allowed_methods],
+            allowed_headers=[generated.allowed_headers],
+            exposed_headers=[generated.exposed_headers],
+            max_age_in_seconds=generated.max_age_in_seconds,
+        )
+
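+
+# An illustrative sketch (hypothetical helper and example domain): CorsRule
+# joins its list arguments into the comma-delimited strings the service
+# expects, as done in __init__ above.
+def _example_cors_rule() -> CorsRule:
+    rule = CorsRule(
+        allowed_origins=["https://contoso.com"],
+        allowed_methods=["GET", "PUT"],
+        max_age_in_seconds=500,
+    )
+    assert rule.allowed_methods == "GET,PUT"
+    return rule
+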
+
+class ContainerProperties(DictMixin):
+    """Blob container's properties class.
+
+    Returned ``ContainerProperties`` instances expose these values through a
+    dictionary interface, for example: ``container_props["last_modified"]``.
+    Additionally, the container name is available as ``container_props["name"]``."""
+
+    name: str
+    """Name of the container."""
+    last_modified: "datetime"
+    """A datetime object representing the last time the container was modified."""
+    etag: str
+    """The ETag contains a value that you can use to perform operations conditionally."""
+    lease: "LeaseProperties"
+    """Stores all the lease information for the container."""
+    public_access: Optional[str]
+    """Specifies whether data in the container may be accessed publicly and the level of access."""
+    has_immutability_policy: bool
+    """Represents whether the container has an immutability policy."""
+    has_legal_hold: bool
+    """Represents whether the container has a legal hold."""
+    immutable_storage_with_versioning_enabled: bool
+    """Represents whether immutable storage with versioning enabled on the container."""
+    metadata: Dict[str, Any]
+    """A dict with name-value pairs to associate with the container as metadata."""
+    encryption_scope: Optional["ContainerEncryptionScope"]
+    """The default encryption scope configuration for the container."""
+    deleted: Optional[bool]
+    """Whether this container was deleted."""
+    version: Optional[str]
+    """The version of a deleted container."""
+
+    def __init__(self, **kwargs: Any) -> None:
+        self.name = None  # type: ignore [assignment]
+        self.last_modified = kwargs.get('Last-Modified')  # type: ignore [assignment]
+        self.etag = kwargs.get('ETag')  # type: ignore [assignment]
+        self.lease = LeaseProperties(**kwargs)
+        self.public_access = kwargs.get('x-ms-blob-public-access')
+        self.has_immutability_policy = kwargs.get('x-ms-has-immutability-policy')  # type: ignore [assignment]
+        self.deleted = None
+        self.version = None
+        self.has_legal_hold = kwargs.get('x-ms-has-legal-hold')  # type: ignore [assignment]
+        self.metadata = kwargs.get('metadata')  # type: ignore [assignment]
+        self.encryption_scope = None
+        self.immutable_storage_with_versioning_enabled = kwargs.get('x-ms-immutable-storage-with-versioning-enabled')  # type: ignore [assignment]  # pylint: disable=name-too-long
+        default_encryption_scope = kwargs.get('x-ms-default-encryption-scope')
+        if default_encryption_scope:
+            self.encryption_scope = ContainerEncryptionScope(
+                default_encryption_scope=default_encryption_scope,
+                prevent_encryption_scope_override=kwargs.get('x-ms-deny-encryption-scope-override', False)
+            )
+
+    @classmethod
+    def _from_generated(cls, generated):
+        props = cls()
+        props.name = generated.name
+        props.last_modified = generated.properties.last_modified
+        props.etag = generated.properties.etag
+        props.lease = LeaseProperties._from_generated(generated)  # pylint: disable=protected-access
+        props.public_access = generated.properties.public_access
+        props.has_immutability_policy = generated.properties.has_immutability_policy
+        props.immutable_storage_with_versioning_enabled = generated.properties.is_immutable_storage_with_versioning_enabled  # pylint: disable=line-too-long, name-too-long
+        props.deleted = generated.deleted
+        props.version = generated.version
+        props.has_legal_hold = generated.properties.has_legal_hold
+        props.metadata = generated.metadata
+        props.encryption_scope = ContainerEncryptionScope._from_generated(generated)  #pylint: disable=protected-access
+        return props
+
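+
+# An illustrative sketch (hypothetical helper): through DictMixin, a populated
+# ContainerProperties supports both attribute access and key access.
+def _example_container_properties(props: ContainerProperties) -> None:
+    assert props["name"] == props.name
+    assert props["last_modified"] == props.last_modified
+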
+
+class ContainerPropertiesPaged(PageIterator):
+    """An Iterable of Container properties.
+
+    :param Callable command: Function to retrieve the next page of items.
+    :param Optional[str] prefix: Filters the results to return only containers whose names
+        begin with the specified prefix.
+    :param Optional[int] results_per_page: The maximum number of container names to retrieve per call.
+    :param Optional[str] continuation_token: An opaque continuation token.
+    """
+
+    service_endpoint: Optional[str]
+    """The service URL."""
+    prefix: Optional[str]
+    """A container name prefix being used to filter the list."""
+    marker: Optional[str]
+    """The continuation token of the current page of results."""
+    results_per_page: Optional[int]
+    """The maximum number of results retrieved per API call."""
+    continuation_token: Optional[str]
+    """The continuation token to retrieve the next page of results."""
+    location_mode: Optional[str]
+    """The location mode being used to list results."""
+    current_page: List["ContainerProperties"]
+    """The current page of listed results."""
+
+    def __init__(
+        self, command: Callable,
+        prefix: Optional[str] = None,
+        results_per_page: Optional[int] = None,
+        continuation_token: Optional[str] = None
+    ) -> None:
+        super(ContainerPropertiesPaged, self).__init__(
+            get_next=self._get_next_cb,
+            extract_data=self._extract_data_cb,
+            continuation_token=continuation_token or ""
+        )
+        self._command = command
+        self.service_endpoint = None
+        self.prefix = prefix
+        self.marker = None
+        self.results_per_page = results_per_page
+        self.location_mode = None
+        self.current_page = []
+
+    def _get_next_cb(self, continuation_token):
+        try:
+            return self._command(
+                marker=continuation_token or None,
+                maxresults=self.results_per_page,
+                cls=return_context_and_deserialized,
+                use_location=self.location_mode)
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    def _extract_data_cb(self, get_next_return):
+        self.location_mode, self._response = get_next_return
+        self.service_endpoint = self._response.service_endpoint
+        self.prefix = self._response.prefix
+        self.marker = self._response.marker
+        self.results_per_page = self._response.max_results
+        self.current_page = [self._build_item(item) for item in self._response.container_items]
+
+        return self._response.next_marker or None, self.current_page
+
+    @staticmethod
+    def _build_item(item):
+        return ContainerProperties._from_generated(item)  # pylint: disable=protected-access
+
+
+class ImmutabilityPolicy(DictMixin):
+    """Optional parameters for setting the immutability policy of a blob, blob snapshot or blob version.
+
+    .. versionadded:: 12.10.0
+        This was introduced in API version '2020-10-02'.
+
+    :keyword ~datetime.datetime expiry_time:
+        Specifies the date time when the blob's immutability policy is set to expire.
+    :keyword str or ~azure.storage.blob.BlobImmutabilityPolicyMode policy_mode:
+        Specifies the immutability policy mode to set on the blob.
+        Possible values to set include: "Locked", "Unlocked".
+        "Mutable" can only be returned by service, don't set to "Mutable".
+    """
+
+    expiry_time: Optional["datetime"] = None
+    """Specifies the date time when the blobs immutability policy is set to expire."""
+    policy_mode: Optional[str] = None
+    """Specifies the immutability policy mode to set on the blob."""
+
+    def __init__(self, **kwargs: Any) -> None:
+        self.expiry_time = kwargs.pop('expiry_time', None)
+        self.policy_mode = kwargs.pop('policy_mode', None)
+
+    @classmethod
+    def _from_generated(cls, generated):
+        immutability_policy = cls()
+        immutability_policy.expiry_time = generated.properties.immutability_policy_expires_on
+        immutability_policy.policy_mode = generated.properties.immutability_policy_mode
+        return immutability_policy
+
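+
+# An illustrative sketch (hypothetical helper): an immutability policy that
+# expires in 30 days and remains adjustable ("Unlocked"), using the
+# BlobImmutabilityPolicyMode enum defined above.
+def _example_immutability_policy() -> ImmutabilityPolicy:
+    from datetime import datetime, timedelta, timezone
+    return ImmutabilityPolicy(
+        expiry_time=datetime.now(timezone.utc) + timedelta(days=30),
+        policy_mode=BlobImmutabilityPolicyMode.UNLOCKED,
+    )
+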
+
+class FilteredBlob(DictMixin):
+    """Blob info from a Filter Blobs API call."""
+
+    name: str
+    """Blob name"""
+    container_name: Optional[str]
+    """Container name."""
+    tags: Optional[Dict[str, str]]
+    """Key value pairs of blob tags."""
+
+    def __init__(self, **kwargs: Any) -> None:
+        self.name = kwargs.get('name', None)
+        self.container_name = kwargs.get('container_name', None)
+        self.tags = kwargs.get('tags', None)
+
+
+class LeaseProperties(DictMixin):
+    """Blob Lease Properties."""
+
+    status: str
+    """The lease status of the blob. Possible values: locked|unlocked"""
+    state: str
+    """Lease state of the blob. Possible values: available|leased|expired|breaking|broken"""
+    duration: Optional[str]
+    """When a blob is leased, specifies whether the lease is of infinite or fixed duration."""
+
+    def __init__(self, **kwargs: Any) -> None:
+        self.status = get_enum_value(kwargs.get('x-ms-lease-status'))
+        self.state = get_enum_value(kwargs.get('x-ms-lease-state'))
+        self.duration = get_enum_value(kwargs.get('x-ms-lease-duration'))
+
+    @classmethod
+    def _from_generated(cls, generated):
+        lease = cls()
+        lease.status = get_enum_value(generated.properties.lease_status)
+        lease.state = get_enum_value(generated.properties.lease_state)
+        lease.duration = get_enum_value(generated.properties.lease_duration)
+        return lease
+
+
+class ContentSettings(DictMixin):
+    """The content settings of a blob.
+
+    :param Optional[str] content_type:
+        The content type specified for the blob. If no content type was
+        specified, the default content type is application/octet-stream.
+    :param Optional[str] content_encoding:
+        If the content_encoding has previously been set
+        for the blob, that value is stored.
+    :param Optional[str] content_language:
+        If the content_language has previously been set
+        for the blob, that value is stored.
+    :param Optional[str] content_disposition:
+        content_disposition conveys additional information about how to
+        process the response payload, and also can be used to attach
+        additional metadata. If content_disposition has previously been set
+        for the blob, that value is stored.
+    :param Optional[str] cache_control:
+        If the cache_control has previously been set for
+        the blob, that value is stored.
+    :param Optional[bytearray] content_md5:
+        If the content_md5 has been set for the blob, this response
+        header is stored so that the client can check for message content
+        integrity.
+    """
+
+    content_type: Optional[str] = None
+    """The content type specified for the blob."""
+    content_encoding: Optional[str] = None
+    """The content encoding specified for the blob."""
+    content_language: Optional[str] = None
+    """The content language specified for the blob."""
+    content_disposition: Optional[str] = None
+    """The content disposition specified for the blob."""
+    cache_control: Optional[str] = None
+    """The cache control specified for the blob."""
+    content_md5: Optional[bytearray] = None
+    """The content md5 specified for the blob."""
+
+    def __init__(
+        self, content_type: Optional[str] = None,
+        content_encoding: Optional[str] = None,
+        content_language: Optional[str] = None,
+        content_disposition: Optional[str] = None,
+        cache_control: Optional[str] = None,
+        content_md5: Optional[bytearray] = None,
+        **kwargs: Any
+    ) -> None:
+
+        self.content_type = content_type or kwargs.get('Content-Type')
+        self.content_encoding = content_encoding or kwargs.get('Content-Encoding')
+        self.content_language = content_language or kwargs.get('Content-Language')
+        self.content_md5 = content_md5 or kwargs.get('Content-MD5')
+        self.content_disposition = content_disposition or kwargs.get('Content-Disposition')
+        self.cache_control = cache_control or kwargs.get('Cache-Control')
+
+    @classmethod
+    def _from_generated(cls, generated):
+        settings = cls()
+        settings.content_type = generated.properties.content_type or None
+        settings.content_encoding = generated.properties.content_encoding or None
+        settings.content_language = generated.properties.content_language or None
+        settings.content_md5 = generated.properties.content_md5 or None
+        settings.content_disposition = generated.properties.content_disposition or None
+        settings.cache_control = generated.properties.cache_control or None
+        return settings
+
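+
+# An illustrative sketch: ContentSettings is typically passed to upload
+# operations via the 'content_settings' parameter. Assumes a BlobClient named
+# 'blob_client' and a payload 'data' created elsewhere.
+def _example_upload_with_content_settings(blob_client, data: bytes) -> None:
+    settings = ContentSettings(
+        content_type="application/json",
+        cache_control="max-age=3600",
+    )
+    blob_client.upload_blob(data, content_settings=settings, overwrite=True)
+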
+
+class CopyProperties(DictMixin):
+    """Blob Copy Properties.
+
+    These properties will be `None` if this blob has never been the destination
+    in a Copy Blob operation, or if this blob has been modified after a concluded
+    Copy Blob operation, for example, using Set Blob Properties, Upload Blob, or Commit Block List.
+    """
+
+    id: Optional[str]
+    """String identifier for the last attempted Copy Blob operation where this blob
+        was the destination blob."""
+    source: Optional[str]
+    """URL up to 2 KB in length that specifies the source blob used in the last attempted
+        Copy Blob operation where this blob was the destination blob."""
+    status: Optional[str]
+    """State of the copy operation identified by Copy ID, with these values:
+    success: Copy completed successfully.
+    pending: Copy is in progress. Check copy_status_description if intermittent, non-fatal errors impede copy progress
+    but don't cause failure.
+    aborted: Copy was ended by Abort Copy Blob.
+    failed: Copy failed. See copy_status_description for failure details."""
+    progress: Optional[str]
+    """Contains the number of bytes copied and the total bytes in the source in the last
+        attempted Copy Blob operation where this blob was the destination blob. Can show
+        between 0 and Content-Length bytes copied."""
+    completion_time: Optional["datetime"]
+    """Conclusion time of the last attempted Copy Blob operation where this blob was the
+        destination blob. This value can specify the time of a completed, aborted, or
+        failed copy attempt."""
+    status_description: Optional[str]
+    """Only appears when x-ms-copy-status is failed or pending. Describes cause of fatal
+        or non-fatal copy operation failure."""
+    incremental_copy: Optional[bool]
+    """Copies the snapshot of the source page blob to a destination page blob.
+        The snapshot is copied such that only the differential changes between
+        the previously copied snapshot and the current snapshot are transferred to the destination."""
+    destination_snapshot: Optional["datetime"]
+    """Included if the blob is incremental copy blob or incremental copy snapshot,
+        if x-ms-copy-status is success. Snapshot time of the last successful
+        incremental copy snapshot for this blob."""
+
+    def __init__(self, **kwargs: Any) -> None:
+        self.id = kwargs.get('x-ms-copy-id')
+        self.source = kwargs.get('x-ms-copy-source')
+        self.status = get_enum_value(kwargs.get('x-ms-copy-status'))
+        self.progress = kwargs.get('x-ms-copy-progress')
+        self.completion_time = kwargs.get('x-ms-copy-completion-time')
+        self.status_description = kwargs.get('x-ms-copy-status-description')
+        self.incremental_copy = kwargs.get('x-ms-incremental-copy')
+        self.destination_snapshot = kwargs.get('x-ms-copy-destination-snapshot')
+
+    @classmethod
+    def _from_generated(cls, generated):
+        copy = cls()
+        copy.id = generated.properties.copy_id or None
+        copy.status = get_enum_value(generated.properties.copy_status) or None
+        copy.source = generated.properties.copy_source or None
+        copy.progress = generated.properties.copy_progress or None
+        copy.completion_time = generated.properties.copy_completion_time or None
+        copy.status_description = generated.properties.copy_status_description or None
+        copy.incremental_copy = generated.properties.incremental_copy or None
+        copy.destination_snapshot = generated.properties.destination_snapshot or None
+        return copy
+
+
+class BlobBlock(DictMixin):
+    """BlockBlob Block class.
+
+    :param str block_id:
+        Block id.
+    :param BlockState state:
+        Block state. Possible values: BlockState.COMMITTED | BlockState.UNCOMMITTED
+    """
+
+    block_id: str
+    """Block id."""
+    state: BlockState
+    """Block state."""
+    size: int
+    """Block size."""
+
+    def __init__(self, block_id: str, state: BlockState = BlockState.LATEST) -> None:
+        self.id = block_id
+        self.state = state
+        self.size = None  # type: ignore [assignment]
+
+    @classmethod
+    def _from_generated(cls, generated):
+        try:
+            decoded_bytes = decode_base64_to_bytes(generated.name)
+            block_id = decoded_bytes.decode('utf-8')
+        # Bug workaround: when large blocks are uploaded through upload_blob, the block id is not base64 encoded,
+        # even though the service expects a base64-encoded block id. If the returned block id cannot be base64
+        # decoded, it was never encoded when the block was staged, so the returned block_id is used directly.
+        except UnicodeDecodeError:
+            block_id = generated.name
+        block = cls(block_id)
+        block.size = generated.size
+        return block
+
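+
+# An illustrative sketch of the staging/commit flow that BlobBlock models:
+# block ids are base64 encoded before staging, which matches the decode
+# fallback in _from_generated above. Assumes a BlobClient named 'blob_client'
+# and an iterable of byte chunks.
+def _example_stage_and_commit(blob_client, chunks) -> None:
+    import base64
+    block_list = []
+    for index, chunk in enumerate(chunks):
+        block_id = base64.b64encode(str(index).zfill(6).encode()).decode()
+        blob_client.stage_block(block_id=block_id, data=chunk)
+        block_list.append(BlobBlock(block_id=block_id))
+    blob_client.commit_block_list(block_list)
+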
+
+class PageRange(DictMixin):
+    """Page Range for page blob.
+
+    :param int start:
+        Start of page range in bytes.
+    :param int end:
+        End of page range in bytes.
+    """
+
+    start: Optional[int] = None
+    """Start of page range in bytes."""
+    end: Optional[int] = None
+    """End of page range in bytes."""
+    cleared: bool
+    """Whether the range has been cleared."""
+
+    def __init__(self, start: Optional[int] = None, end: Optional[int] = None, *, cleared: bool = False) -> None:
+        self.start = start
+        self.end = end
+        self.cleared = cleared
+
+
+class PageRangePaged(PageIterator):
+    def __init__(self, command, results_per_page=None, continuation_token=None):
+        super(PageRangePaged, self).__init__(
+            get_next=self._get_next_cb,
+            extract_data=self._extract_data_cb,
+            continuation_token=continuation_token or ""
+        )
+        self._command = command
+        self.results_per_page = results_per_page
+        self.location_mode = None
+        self.current_page = []
+
+    def _get_next_cb(self, continuation_token):
+        try:
+            return self._command(
+                marker=continuation_token or None,
+                maxresults=self.results_per_page,
+                cls=return_context_and_deserialized,
+                use_location=self.location_mode)
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    def _extract_data_cb(self, get_next_return):
+        self.location_mode, self._response = get_next_return
+        self.current_page = self._build_page(self._response)
+
+        return self._response.next_marker or None, self.current_page
+
+    @staticmethod
+    def _build_page(response):
+        if not response:
+            raise StopIteration
+
+        return parse_page_list(response)
+
+
+class ContainerSasPermissions(object):
+    """ContainerSasPermissions class to be used with the
+    :func:`~azure.storage.blob.generate_container_sas` function and
+    for the AccessPolicies used with
+    :func:`~azure.storage.blob.ContainerClient.set_container_access_policy`.
+
+    :param bool read:
+        Read the content, properties, metadata or block list of any blob in the
+        container. Use any blob in the container as the source of a copy operation.
+    :param bool write:
+        For any blob in the container, create or write content, properties,
+        metadata, or block list. Snapshot or lease the blob. Resize the blob
+        (page blob only). Use the blob as the destination of a copy operation
+        within the same account. Note: You cannot grant permissions to read or
+        write container properties or metadata, nor to lease a container, with
+        a container SAS. Use an account SAS instead.
+    :param bool delete:
+        Delete any blob in the container. Note: You cannot grant permissions to
+        delete a container with a container SAS. Use an account SAS instead.
+    :param bool delete_previous_version:
+        Delete the previous blob version for the versioning enabled storage account.
+    :param bool list:
+        List blobs in the container.
+    :param bool tag:
+        Set or get tags on the blobs in the container.
+    :keyword bool add:
+        Add a block to an append blob.
+    :keyword bool create:
+        Write a new blob, snapshot a blob, or copy a blob to a new blob.
+    :keyword bool permanent_delete:
+        Permits permanent deletion of the blob.
+    :keyword bool filter_by_tags:
+        To enable finding blobs by tags.
+    :keyword bool move:
+        Move a blob or a directory and its contents to a new location.
+    :keyword bool execute:
+        Get the system properties and, if the hierarchical namespace is enabled for the storage account,
+        get the POSIX ACL of a blob.
+    :keyword bool set_immutability_policy:
+        To enable operations related to set/delete immutability policy.
+        To get immutability policy, you just need read permission.
+    """
+
+    read: bool = False
+    """The read permission for container SAS."""
+    write: bool = False
+    """The write permission for container SAS."""
+    delete: bool = False
+    """The delete permission for container SAS."""
+    delete_previous_version: bool = False
+    """Permission to delete previous blob version for versioning enabled
+        storage accounts."""
+    list: bool = False
+    """The list permission for container SAS."""
+    tag: bool = False
+    """Set or get tags on the blobs in the container."""
+    add: Optional[bool]
+    """Add a block to an append blob."""
+    create: Optional[bool]
+    """Write a new blob, snapshot a blob, or copy a blob to a new blob."""
+    permanent_delete: Optional[bool]
+    """Permits permanent deletion of the blob."""
+    filter_by_tags: Optional[bool]
+    """To enable finding blobs by tags."""
+    move: Optional[bool]
+    """Move a blob or a directory and its contents to a new location."""
+    execute: Optional[bool]
+    """Get the system properties and, if the hierarchical namespace is enabled for the storage account,
+        get the POSIX ACL of a blob."""
+    set_immutability_policy: Optional[bool]
+    """To get immutability policy, you just need read permission."""
+
+    def __init__(
+        self, read: bool = False,
+        write: bool = False,
+        delete: bool = False,
+        list: bool = False,
+        delete_previous_version: bool = False,
+        tag: bool = False,
+        **kwargs: Any
+    ) -> None:
+        self.read = read
+        self.add = kwargs.pop('add', False)
+        self.create = kwargs.pop('create', False)
+        self.write = write
+        self.delete = delete
+        self.delete_previous_version = delete_previous_version
+        self.permanent_delete = kwargs.pop('permanent_delete', False)
+        self.list = list
+        self.tag = tag
+        self.filter_by_tags = kwargs.pop('filter_by_tags', False)
+        self.move = kwargs.pop('move', False)
+        self.execute = kwargs.pop('execute', False)
+        self.set_immutability_policy = kwargs.pop('set_immutability_policy', False)
+        self._str = (('r' if self.read else '') +
+                     ('a' if self.add else '') +
+                     ('c' if self.create else '') +
+                     ('w' if self.write else '') +
+                     ('d' if self.delete else '') +
+                     ('x' if self.delete_previous_version else '') +
+                     ('y' if self.permanent_delete else '') +
+                     ('l' if self.list else '') +
+                     ('t' if self.tag else '') +
+                     ('f' if self.filter_by_tags else '') +
+                     ('m' if self.move else '') +
+                     ('e' if self.execute else '') +
+                     ('i' if self.set_immutability_policy else ''))
+
+    def __str__(self):
+        return self._str
+
+    @classmethod
+    def from_string(cls, permission: str) -> "ContainerSasPermissions":
+        """Create a ContainerSasPermissions from a string.
+
+        To specify read, write, delete, or list permissions you need only to
+        include the first letter of the word in the string. E.g. For read and
+        write permissions, you would provide a string "rw".
+
+        :param str permission: The string which dictates the read, write, delete,
+            and list permissions.
+        :return: A ContainerSasPermissions object
+        :rtype: ~azure.storage.blob.ContainerSasPermissions
+        """
+        p_read = 'r' in permission
+        p_add = 'a' in permission
+        p_create = 'c' in permission
+        p_write = 'w' in permission
+        p_delete = 'd' in permission
+        p_delete_previous_version = 'x' in permission
+        p_permanent_delete = 'y' in permission
+        p_list = 'l' in permission
+        p_tag = 't' in permission
+        p_filter_by_tags = 'f' in permission
+        p_move = 'm' in permission
+        p_execute = 'e' in permission
+        p_set_immutability_policy = 'i' in permission
+        parsed = cls(read=p_read, write=p_write, delete=p_delete, list=p_list,
+                     delete_previous_version=p_delete_previous_version, tag=p_tag, add=p_add,
+                     create=p_create, permanent_delete=p_permanent_delete, filter_by_tags=p_filter_by_tags,
+                     move=p_move, execute=p_execute, set_immutability_policy=p_set_immutability_policy)
+
+        return parsed
+
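+
+# An illustrative sketch (hypothetical helper): from_string() and str() are
+# inverses over the permission flags, so a permission string round-trips.
+def _example_container_permissions() -> None:
+    parsed = ContainerSasPermissions.from_string("rwl")
+    assert parsed.read and parsed.write and parsed.list
+    assert str(parsed) == "rwl"
+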
+
+class AccessPolicy(GenAccessPolicy):
+    """Access Policy class used by the set and get access policy methods in each service.
+
+    A stored access policy can specify the start time, expiry time, and
+    permissions for the Shared Access Signatures with which it's associated.
+    Depending on how you want to control access to your resource, you can
+    specify all of these parameters within the stored access policy, and omit
+    them from the URL for the Shared Access Signature. Doing so permits you to
+    modify the associated signature's behavior at any time, as well as to revoke
+    it. Or you can specify one or more of the access policy parameters within
+    the stored access policy, and the others on the URL. Finally, you can
+    specify all of the parameters on the URL. In this case, you can use the
+    stored access policy to revoke the signature, but not to modify its behavior.
+
+    Together the Shared Access Signature and the stored access policy must
+    include all fields required to authenticate the signature. If any required
+    fields are missing, the request will fail. Likewise, if a field is specified
+    both in the Shared Access Signature URL and in the stored access policy, the
+    request will fail with status code 400 (Bad Request).
+
+    :param permission:
+        The permissions associated with the shared access signature. The
+        user is restricted to operations allowed by the permissions.
+        Required unless an id is given referencing a stored access policy
+        which contains this field. This field must be omitted if it has been
+        specified in an associated stored access policy.
+    :type permission: Optional[Union[ContainerSasPermissions, str]]
+    :param expiry:
+        The time at which the shared access signature becomes invalid.
+        Required unless an id is given referencing a stored access policy
+        which contains this field. This field must be omitted if it has
+        been specified in an associated stored access policy. Azure will always
+        convert values to UTC. If a date is passed in without timezone info, it
+        is assumed to be UTC.
+    :paramtype expiry: Optional[Union[str, datetime]]
+    :param start:
+        The time at which the shared access signature becomes valid. If
+        omitted, start time for this call is assumed to be the time when the
+        storage service receives the request. Azure will always convert values
+        to UTC. If a date is passed in without timezone info, it is assumed to
+        be UTC.
+    :paramtype start: Optional[Union[str, datetime]]
+    """
+
+    permission: Optional[Union[ContainerSasPermissions, str]]  # type: ignore [assignment]
+    """The permissions associated with the shared access signature. The user is restricted to
+        operations allowed by the permissions."""
+    expiry: Optional[Union["datetime", str]]  # type: ignore [assignment]
+    """The time at which the shared access signature becomes invalid."""
+    start: Optional[Union["datetime", str]]  # type: ignore [assignment]
+    """The time at which the shared access signature becomes valid."""
+
+    def __init__(
+        self, permission: Optional[Union["ContainerSasPermissions", str]] = None,
+        expiry: Optional[Union[str, "datetime"]] = None,
+        start: Optional[Union[str, "datetime"]] = None
+    ) -> None:
+        self.start = start
+        self.expiry = expiry
+        self.permission = permission
+
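+
+# An illustrative sketch: stored access policies are applied per container via
+# set_container_access_policy(), keyed by a policy id of your choosing.
+# Assumes a ContainerClient named 'container_client' created elsewhere.
+def _example_stored_access_policy(container_client) -> None:
+    from datetime import datetime, timedelta, timezone
+    policy = AccessPolicy(
+        permission=ContainerSasPermissions(read=True, list=True),
+        expiry=datetime.now(timezone.utc) + timedelta(hours=1),
+        start=datetime.now(timezone.utc),
+    )
+    container_client.set_container_access_policy(
+        signed_identifiers={"read-only-policy": policy}
+    )
+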
+
+class BlobSasPermissions(object):
+    """BlobSasPermissions class to be used with the
+    :func:`~azure.storage.blob.generate_blob_sas` function.
+
+    :param bool read:
+        Read the content, properties, metadata and block list. Use the blob as
+        the source of a copy operation.
+    :param bool add:
+        Add a block to an append blob.
+    :param bool create:
+        Write a new blob, snapshot a blob, or copy a blob to a new blob.
+    :param bool write:
+        Create or write content, properties, metadata, or block list. Snapshot
+        or lease the blob. Resize the blob (page blob only). Use the blob as the
+        destination of a copy operation within the same account.
+    :param bool delete:
+        Delete the blob.
+    :param bool delete_previous_version:
+        Delete the previous blob version for the versioning enabled storage account.
+    :param bool tag:
+        Set or get tags on the blob.
+    :keyword bool permanent_delete:
+        Permits permanent deletion of the blob.
+    :keyword bool move:
+        Move a blob or a directory and its contents to a new location.
+    :keyword bool execute:
+        Get the system properties and, if the hierarchical namespace is enabled for the storage account,
+        get the POSIX ACL of a blob.
+    :keyword bool set_immutability_policy:
+        To enable operations related to set/delete immutability policy.
+        To get immutability policy, you just need read permission.
+    """
+
+    read: bool = False
+    """The read permission for Blob SAS."""
+    add: Optional[bool]
+    """The add permission for Blob SAS."""
+    create: Optional[bool]
+    """Write a new blob, snapshot a blob, or copy a blob to a new blob."""
+    write: bool = False
+    """The write permission for Blob SAS."""
+    delete: bool = False
+    """The delete permission for Blob SAS."""
+    delete_previous_version: bool = False
+    """Permission to delete previous blob version for versioning enabled
+        storage accounts."""
+    tag: bool = False
+    """Set or get tags on the blobs in the Blob."""
+    permanent_delete: Optional[bool]
+    """To enable permanent delete on the blob is permitted."""
+    move: Optional[bool]
+    """Move a blob or a directory and its contents to a new location."""
+    execute: Optional[bool]
+    """Get the system properties and, if the hierarchical namespace is enabled for the storage account,
+        get the POSIX ACL of a blob."""
+    set_immutability_policy: Optional[bool]
+    """To get immutability policy, you just need read permission."""
+
+    def __init__(
+        self, read: bool = False,
+        add: bool = False,
+        create: bool = False,
+        write: bool = False,
+        delete: bool = False,
+        delete_previous_version: bool = False,
+        tag: bool = False,
+        **kwargs: Any
+    ) -> None:
+        self.read = read
+        self.add = add
+        self.create = create
+        self.write = write
+        self.delete = delete
+        self.delete_previous_version = delete_previous_version
+        self.permanent_delete = kwargs.pop('permanent_delete', False)
+        self.tag = tag
+        self.move = kwargs.pop('move', False)
+        self.execute = kwargs.pop('execute', False)
+        self.set_immutability_policy = kwargs.pop('set_immutability_policy', False)
+        self._str = (('r' if self.read else '') +
+                     ('a' if self.add else '') +
+                     ('c' if self.create else '') +
+                     ('w' if self.write else '') +
+                     ('d' if self.delete else '') +
+                     ('x' if self.delete_previous_version else '') +
+                     ('y' if self.permanent_delete else '') +
+                     ('t' if self.tag else '') +
+                     ('m' if self.move else '') +
+                     ('e' if self.execute else '') +
+                     ('i' if self.set_immutability_policy else ''))
+
+    def __str__(self):
+        return self._str
+
+    @classmethod
+    def from_string(cls, permission: str) -> "BlobSasPermissions":
+        """Create a BlobSasPermissions from a string.
+
+        To specify read, add, create, write, or delete permissions you need only to
+        include the first letter of the word in the string. E.g. For read and
+        write permissions, you would provide a string "rw".
+
+        :param str permission: The string which dictates the read, add, create,
+            write, or delete permissions.
+        :return: A BlobSasPermissions object
+        :rtype: ~azure.storage.blob.BlobSasPermissions
+        """
+        p_read = 'r' in permission
+        p_add = 'a' in permission
+        p_create = 'c' in permission
+        p_write = 'w' in permission
+        p_delete = 'd' in permission
+        p_delete_previous_version = 'x' in permission
+        p_permanent_delete = 'y' in permission
+        p_tag = 't' in permission
+        p_move = 'm' in permission
+        p_execute = 'e' in permission
+        p_set_immutability_policy = 'i' in permission
+
+        parsed = cls(read=p_read, add=p_add, create=p_create, write=p_write, delete=p_delete,
+                     delete_previous_version=p_delete_previous_version, tag=p_tag, permanent_delete=p_permanent_delete,
+                     move=p_move, execute=p_execute, set_immutability_policy=p_set_immutability_policy)
+
+        return parsed
+
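+
+# An illustrative sketch: BlobSasPermissions is usually handed to this
+# package's generate_blob_sas(). The account, container, and blob names here
+# are placeholders.
+def _example_blob_sas(account_key: str) -> str:
+    from datetime import datetime, timedelta, timezone
+    from azure.storage.blob import generate_blob_sas
+    return generate_blob_sas(
+        account_name="myaccount",
+        container_name="mycontainer",
+        blob_name="myblob.txt",
+        account_key=account_key,
+        permission=BlobSasPermissions(read=True),
+        expiry=datetime.now(timezone.utc) + timedelta(hours=1),
+    )
+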
+
+class CustomerProvidedEncryptionKey(object):
+    """
+    All data in Azure Storage is encrypted at-rest using an account-level encryption key.
+    In versions 2018-06-17 and newer, you can manage the key used to encrypt blob contents
+    and application metadata per-blob by providing an AES-256 encryption key in requests to the storage service.
+
+    When you use a customer-provided key, Azure Storage does not manage or persist your key.
+    When writing data to a blob, the provided key is used to encrypt your data before writing it to disk.
+    A SHA-256 hash of the encryption key is written alongside the blob contents,
+    and is used to verify that all subsequent operations against the blob use the same encryption key.
+    This hash cannot be used to retrieve the encryption key or decrypt the contents of the blob.
+    When reading a blob, the provided key is used to decrypt your data after reading it from disk.
+    In both cases, the provided encryption key is securely discarded
+    as soon as the encryption or decryption process completes.
+
+    :param str key_value:
+        Base64-encoded AES-256 encryption key value.
+    :param str key_hash:
+        Base64-encoded SHA256 of the encryption key.
+    """
+
+    key_value: str
+    """Base64-encoded AES-256 encryption key value."""
+    key_hash: str
+    """Base64-encoded SHA256 of the encryption key."""
+    algorithm: str
+    """Specifies the algorithm to use when encrypting data using the given key. Must be AES256."""
+
+    def __init__(self, key_value: str, key_hash: str) -> None:
+        self.key_value = key_value
+        self.key_hash = key_hash
+        self.algorithm = 'AES256'
+
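+
+# An illustrative sketch (hypothetical helper): building a customer-provided
+# key from 32 random bytes. Both the key and its SHA-256 hash must be base64
+# encoded, matching the parameter descriptions above.
+def _example_customer_provided_key() -> CustomerProvidedEncryptionKey:
+    import base64
+    import hashlib
+    import os
+    raw_key = os.urandom(32)  # AES-256 key material
+    return CustomerProvidedEncryptionKey(
+        key_value=base64.b64encode(raw_key).decode(),
+        key_hash=base64.b64encode(hashlib.sha256(raw_key).digest()).decode(),
+    )
+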
+
+class ContainerEncryptionScope(object):
+    """The default encryption scope configuration for a container.
+
+    This scope is used implicitly for all future writes within the container,
+    but can be overridden per blob operation.
+
+    .. versionadded:: 12.2.0
+
+    :param str default_encryption_scope:
+        Specifies the default encryption scope to set on the container and use for
+        all future writes.
+    :param bool prevent_encryption_scope_override:
+        If true, prevents any request from specifying a different encryption scope than the scope
+        set on the container. Default value is false.
+    """
+
+    default_encryption_scope: str
+    """Specifies the default encryption scope to set on the container and use for
+        all future writes."""
+    prevent_encryption_scope_override: bool
+    """If true, prevents any request from specifying a different encryption scope than the scope
+        set on the container."""
+
+    def __init__(self, default_encryption_scope: str, **kwargs: Any) -> None:
+        self.default_encryption_scope = default_encryption_scope
+        self.prevent_encryption_scope_override = kwargs.get('prevent_encryption_scope_override', False)
+
+    @classmethod
+    def _from_generated(cls, generated):
+        if generated.properties.default_encryption_scope:
+            scope = cls(
+                generated.properties.default_encryption_scope,
+                prevent_encryption_scope_override=generated.properties.prevent_encryption_scope_override or False
+            )
+            return scope
+        return None
+
+
+class DelimitedJsonDialect(DictMixin):
+    """Defines the input or output JSON serialization for a blob data query.
+
+    :keyword str delimiter: The line separator character, default value is '\\\\n'.
+    """
+
+    def __init__(self, **kwargs: Any) -> None:
+        self.delimiter = kwargs.pop('delimiter', '\n')
+
+
+class DelimitedTextDialect(DictMixin):
+    """Defines the input or output delimited (CSV) serialization for a blob query request.
+
+    :keyword str delimiter:
+        Column separator, defaults to ','.
+    :keyword str quotechar:
+        Field quote, defaults to '"'.
+    :keyword str lineterminator:
+        Record separator, defaults to '\\\\n'.
+    :keyword str escapechar:
+        Escape char, defaults to empty.
+    :keyword bool has_header:
+        Whether the blob data includes headers in the first line. The default value is False, meaning that the
+        data will be returned inclusive of the first line. If set to True, the data will be returned exclusive
+        of the first line.
+    """
+
+    def __init__(self, **kwargs: Any) -> None:
+        self.delimiter = kwargs.pop('delimiter', ',')
+        self.quotechar = kwargs.pop('quotechar', '"')
+        self.lineterminator = kwargs.pop('lineterminator', '\n')
+        self.escapechar = kwargs.pop('escapechar', "")
+        self.has_header = kwargs.pop('has_header', False)
+
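+
+# An illustrative sketch: these dialects describe the input and output formats
+# for a blob query. Assumes a BlobClient named 'blob_client' over a CSV blob
+# with a header row; the kwarg names follow the query_blob() API.
+def _example_query_blob(blob_client) -> bytes:
+    reader = blob_client.query_blob(
+        "SELECT * FROM BlobStorage",
+        blob_format=DelimitedTextDialect(has_header=True),
+        output_format=DelimitedJsonDialect(delimiter="\n"),
+    )
+    return reader.readall()
+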
+
+class ArrowDialect(ArrowField):
+    """field of an arrow schema.
+
+    All required parameters must be populated in order to send to Azure.
+
+    :param ~azure.storage.blob.ArrowType type: Arrow field type.
+    :keyword str name: The name of the field.
+    :keyword int precision: The precision of the field.
+    :keyword int scale: The scale of the field.
+    """
+
+    def __init__(self, type, **kwargs: Any) -> None:   # pylint: disable=redefined-builtin
+        super(ArrowDialect, self).__init__(type=type, **kwargs)
+
+
+class ArrowType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+
+    INT64 = "int64"
+    BOOL = "bool"
+    TIMESTAMP_MS = "timestamp[ms]"
+    STRING = "string"
+    DOUBLE = "double"
+    DECIMAL = 'decimal'
+
+
+class ObjectReplicationRule(DictMixin):
+    """Policy id and rule ids applied to a blob."""
+
+    rule_id: str
+    """Rule id."""
+    status: str
+    """The status of the rule. It could be "Complete" or "Failed" """
+
+    def __init__(self, **kwargs: Any) -> None:
+        self.rule_id = kwargs.pop('rule_id', None)  # type: ignore [assignment]
+        self.status = kwargs.pop('status', None)  # type: ignore [assignment]
+
+
+class ObjectReplicationPolicy(DictMixin):
+    """Policy id and rule ids applied to a blob."""
+
+    policy_id: str
+    """Policy id for the blob. A replication policy gets created (policy id) when creating a source/destination pair."""
+    rules: List[ObjectReplicationRule]
+    """Within each policy there may be multiple replication rules.
+        e.g. rule 1= src/container/.pdf to dst/container2/; rule2 = src/container1/.jpg to dst/container3"""
+
+    def __init__(self, **kwargs: Any) -> None:
+        self.policy_id = kwargs.pop('policy_id', None)  # type: ignore [assignment]
+        self.rules = kwargs.pop('rules', [])
+
+
+class BlobProperties(DictMixin):
+    """Blob Properties."""
+
+    name: str
+    """The name of the blob."""
+    container: str
+    """The container in which the blob resides."""
+    snapshot: Optional[str]
+    """Datetime value that uniquely identifies the blob snapshot."""
+    blob_type: "BlobType"
+    """String indicating this blob's type."""
+    metadata: Dict[str, str]
+    """Name-value pairs associated with the blob as metadata."""
+    last_modified: "datetime"
+    """A datetime object representing the last time the blob was modified."""
+    etag: str
+    """The ETag contains a value that you can use to perform operations
+        conditionally."""
+    size: int
+    """The size of the content returned. If the entire blob was requested,
+        the length of blob in bytes. If a subset of the blob was requested, the
+        length of the returned subset."""
+    content_range: Optional[str]
+    """Indicates the range of bytes returned in the event that the client
+        requested a subset of the blob."""
+    append_blob_committed_block_count: Optional[int]
+    """(For Append Blobs) Number of committed blocks in the blob."""
+    is_append_blob_sealed: Optional[bool]
+    """Indicate if the append blob is sealed or not."""
+    page_blob_sequence_number: Optional[int]
+    """(For Page Blobs) Sequence number for page blob used for coordinating
+        concurrent writes."""
+    server_encrypted: bool
+    """Set to true if the blob is encrypted on the server."""
+    copy: "CopyProperties"
+    """Stores all the copy properties for the blob."""
+    content_settings: ContentSettings
+    """Stores all the content settings for the blob."""
+    lease: LeaseProperties
+    """Stores all the lease information for the blob."""
+    blob_tier: Optional[StandardBlobTier]
+    """Indicates the access tier of the blob. The hot tier is optimized
+        for storing data that is accessed frequently. The cool storage tier
+        is optimized for storing data that is infrequently accessed and stored
+        for at least a month. The archive tier is optimized for storing
+        data that is rarely accessed and stored for at least six months
+        with flexible latency requirements."""
+    rehydrate_priority: Optional[str]
+    """Indicates the priority with which to rehydrate an archived blob"""
+    blob_tier_change_time: Optional["datetime"]
+    """Indicates when the access tier was last changed."""
+    blob_tier_inferred: Optional[bool]
+    """Indicates whether the access tier was inferred by the service.
+        If false, it indicates that the tier was set explicitly."""
+    deleted: Optional[bool]
+    """Whether this blob was deleted."""
+    deleted_time: Optional["datetime"]
+    """A datetime object representing the time at which the blob was deleted."""
+    remaining_retention_days: Optional[int]
+    """The number of days that the blob will be retained before being permanently deleted by the service."""
+    creation_time: "datetime"
+    """Indicates when the blob was created, in UTC."""
+    archive_status: Optional[str]
+    """Archive status of blob."""
+    encryption_key_sha256: Optional[str]
+    """The SHA-256 hash of the provided encryption key."""
+    encryption_scope: Optional[str]
+    """A predefined encryption scope used to encrypt the data on the service. An encryption
+        scope can be created using the Management API and referenced here by name. If a default
+        encryption scope has been defined at the container, this value will override it if the
+        container-level scope is configured to allow overrides. Otherwise an error will be raised."""
+    request_server_encrypted: Optional[bool]
+    """Whether this blob is encrypted."""
+    object_replication_source_properties: Optional[List[ObjectReplicationPolicy]]
+    """Only present for blobs that have policy ids and rule ids applied to them."""
+    object_replication_destination_policy: Optional[str]
+    """Represents the Object Replication Policy Id that created this blob."""
+    last_accessed_on: Optional["datetime"]
+    """Indicates when the last Read/Write operation was performed on a Blob."""
+    tag_count: Optional[int]
+    """Tags count on this blob."""
+    tags: Optional[Dict[str, str]]
+    """Key value pair of tags on this blob."""
+    has_versions_only: Optional[bool]
+    """A true value indicates the root blob is deleted"""
+    immutability_policy: ImmutabilityPolicy
+    """Specifies the immutability policy of a blob, blob snapshot or blob version."""
+    has_legal_hold: Optional[bool]
+    """Specified if a legal hold should be set on the blob.
+        Currently this parameter of upload_blob() API is for BlockBlob only."""
+
+    def __init__(self, **kwargs: Any) -> None:
+        self.name = kwargs.get('name')  # type: ignore [assignment]
+        self.container = None  # type: ignore [assignment]
+        self.snapshot = kwargs.get('x-ms-snapshot')
+        self.version_id = kwargs.get('x-ms-version-id')
+        self.is_current_version = kwargs.get('x-ms-is-current-version')
+        self.blob_type = BlobType(kwargs['x-ms-blob-type']) if (
+            kwargs.get('x-ms-blob-type')) else None  # type: ignore [assignment]
+        self.metadata = kwargs.get('metadata')  # type: ignore [assignment]
+        self.encrypted_metadata = kwargs.get('encrypted_metadata')
+        self.last_modified = kwargs.get('Last-Modified')  # type: ignore [assignment]
+        self.etag = kwargs.get('ETag')  # type: ignore [assignment]
+        self.size = kwargs.get('Content-Length')  # type: ignore [assignment]
+        self.content_range = kwargs.get('Content-Range')
+        self.append_blob_committed_block_count = kwargs.get('x-ms-blob-committed-block-count')
+        self.is_append_blob_sealed = kwargs.get('x-ms-blob-sealed')
+        self.page_blob_sequence_number = kwargs.get('x-ms-blob-sequence-number')
+        self.server_encrypted = kwargs.get('x-ms-server-encrypted')  # type: ignore [assignment]
+        self.copy = CopyProperties(**kwargs)
+        self.content_settings = ContentSettings(**kwargs)
+        self.lease = LeaseProperties(**kwargs)
+        self.blob_tier = kwargs.get('x-ms-access-tier')
+        self.rehydrate_priority = kwargs.get('x-ms-rehydrate-priority')
+        self.blob_tier_change_time = kwargs.get('x-ms-access-tier-change-time')
+        self.blob_tier_inferred = kwargs.get('x-ms-access-tier-inferred')
+        self.deleted = False
+        self.deleted_time = None
+        self.remaining_retention_days = None
+        self.creation_time = kwargs.get('x-ms-creation-time')  # type: ignore [assignment]
+        self.archive_status = kwargs.get('x-ms-archive-status')
+        self.encryption_key_sha256 = kwargs.get('x-ms-encryption-key-sha256')
+        self.encryption_scope = kwargs.get('x-ms-encryption-scope')
+        self.request_server_encrypted = kwargs.get('x-ms-server-encrypted')
+        self.object_replication_source_properties = kwargs.get('object_replication_source_properties')
+        self.object_replication_destination_policy = kwargs.get('x-ms-or-policy-id')
+        self.last_accessed_on = kwargs.get('x-ms-last-access-time')
+        self.tag_count = kwargs.get('x-ms-tag-count')
+        self.tags = None
+        self.immutability_policy = ImmutabilityPolicy(expiry_time=kwargs.get('x-ms-immutability-policy-until-date'),
+                                                      policy_mode=kwargs.get('x-ms-immutability-policy-mode'))
+        self.has_legal_hold = kwargs.get('x-ms-legal-hold')
+        self.has_versions_only = None
+
+
+class BlobQueryError(object):
+    """The error happened during quick query operation."""
+
+    error: Optional[str]
+    """The name of the error."""
+    is_fatal: bool
+    """If true, this error prevents further query processing. More result data may be returned,
+        but there is no guarantee that all of the original data will be processed.
+        If false, this error does not prevent further query processing."""
+    description: Optional[str]
+    """A description of the error."""
+    position: Optional[int]
+    """The blob offset at which the error occurred."""
+
+    def __init__(
+        self, error: Optional[str] = None,
+        is_fatal: bool = False,
+        description: Optional[str] = None,
+        position: Optional[int] = None
+    ) -> None:
+        self.error = error
+        self.is_fatal = is_fatal
+        self.description = description
+        self.position = position
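+
+
+# Illustrative sketch (not part of this module's source): BlobQueryError instances
+# are surfaced through the optional ``on_error`` callback of BlobClient.query_blob.
+# The blob_client object and query below are assumptions for the example.
+#
+#     def handle_error(error: BlobQueryError) -> None:
+#         print(f"{error.error} (fatal={error.is_fatal}) at offset {error.position}")
+#
+#     reader = blob_client.query_blob("SELECT * from BlobStorage", on_error=handle_error)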
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/blob/_quick_query_helper.py b/.venv/lib/python3.12/site-packages/azure/storage/blob/_quick_query_helper.py
new file mode 100644
index 00000000..95f8a442
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/blob/_quick_query_helper.py
@@ -0,0 +1,194 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+
+from io import BytesIO
+from typing import Any, Dict, Generator, IO, Iterable, Optional, Type, Union, TYPE_CHECKING
+
+from ._shared.avro.avro_io import DatumReader
+from ._shared.avro.datafile import DataFileReader
+
+if TYPE_CHECKING:
+    from ._models import BlobQueryError
+
+
+class BlobQueryReader(object):  # pylint: disable=too-many-instance-attributes
+    """A streaming object to read query results."""
+
+    name: str
+    """The name of the blob being quered."""
+    container: str
+    """The name of the container where the blob is."""
+    response_headers: Dict[str, Any]
+    """The response_headers of the quick query request."""
+    record_delimiter: str
+    """The delimiter used to separate lines, or records with the data. The `records`
+    method will return these lines via a generator."""
+
+    def __init__(
+        self,
+        name: str = None,  # type: ignore [assignment]
+        container: str = None,  # type: ignore [assignment]
+        errors: Any = None,
+        record_delimiter: str = '\n',
+        encoding: Optional[str] = None,
+        headers: Dict[str, Any] = None,  # type: ignore [assignment]
+        response: Any = None,
+        error_cls: Type["BlobQueryError"] = None,  # type: ignore [assignment]
+    ) -> None:
+        self.name = name
+        self.container = container
+        self.response_headers = headers
+        self.record_delimiter = record_delimiter
+        self._size = 0
+        self._bytes_processed = 0
+        self._errors = errors
+        self._encoding = encoding
+        self._error_cls = error_cls  # set before parsing: _process_record may need it for a fatal first record
+        self._parsed_results = DataFileReader(QuickQueryStreamer(response), DatumReader())
+        self._first_result = self._process_record(next(self._parsed_results))
+
+    def __len__(self):
+        return self._size
+
+    def _process_record(self, result: Dict[str, Any]) -> Optional[bytes]:
+        self._size = result.get('totalBytes', self._size)
+        self._bytes_processed = result.get('bytesScanned', self._bytes_processed)
+        if 'data' in result:
+            return result.get('data')
+        if 'fatal' in result:
+            error = self._error_cls(
+                error=result['name'],
+                is_fatal=result['fatal'],
+                description=result['description'],
+                position=result['position']
+            )
+            if self._errors:
+                self._errors(error)
+        return None
+
+    def _iter_stream(self) -> Generator[bytes, None, None]:
+        if self._first_result is not None:
+            yield self._first_result
+        for next_result in self._parsed_results:
+            processed_result = self._process_record(next_result)
+            if processed_result is not None:
+                yield processed_result
+
+    def readall(self) -> Union[bytes, str]:
+        """Return all query results.
+
+        This operation blocks until all data is downloaded.
+        If an encoding has been configured, it will be used to decode individual
+        records as they are received.
+
+        :returns: The query results.
+        :rtype: Union[bytes, str]
+        """
+        stream = BytesIO()
+        self.readinto(stream)
+        data = stream.getvalue()
+        if self._encoding:
+            return data.decode(self._encoding)
+        return data
+
+    def readinto(self, stream: IO) -> None:
+        """Download the query result to a stream.
+
+        :param IO stream:
+            The stream to download to. This can be an open file-handle,
+            or any writable stream.
+        :returns: None
+        """
+        for record in self._iter_stream():
+            stream.write(record)
+
+    def records(self) -> Iterable[Union[bytes, str]]:
+        """Returns a record generator for the query result.
+
+        Records will be returned line by line.
+        If an encoding has been configured, it will be used to decode individual
+        records as they are received.
+
+        :returns: A record generator for the query result.
+        :rtype: Iterable[Union[bytes, str]]
+        """
+        delimiter = self.record_delimiter.encode('utf-8')
+        for record_chunk in self._iter_stream():
+            for record in record_chunk.split(delimiter):
+                if self._encoding:
+                    yield record.decode(self._encoding)
+                else:
+                    yield record
+
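+    # Illustrative usage (a sketch, not part of this class): a BlobQueryReader is
+    # normally obtained from BlobClient.query_blob; the blob_client below is assumed.
+    #
+    #     reader = blob_client.query_blob("SELECT * from BlobStorage")
+    #     for row in reader.records():    # one record at a time
+    #         process(row)
+    #     data = reader.readall()         # or buffer everything at once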
+
+class QuickQueryStreamer(object):
+    """
+    File-like streaming iterator.
+    """
+
+    def __init__(self, generator):
+        self.generator = generator
+        self.iterator = iter(generator)
+        self._buf = b""
+        self._point = 0
+        self._download_offset = 0
+        self._buf_start = 0
+        self.file_length = None
+
+    def __len__(self):
+        return self.file_length
+
+    def __iter__(self):
+        return self.iterator
+
+    @staticmethod
+    def seekable():
+        return True
+
+    def __next__(self):
+        next_part = next(self.iterator)
+        self._download_offset += len(next_part)
+        return next_part
+
+    def tell(self):
+        return self._point
+
+    def seek(self, offset, whence=0):
+        if whence == 0:
+            self._point = offset
+        elif whence == 1:
+            self._point += offset
+        else:
+            raise ValueError("whence must be 0, or 1")
+        if self._point < 0:    # pylint: disable=consider-using-max-builtin
+            self._point = 0  # XXX is this right?
+
+    def read(self, size):
+        try:
+            # keep reading from the generator until the buffer of this stream has enough data to read
+            while self._point + size > self._download_offset:
+                self._buf += self.__next__()
+        except StopIteration:
+            self.file_length = self._download_offset
+
+        start_point = self._point
+
+        # advance the read position, clamping at the end of the downloaded data
+        self._point = min(self._point + size, self._download_offset)
+
+        relative_start = start_point - self._buf_start
+        if relative_start < 0:
+            raise ValueError("Buffer has dumped too much data")
+        relative_end = relative_start + size
+        data = self._buf[relative_start: relative_end]
+
+        # drop already-consumed data from the buffer, keeping a 16-byte tail
+        # behind the current read position to allow small backward seeks
+        dumped_size = max(relative_end - 16 - relative_start, 0)
+        self._buf_start += dumped_size
+        self._buf = self._buf[dumped_size:]
+
+        return data
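+
+
+if __name__ == '__main__':
+    # Minimal illustrative self-check (not part of the SDK): QuickQueryStreamer
+    # adapts a plain byte-chunk iterable into the file-like interface
+    # (read/seek/tell) that DataFileReader expects.
+    _streamer = QuickQueryStreamer(iter([b"hello ", b"world"]))
+    assert _streamer.read(5) == b"hello"   # pulls the first chunk on demand
+    assert _streamer.read(6) == b" world"  # pulls the second chunk on demand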
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/blob/_serialize.py b/.venv/lib/python3.12/site-packages/azure/storage/blob/_serialize.py
new file mode 100644
index 00000000..316e321c
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/blob/_serialize.py
@@ -0,0 +1,214 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+from typing import Any, cast, Dict, Optional, Tuple, Union, TYPE_CHECKING
+
+try:
+    from urllib.parse import quote
+except ImportError:
+    from urllib2 import quote  # type: ignore
+
+from azure.core import MatchConditions
+
+from ._generated.models import (
+    ArrowConfiguration,
+    BlobTag,
+    BlobTags,
+    ContainerCpkScopeInfo,
+    CpkScopeInfo,
+    DelimitedTextConfiguration,
+    JsonTextConfiguration,
+    LeaseAccessConditions,
+    ModifiedAccessConditions,
+    QueryFormat,
+    QueryFormatType,
+    QuerySerialization,
+    SourceModifiedAccessConditions
+)
+from ._models import ContainerEncryptionScope, DelimitedJsonDialect
+
+if TYPE_CHECKING:
+    from ._lease import BlobLeaseClient
+
+
+_SUPPORTED_API_VERSIONS = [
+    '2019-02-02',
+    '2019-07-07',
+    '2019-10-10',
+    '2019-12-12',
+    '2020-02-10',
+    '2020-04-08',
+    '2020-06-12',
+    '2020-08-04',
+    '2020-10-02',
+    '2020-12-06',
+    '2021-02-12',
+    '2021-04-10',
+    '2021-06-08',
+    '2021-08-06',
+    '2021-12-02',
+    '2022-11-02',
+    '2023-01-03',
+    '2023-05-03',
+    '2023-08-03',
+    '2023-11-03',
+    '2024-05-04',
+    '2024-08-04',
+    '2024-11-04',
+    '2025-01-05',
+    '2025-05-05',
+]
+
+
+def _get_match_headers(
+    kwargs: Dict[str, Any],
+    match_param: str,
+    etag_param: str
+) -> Tuple[Optional[str], Optional[Any]]:
+    if_match = None
+    if_none_match = None
+    match_condition = kwargs.pop(match_param, None)
+    if match_condition == MatchConditions.IfNotModified:
+        if_match = kwargs.pop(etag_param, None)
+        if not if_match:
+            raise ValueError(f"'{match_param}' specified without '{etag_param}'.")
+    elif match_condition == MatchConditions.IfPresent:
+        if_match = '*'
+    elif match_condition == MatchConditions.IfModified:
+        if_none_match = kwargs.pop(etag_param, None)
+        if not if_none_match:
+            raise ValueError(f"'{match_param}' specified without '{etag_param}'.")
+    elif match_condition == MatchConditions.IfMissing:
+        if_none_match = '*'
+    elif match_condition is None:
+        if kwargs.get(etag_param):
+            raise ValueError(f"'{etag_param}' specified without '{match_param}'.")
+    else:
+        raise TypeError(f"Invalid match condition: {match_condition}")
+    return if_match, if_none_match
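+
+# Worked example (illustrative): MatchConditions.IfNotModified plus an etag kwarg
+# becomes an If-Match value, leaving If-None-Match unset:
+#
+#     _get_match_headers(
+#         {'match_condition': MatchConditions.IfNotModified, 'etag': '"0x1D2"'},
+#         'match_condition', 'etag')
+#     -> ('"0x1D2"', None)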
+
+
+def get_access_conditions(lease: Optional[Union["BlobLeaseClient", str]]) -> Optional[LeaseAccessConditions]:
+    try:
+        lease_id = lease.id # type: ignore
+    except AttributeError:
+        lease_id = lease # type: ignore
+    return LeaseAccessConditions(lease_id=lease_id) if lease_id else None
+
+
+def get_modify_conditions(kwargs: Dict[str, Any]) -> ModifiedAccessConditions:
+    if_match, if_none_match = _get_match_headers(kwargs, 'match_condition', 'etag')
+    return ModifiedAccessConditions(
+        if_modified_since=kwargs.pop('if_modified_since', None),
+        if_unmodified_since=kwargs.pop('if_unmodified_since', None),
+        if_match=if_match or kwargs.pop('if_match', None),
+        if_none_match=if_none_match or kwargs.pop('if_none_match', None),
+        if_tags=kwargs.pop('if_tags_match_condition', None)
+    )
+
+
+def get_source_conditions(kwargs: Dict[str, Any]) -> SourceModifiedAccessConditions:
+    if_match, if_none_match = _get_match_headers(kwargs, 'source_match_condition', 'source_etag')
+    return SourceModifiedAccessConditions(
+        source_if_modified_since=kwargs.pop('source_if_modified_since', None),
+        source_if_unmodified_since=kwargs.pop('source_if_unmodified_since', None),
+        source_if_match=if_match or kwargs.pop('source_if_match', None),
+        source_if_none_match=if_none_match or kwargs.pop('source_if_none_match', None),
+        source_if_tags=kwargs.pop('source_if_tags_match_condition', None)
+    )
+
+
+def get_cpk_scope_info(kwargs: Dict[str, Any]) -> Optional[CpkScopeInfo]:
+    if 'encryption_scope' in kwargs:
+        return CpkScopeInfo(encryption_scope=kwargs.pop('encryption_scope'))
+    return None
+
+
+def get_container_cpk_scope_info(kwargs: Dict[str, Any]) -> Optional[ContainerCpkScopeInfo]:
+    encryption_scope = kwargs.pop('container_encryption_scope', None)
+    if encryption_scope:
+        if isinstance(encryption_scope, ContainerEncryptionScope):
+            return ContainerCpkScopeInfo(
+                default_encryption_scope=encryption_scope.default_encryption_scope,
+                prevent_encryption_scope_override=encryption_scope.prevent_encryption_scope_override
+            )
+        if isinstance(encryption_scope, dict):
+            return ContainerCpkScopeInfo(
+                default_encryption_scope=encryption_scope['default_encryption_scope'],
+                prevent_encryption_scope_override=encryption_scope.get('prevent_encryption_scope_override')
+            )
+        raise TypeError("Container encryption scope must be dict or type ContainerEncryptionScope.")
+    return None
+
+
+def get_api_version(kwargs: Dict[str, Any]) -> str:
+    api_version = kwargs.get('api_version', None)
+    if api_version and api_version not in _SUPPORTED_API_VERSIONS:
+        versions = '\n'.join(_SUPPORTED_API_VERSIONS)
+        raise ValueError(f"Unsupported API version '{api_version}'. Please select from:\n{versions}")
+    return api_version or _SUPPORTED_API_VERSIONS[-1]
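+
+# For example (illustrative): get_api_version({}) falls back to the newest
+# supported version ('2025-05-05'), while get_api_version({'api_version':
+# '2018-01-01'}) raises ValueError because that version is not in the list above.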
+
+def get_version_id(self_vid: Optional[str], kwargs: Dict[str, Any]) -> Optional[str]:
+    if 'version_id' in kwargs:
+        return cast(str, kwargs.pop('version_id'))
+    return self_vid
+
+def serialize_blob_tags_header(tags: Optional[Dict[str, str]] = None) -> Optional[str]:
+    if tags is None:
+        return None
+
+    components = []
+    if tags:
+        for key, value in tags.items():
+            components.append(quote(key, safe='.-'))
+            components.append('=')
+            components.append(quote(value, safe='.-'))
+            components.append('&')
+
+    if components:
+        del components[-1]
+
+    return ''.join(components)
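+
+# Worked example (illustrative): tags are URL-quoted and '&'-joined, e.g.
+#     serialize_blob_tags_header({'project': 'demo', 'env': 'dev'})
+#     -> 'project=demo&env=dev'
+# and reserved characters are percent-encoded:
+#     serialize_blob_tags_header({'a b': 'c/d'}) -> 'a%20b=c%2Fd'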
+
+
+def serialize_blob_tags(tags: Optional[Dict[str, str]] = None) -> BlobTags:
+    tag_list = []
+    if tags:
+        tag_list = [BlobTag(key=k, value=v) for k, v in tags.items()]
+    return BlobTags(blob_tag_set=tag_list)
+
+
+def serialize_query_format(formater: Union[str, DelimitedJsonDialect]) -> Optional[QuerySerialization]:
+    if formater == "ParquetDialect":
+        qq_format = QueryFormat(type=QueryFormatType.PARQUET, parquet_text_configuration=' ')  #type: ignore [arg-type]
+    elif isinstance(formater, DelimitedJsonDialect):
+        json_serialization_settings = JsonTextConfiguration(record_separator=formater.delimiter)
+        qq_format = QueryFormat(type=QueryFormatType.JSON, json_text_configuration=json_serialization_settings)
+    elif hasattr(formater, 'quotechar'):  # This supports a csv.Dialect as well
+        try:
+            headers = formater.has_header  # type: ignore
+        except AttributeError:
+            headers = False
+        if isinstance(formater, str):
+            raise ValueError("Unknown string value provided. Accepted values: ParquetDialect")
+        csv_serialization_settings = DelimitedTextConfiguration(
+            column_separator=formater.delimiter,
+            field_quote=formater.quotechar,
+            record_separator=formater.lineterminator,
+            escape_char=formater.escapechar,
+            headers_present=headers
+        )
+        qq_format = QueryFormat(
+            type=QueryFormatType.DELIMITED,
+            delimited_text_configuration=csv_serialization_settings
+        )
+    elif isinstance(formater, list):
+        arrow_serialization_settings = ArrowConfiguration(schema=formater)
+        qq_format = QueryFormat(type=QueryFormatType.ARROW, arrow_configuration=arrow_serialization_settings)
+    elif not formater:
+        return None
+    else:
+        raise TypeError("Format must be DelimitedTextDialect or DelimitedJsonDialect or ParquetDialect.")
+    return QuerySerialization(format=qq_format)
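+
+
+# Illustrative usage (a sketch, not part of this module): serializing the public
+# dialect models into the generated QuerySerialization, assuming the
+# DelimitedTextDialect model from ._models:
+#
+#     from ._models import DelimitedTextDialect
+#     dialect = DelimitedTextDialect(delimiter=',', quotechar='"',
+#                                    lineterminator='\n', escapechar='', has_header=True)
+#     serialization = serialize_query_format(dialect)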
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/blob/_shared/__init__.py b/.venv/lib/python3.12/site-packages/azure/storage/blob/_shared/__init__.py
new file mode 100644
index 00000000..a8b1a27d
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/blob/_shared/__init__.py
@@ -0,0 +1,54 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+
+import base64
+import hashlib
+import hmac
+
+try:
+    from urllib.parse import quote, unquote
+except ImportError:
+    from urllib2 import quote, unquote # type: ignore
+
+
+def url_quote(url):
+    return quote(url)
+
+
+def url_unquote(url):
+    return unquote(url)
+
+
+def encode_base64(data):
+    if isinstance(data, str):
+        data = data.encode('utf-8')
+    encoded = base64.b64encode(data)
+    return encoded.decode('utf-8')
+
+
+def decode_base64_to_bytes(data):
+    if isinstance(data, str):
+        data = data.encode('utf-8')
+    return base64.b64decode(data)
+
+
+def decode_base64_to_text(data):
+    decoded_bytes = decode_base64_to_bytes(data)
+    return decoded_bytes.decode('utf-8')
+
+
+def sign_string(key, string_to_sign, key_is_base64=True):
+    if key_is_base64:
+        key = decode_base64_to_bytes(key)
+    else:
+        if isinstance(key, str):
+            key = key.encode('utf-8')
+    if isinstance(string_to_sign, str):
+        string_to_sign = string_to_sign.encode('utf-8')
+    signed_hmac_sha256 = hmac.HMAC(key, string_to_sign, hashlib.sha256)
+    digest = signed_hmac_sha256.digest()
+    encoded_digest = encode_base64(digest)
+    return encoded_digest
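+
+
+if __name__ == '__main__':
+    # Minimal illustrative self-check (not part of the SDK): sign_string computes
+    # a base64-encoded HMAC-SHA256 over the UTF-8 string, using a base64 key by default.
+    _key = encode_base64(b'secret')
+    _expected = encode_base64(hmac.HMAC(b'secret', b'payload', hashlib.sha256).digest())
+    assert sign_string(_key, 'payload') == _expected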
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/blob/_shared/authentication.py b/.venv/lib/python3.12/site-packages/azure/storage/blob/_shared/authentication.py
new file mode 100644
index 00000000..b41f2391
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/blob/_shared/authentication.py
@@ -0,0 +1,245 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+
+import logging
+import re
+from typing import List, Tuple
+from urllib.parse import unquote, urlparse
+from functools import cmp_to_key
+
+try:
+    from yarl import URL
+except ImportError:
+    pass
+
+try:
+    from azure.core.pipeline.transport import AioHttpTransport  # pylint: disable=non-abstract-transport-import
+except ImportError:
+    AioHttpTransport = None
+
+from azure.core.exceptions import ClientAuthenticationError
+from azure.core.pipeline.policies import SansIOHTTPPolicy
+
+from . import sign_string
+
+logger = logging.getLogger(__name__)
+
+
+table_lv0 = [
+    0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+    0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+    0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x71c, 0x0, 0x71f, 0x721, 0x723, 0x725,
+    0x0, 0x0, 0x0, 0x72d, 0x803, 0x0, 0x0, 0x733, 0x0, 0xd03, 0xd1a, 0xd1c, 0xd1e,
+    0xd20, 0xd22, 0xd24, 0xd26, 0xd28, 0xd2a, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+    0xe02, 0xe09, 0xe0a, 0xe1a, 0xe21, 0xe23, 0xe25, 0xe2c, 0xe32, 0xe35, 0xe36, 0xe48, 0xe51,
+    0xe70, 0xe7c, 0xe7e, 0xe89, 0xe8a, 0xe91, 0xe99, 0xe9f, 0xea2, 0xea4, 0xea6, 0xea7, 0xea9,
+    0x0, 0x0, 0x0, 0x743, 0x744, 0x748, 0xe02, 0xe09, 0xe0a, 0xe1a, 0xe21, 0xe23, 0xe25,
+    0xe2c, 0xe32, 0xe35, 0xe36, 0xe48, 0xe51, 0xe70, 0xe7c, 0xe7e, 0xe89, 0xe8a, 0xe91, 0xe99,
+    0xe9f, 0xea2, 0xea4, 0xea6, 0xea7, 0xea9, 0x0, 0x74c, 0x0, 0x750, 0x0,
+]
+
+table_lv4 = [
+    0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+    0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+    0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x8012, 0x0, 0x0, 0x0, 0x0, 0x0, 0x8212, 0x0, 0x0,
+    0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+    0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+    0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+    0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+    0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+]
+
+def compare(lhs: str, rhs: str) -> int:  # pylint:disable=too-many-return-statements
+    tables = [table_lv0, table_lv4]
+    curr_level, i, j, n = 0, 0, 0, len(tables)
+    lhs_len = len(lhs)
+    rhs_len = len(rhs)
+    while curr_level < n:
+        if curr_level == (n - 1) and i != j:
+            if i > j:
+                return -1
+            if i < j:
+                return 1
+            return 0
+
+        w1 = tables[curr_level][ord(lhs[i])] if i < lhs_len else 0x1
+        w2 = tables[curr_level][ord(rhs[j])] if j < rhs_len else 0x1
+
+        if w1 == 0x1 and w2 == 0x1:
+            i = 0
+            j = 0
+            curr_level += 1
+        elif w1 == w2:
+            i += 1
+            j += 1
+        elif w1 == 0:
+            i += 1
+        elif w2 == 0:
+            j += 1
+        else:
+            if w1 < w2:
+                return -1
+            if w1 > w2:
+                return 1
+            return 0
+    return 0
+
+
+# wraps a given exception with the desired exception type
+def _wrap_exception(ex, desired_type):
+    msg = ""
+    if ex.args:
+        msg = ex.args[0]
+    return desired_type(msg)
+
+# This method attempts to emulate the sorting done by the service
+def _storage_header_sort(input_headers: List[Tuple[str, str]]) -> List[Tuple[str, str]]:
+
+    # Build dict of tuples and list of keys
+    header_dict = {}
+    header_keys = []
+    for k, v in input_headers:
+        header_dict[k] = v
+        header_keys.append(k)
+
+    try:
+        header_keys = sorted(header_keys, key=cmp_to_key(compare))
+    except ValueError as exc:
+        raise ValueError("Illegal character encountered when sorting headers.") from exc
+
+    # Build list of sorted tuples
+    sorted_headers = []
+    for key in header_keys:
+        sorted_headers.append((key, header_dict.pop(key)))
+    return sorted_headers
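+
+# Illustrative usage: during request signing, the x-ms-* headers collected by
+# _get_canonicalized_headers are re-ordered with the collation tables above, e.g.
+#
+#     _storage_header_sort([('x-ms-version', '2025-05-05'), ('x-ms-date', date_str)])
+#
+# returns the same pairs in the order the service uses when verifying the signature.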
+
+
+class AzureSigningError(ClientAuthenticationError):
+    """
+    Represents a fatal error when attempting to sign a request.
+    In general, the cause of this exception is user error. For example, the given account key is not valid.
+    Please visit https://learn.microsoft.com/azure/storage/common/storage-create-storage-account for more info.
+    """
+
+
+class SharedKeyCredentialPolicy(SansIOHTTPPolicy):
+
+    def __init__(self, account_name, account_key):
+        self.account_name = account_name
+        self.account_key = account_key
+        super(SharedKeyCredentialPolicy, self).__init__()
+
+    @staticmethod
+    def _get_headers(request, headers_to_sign):
+        headers = dict((name.lower(), value) for name, value in request.http_request.headers.items() if value)
+        if 'content-length' in headers and headers['content-length'] == '0':
+            del headers['content-length']
+        return '\n'.join(headers.get(x, '') for x in headers_to_sign) + '\n'
+
+    @staticmethod
+    def _get_verb(request):
+        return request.http_request.method + '\n'
+
+    def _get_canonicalized_resource(self, request):
+        uri_path = urlparse(request.http_request.url).path
+        try:
+            if isinstance(request.context.transport, AioHttpTransport) or \
+                    isinstance(getattr(request.context.transport, "_transport", None), AioHttpTransport) or \
+                    isinstance(getattr(getattr(request.context.transport, "_transport", None), "_transport", None),
+                               AioHttpTransport):
+                uri_path = URL(uri_path)
+                return '/' + self.account_name + str(uri_path)
+        except TypeError:
+            pass
+        return '/' + self.account_name + uri_path
+
+    @staticmethod
+    def _get_canonicalized_headers(request):
+        string_to_sign = ''
+        x_ms_headers = []
+        for name, value in request.http_request.headers.items():
+            if name.startswith('x-ms-'):
+                x_ms_headers.append((name.lower(), value))
+        x_ms_headers = _storage_header_sort(x_ms_headers)
+        for name, value in x_ms_headers:
+            if value is not None:
+                string_to_sign += ''.join([name, ':', value, '\n'])
+        return string_to_sign
+
+    @staticmethod
+    def _get_canonicalized_resource_query(request):
+        sorted_queries = list(request.http_request.query.items())
+        sorted_queries.sort()
+
+        string_to_sign = ''
+        for name, value in sorted_queries:
+            if value is not None:
+                string_to_sign += '\n' + name.lower() + ':' + unquote(value)
+
+        return string_to_sign
+
+    def _add_authorization_header(self, request, string_to_sign):
+        try:
+            signature = sign_string(self.account_key, string_to_sign)
+            auth_string = 'SharedKey ' + self.account_name + ':' + signature
+            request.http_request.headers['Authorization'] = auth_string
+        except Exception as ex:
+            # Wrap any error that occurred as signing error
+            # Doing so will clarify/locate the source of problem
+            raise _wrap_exception(ex, AzureSigningError) from ex
+
+    def on_request(self, request):
+        string_to_sign = \
+            self._get_verb(request) + \
+            self._get_headers(
+                request,
+                [
+                    'content-encoding', 'content-language', 'content-length',
+                    'content-md5', 'content-type', 'date', 'if-modified-since',
+                    'if-match', 'if-none-match', 'if-unmodified-since', 'byte_range'
+                ]
+            ) + \
+            self._get_canonicalized_headers(request) + \
+            self._get_canonicalized_resource(request) + \
+            self._get_canonicalized_resource_query(request)
+
+        self._add_authorization_header(request, string_to_sign)
+        # logger.debug("String_to_sign=%s", string_to_sign)
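+
+    # Illustrative usage (a sketch, not part of this class): install the policy in
+    # an azure-core pipeline so each outgoing request is signed; the account name
+    # and key below are assumptions.
+    #
+    #     policy = SharedKeyCredentialPolicy('myaccount', '<base64 account key>')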
+
+
+class StorageHttpChallenge(object):
+    def __init__(self, challenge):
+        """ Parses an HTTP WWW-Authentication Bearer challenge from the Storage service. """
+        if not challenge:
+            raise ValueError("Challenge cannot be empty")
+
+        self._parameters = {}
+        self.scheme, trimmed_challenge = challenge.strip().split(" ", 1)
+
+        # name=value pairs either comma or space separated with values possibly being
+        # enclosed in quotes
+        for item in re.split('[, ]', trimmed_challenge):
+            comps = item.split("=")
+            if len(comps) == 2:
+                key = comps[0].strip(' "')
+                value = comps[1].strip(' "')
+                if key:
+                    self._parameters[key] = value
+
+        # Extract and verify required parameters
+        self.authorization_uri = self._parameters.get('authorization_uri')
+        if not self.authorization_uri:
+            raise ValueError("Authorization Uri not found")
+
+        self.resource_id = self._parameters.get('resource_id')
+        if not self.resource_id:
+            raise ValueError("Resource id not found")
+
+        uri_path = urlparse(self.authorization_uri).path.lstrip("/")
+        self.tenant_id = uri_path.split("/")[0]
+
+    def get_value(self, key):
+        return self._parameters.get(key)
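+
+
+if __name__ == '__main__':
+    # Minimal illustrative self-check (not part of the SDK): parsing a Bearer
+    # challenge of the shape returned in WWW-Authenticate headers. The URLs are
+    # made up for the example.
+    _challenge = StorageHttpChallenge(
+        'Bearer authorization_uri=https://login.microsoftonline.com/72f988bf/oauth2/authorize '
+        'resource_id=https://storage.azure.com')
+    assert _challenge.tenant_id == '72f988bf'
+    assert _challenge.resource_id == 'https://storage.azure.com'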
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/blob/_shared/avro/__init__.py b/.venv/lib/python3.12/site-packages/azure/storage/blob/_shared/avro/__init__.py
new file mode 100644
index 00000000..5b396cd2
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/blob/_shared/avro/__init__.py
@@ -0,0 +1,5 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/blob/_shared/avro/avro_io.py b/.venv/lib/python3.12/site-packages/azure/storage/blob/_shared/avro/avro_io.py
new file mode 100644
index 00000000..3e46f1fb
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/blob/_shared/avro/avro_io.py
@@ -0,0 +1,435 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+# pylint: disable=docstring-missing-return, docstring-missing-rtype
+
+"""Input/output utilities.
+
+Includes:
+ - i/o-specific constants
+ - i/o-specific exceptions
+ - schema validation
+ - leaf value encoding and decoding
+ - datum reader/writer machinery
+
+Also includes a generic representation for data, which uses the
+following mapping:
+ - Schema records are implemented as dict.
+ - Schema arrays are implemented as list.
+ - Schema maps are implemented as dict.
+ - Schema strings are implemented as unicode.
+ - Schema bytes are implemented as str.
+ - Schema ints are implemented as int.
+ - Schema longs are implemented as long.
+ - Schema floats are implemented as float.
+ - Schema doubles are implemented as float.
+ - Schema booleans are implemented as bool.
+"""
+
+import json
+import logging
+import struct
+import sys
+
+from ..avro import schema
+
+PY3 = sys.version_info[0] == 3
+
+logger = logging.getLogger(__name__)
+
+# ------------------------------------------------------------------------------
+# Constants
+
+STRUCT_FLOAT = struct.Struct('<f')  # little-endian float
+STRUCT_DOUBLE = struct.Struct('<d')  # little-endian double
+
+# ------------------------------------------------------------------------------
+# Exceptions
+
+
+class SchemaResolutionException(schema.AvroException):
+    def __init__(self, fail_msg, writer_schema=None):
+        if writer_schema:
+            pretty_writers = json.dumps(json.loads(str(writer_schema)), indent=2)
+            fail_msg += f"\nWriter's Schema: {pretty_writers}"
+        schema.AvroException.__init__(self, fail_msg)
+
+# ------------------------------------------------------------------------------
+# Decoder
+
+
+class BinaryDecoder(object):
+    """Read leaf values."""
+
+    def __init__(self, reader):
+        """
+        reader is a Python object on which we can call read, seek, and tell.
+        """
+        self._reader = reader
+
+    @property
+    def reader(self):
+        """Reports the reader used by this decoder."""
+        return self._reader
+
+    def read(self, n):
+        """Read n bytes.
+
+        :param int n: Number of bytes to read.
+        :returns: The next n bytes from the input.
+        :rtype: bytes
+        """
+        assert (n >= 0), n
+        input_bytes = self.reader.read(n)
+        if n > 0 and not input_bytes:
+            raise StopIteration
+        assert (len(input_bytes) == n), input_bytes
+        return input_bytes
+
+    @staticmethod
+    def read_null():
+        """
+        null is written as zero bytes
+        """
+        return None
+
+    def read_boolean(self):
+        """
+        a boolean is written as a single byte
+        whose value is either 0 (false) or 1 (true).
+        """
+        b = ord(self.read(1))
+        if b == 1:
+            return True
+        if b == 0:
+            return False
+        fail_msg = f"Invalid value for boolean: {b}"
+        raise schema.AvroException(fail_msg)
+
+    def read_int(self):
+        """
+        int and long values are written using variable-length, zig-zag coding.
+        """
+        return self.read_long()
+
+    def read_long(self):
+        """
+        int and long values are written using variable-length, zig-zag coding.
+        """
+        b = ord(self.read(1))
+        n = b & 0x7F
+        shift = 7
+        while (b & 0x80) != 0:
+            b = ord(self.read(1))
+            n |= (b & 0x7F) << shift
+            shift += 7
+        datum = (n >> 1) ^ -(n & 1)
+        return datum
+
+    def read_float(self):
+        """
+        A float is written as 4 bytes.
+        The float is converted into a 32-bit integer using a method equivalent to
+        Java's floatToIntBits and then encoded in little-endian format.
+        """
+        return STRUCT_FLOAT.unpack(self.read(4))[0]
+
+    def read_double(self):
+        """
+        A double is written as 8 bytes.
+        The double is converted into a 64-bit integer using a method equivalent to
+        Java's doubleToLongBits and then encoded in little-endian format.
+        """
+        return STRUCT_DOUBLE.unpack(self.read(8))[0]
+
+    def read_bytes(self):
+        """
+        Bytes are encoded as a long followed by that many bytes of data.
+        """
+        nbytes = self.read_long()
+        assert (nbytes >= 0), nbytes
+        return self.read(nbytes)
+
+    def read_utf8(self):
+        """
+        A string is encoded as a long followed by
+        that many bytes of UTF-8 encoded character data.
+        """
+        input_bytes = self.read_bytes()
+        if PY3:
+            try:
+                return input_bytes.decode('utf-8')
+            except UnicodeDecodeError as exn:
+                logger.error('Invalid UTF-8 input bytes: %r', input_bytes)
+                raise exn
+        else:
+            # PY2
+            return unicode(input_bytes, "utf-8") # pylint: disable=undefined-variable
+
+    def skip_null(self):
+        pass
+
+    def skip_boolean(self):
+        self.skip(1)
+
+    def skip_int(self):
+        self.skip_long()
+
+    def skip_long(self):
+        b = ord(self.read(1))
+        while (b & 0x80) != 0:
+            b = ord(self.read(1))
+
+    def skip_float(self):
+        self.skip(4)
+
+    def skip_double(self):
+        self.skip(8)
+
+    def skip_bytes(self):
+        self.skip(self.read_long())
+
+    def skip_utf8(self):
+        self.skip_bytes()
+
+    def skip(self, n):
+        self.reader.seek(self.reader.tell() + n)
+
+
+# ------------------------------------------------------------------------------
+# DatumReader
+
+
+class DatumReader(object):
+    """Deserialize Avro-encoded data into a Python data structure."""
+
+    def __init__(self, writer_schema=None):
+        """
+        As defined in the Avro specification, we call the schema encoded
+        in the data the "writer's schema".
+        """
+        self._writer_schema = writer_schema
+
+    # read/write properties
+    def set_writer_schema(self, writer_schema):
+        self._writer_schema = writer_schema
+
+    writer_schema = property(lambda self: self._writer_schema,
+                             set_writer_schema)
+
+    def read(self, decoder):
+        return self.read_data(self.writer_schema, decoder)
+
+    def read_data(self, writer_schema, decoder):
+        # function dispatch for reading data based on type of writer's schema
+        if writer_schema.type == 'null':
+            result = decoder.read_null()
+        elif writer_schema.type == 'boolean':
+            result = decoder.read_boolean()
+        elif writer_schema.type == 'string':
+            result = decoder.read_utf8()
+        elif writer_schema.type == 'int':
+            result = decoder.read_int()
+        elif writer_schema.type == 'long':
+            result = decoder.read_long()
+        elif writer_schema.type == 'float':
+            result = decoder.read_float()
+        elif writer_schema.type == 'double':
+            result = decoder.read_double()
+        elif writer_schema.type == 'bytes':
+            result = decoder.read_bytes()
+        elif writer_schema.type == 'fixed':
+            result = self.read_fixed(writer_schema, decoder)
+        elif writer_schema.type == 'enum':
+            result = self.read_enum(writer_schema, decoder)
+        elif writer_schema.type == 'array':
+            result = self.read_array(writer_schema, decoder)
+        elif writer_schema.type == 'map':
+            result = self.read_map(writer_schema, decoder)
+        elif writer_schema.type in ['union', 'error_union']:
+            result = self.read_union(writer_schema, decoder)
+        elif writer_schema.type in ['record', 'error', 'request']:
+            result = self.read_record(writer_schema, decoder)
+        else:
+            fail_msg = f"Cannot read unknown schema type: {writer_schema.type}"
+            raise schema.AvroException(fail_msg)
+        return result
+
+    def skip_data(self, writer_schema, decoder):
+        if writer_schema.type == 'null':
+            result = decoder.skip_null()
+        elif writer_schema.type == 'boolean':
+            result = decoder.skip_boolean()
+        elif writer_schema.type == 'string':
+            result = decoder.skip_utf8()
+        elif writer_schema.type == 'int':
+            result = decoder.skip_int()
+        elif writer_schema.type == 'long':
+            result = decoder.skip_long()
+        elif writer_schema.type == 'float':
+            result = decoder.skip_float()
+        elif writer_schema.type == 'double':
+            result = decoder.skip_double()
+        elif writer_schema.type == 'bytes':
+            result = decoder.skip_bytes()
+        elif writer_schema.type == 'fixed':
+            result = self.skip_fixed(writer_schema, decoder)
+        elif writer_schema.type == 'enum':
+            result = self.skip_enum(decoder)
+        elif writer_schema.type == 'array':
+            self.skip_array(writer_schema, decoder)
+            result = None
+        elif writer_schema.type == 'map':
+            self.skip_map(writer_schema, decoder)
+            result = None
+        elif writer_schema.type in ['union', 'error_union']:
+            result = self.skip_union(writer_schema, decoder)
+        elif writer_schema.type in ['record', 'error', 'request']:
+            self.skip_record(writer_schema, decoder)
+            result = None
+        else:
+            fail_msg = f"Unknown schema type: {writer_schema.type}"
+            raise schema.AvroException(fail_msg)
+        return result
+
+    # Fixed instances are encoded using the number of bytes declared in the schema.
+    @staticmethod
+    def read_fixed(writer_schema, decoder):
+        return decoder.read(writer_schema.size)
+
+    @staticmethod
+    def skip_fixed(writer_schema, decoder):
+        return decoder.skip(writer_schema.size)
+
+    # An enum is encoded by an int, representing the zero-based position of the symbol in the schema.
+    @staticmethod
+    def read_enum(writer_schema, decoder):
+        # read data
+        index_of_symbol = decoder.read_int()
+        if index_of_symbol >= len(writer_schema.symbols):
+            fail_msg = f"Can't access enum index {index_of_symbol} for enum with {len(writer_schema.symbols)} symbols"
+            raise SchemaResolutionException(fail_msg, writer_schema)
+        read_symbol = writer_schema.symbols[index_of_symbol]
+        return read_symbol
+
+    @staticmethod
+    def skip_enum(decoder):
+        return decoder.skip_int()
+
+    # Arrays are encoded as a series of blocks.
+
+    # Each block consists of a long count value, followed by that many array items.
+    # A block with count zero indicates the end of the array. Each item is encoded per the array's item schema.
+
+    # If a block's count is negative, then the count is followed immediately by a long block size,
+    # indicating the number of bytes in the block.
+    # The actual count in this case is the absolute value of the count written.
+    def read_array(self, writer_schema, decoder):
+        read_items = []
+        block_count = decoder.read_long()
+        while block_count != 0:
+            if block_count < 0:
+                block_count = -block_count
+                decoder.read_long()
+            for _ in range(block_count):
+                read_items.append(self.read_data(writer_schema.items, decoder))
+            block_count = decoder.read_long()
+        return read_items
+
+    def skip_array(self, writer_schema, decoder):
+        block_count = decoder.read_long()
+        while block_count != 0:
+            if block_count < 0:
+                block_size = decoder.read_long()
+                decoder.skip(block_size)
+            else:
+                for _ in range(block_count):
+                    self.skip_data(writer_schema.items, decoder)
+            block_count = decoder.read_long()
+
+    # Maps are encoded as a series of blocks.
+
+    # Each block consists of a long count value, followed by that many key/value pairs.
+    # A block with count zero indicates the end of the map. Each item is encoded per the map's value schema.
+
+    # If a block's count is negative, then the count is followed immediately by a long block size,
+    # indicating the number of bytes in the block.
+    # The actual count in this case is the absolute value of the count written.
+    def read_map(self, writer_schema, decoder):
+        read_items = {}
+        block_count = decoder.read_long()
+        while block_count != 0:
+            if block_count < 0:
+                block_count = -block_count
+                decoder.read_long()
+            for _ in range(block_count):
+                key = decoder.read_utf8()
+                read_items[key] = self.read_data(writer_schema.values, decoder)
+            block_count = decoder.read_long()
+        return read_items
+
+    def skip_map(self, writer_schema, decoder):
+        block_count = decoder.read_long()
+        while block_count != 0:
+            if block_count < 0:
+                block_size = decoder.read_long()
+                decoder.skip(block_size)
+            else:
+                for _ in range(block_count):
+                    decoder.skip_utf8()
+                    self.skip_data(writer_schema.values, decoder)
+            block_count = decoder.read_long()
+
+    # A union is encoded by first writing a long value indicating
+    # the zero-based position within the union of the schema of its value.
+    # The value is then encoded per the indicated schema within the union.
+    def read_union(self, writer_schema, decoder):
+        # schema resolution
+        index_of_schema = int(decoder.read_long())
+        if index_of_schema >= len(writer_schema.schemas):
+            fail_msg = (f"Can't access branch index {index_of_schema} "
+                        f"for union with {len(writer_schema.schemas)} branches")
+            raise SchemaResolutionException(fail_msg, writer_schema)
+        selected_writer_schema = writer_schema.schemas[index_of_schema]
+
+        # read data
+        return self.read_data(selected_writer_schema, decoder)
+
+    def skip_union(self, writer_schema, decoder):
+        index_of_schema = int(decoder.read_long())
+        if index_of_schema >= len(writer_schema.schemas):
+            fail_msg = (f"Can't access branch index {index_of_schema} "
+                        f"for union with {len(writer_schema.schemas)} branches")
+            raise SchemaResolutionException(fail_msg, writer_schema)
+        return self.skip_data(writer_schema.schemas[index_of_schema], decoder)
+
+    # A record is encoded by encoding the values of its fields
+    # in the order that they are declared. In other words, a record
+    # is encoded as just the concatenation of the encodings of its fields.
+    # Field values are encoded per their schema.
+
+    # Schema Resolution:
+    #     * the ordering of fields may be different: fields are matched by name.
+    #     * schemas for fields with the same name in both records are resolved
+    #     recursively.
+    #     * if the writer's record contains a field with a name not present in the
+    #     reader's record, the writer's value for that field is ignored.
+    #     * if the reader's record schema has a field that contains a default value,
+    #     and writer's schema does not have a field with the same name, then the
+    #     reader should use the default value from its field.
+    #     * if the reader's record schema has a field with no default value, and
+    #     writer's schema does not have a field with the same name, then the
+    #     field's value is unset.
+    def read_record(self, writer_schema, decoder):
+        # schema resolution
+        read_record = {}
+        for field in writer_schema.fields:
+            field_val = self.read_data(field.type, decoder)
+            read_record[field.name] = field_val
+        return read_record
+
+    def skip_record(self, writer_schema, decoder):
+        for field in writer_schema.fields:
+            self.skip_data(field.type, decoder)
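+
+
+if __name__ == '__main__':
+    # Minimal illustrative self-check (not part of the module): Avro's zig-zag
+    # varint encoding maps 2 -> 0x04 and -2 -> 0x03.
+    from io import BytesIO
+    _decoder = BinaryDecoder(BytesIO(b'\x04\x03'))
+    assert _decoder.read_long() == 2
+    assert _decoder.read_long() == -2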
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/blob/_shared/avro/avro_io_async.py b/.venv/lib/python3.12/site-packages/azure/storage/blob/_shared/avro/avro_io_async.py
new file mode 100644
index 00000000..8688661b
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/blob/_shared/avro/avro_io_async.py
@@ -0,0 +1,419 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+# pylint: disable=docstring-missing-return, docstring-missing-rtype
+
+"""Input/output utilities.
+
+Includes:
+ - i/o-specific constants
+ - i/o-specific exceptions
+ - schema validation
+ - leaf value encoding and decoding
+ - datum reader/writer machinery
+
+Also includes a generic representation for data, which uses the
+following mapping:
+ - Schema records are implemented as dict.
+ - Schema arrays are implemented as list.
+ - Schema maps are implemented as dict.
+ - Schema strings are implemented as unicode.
+ - Schema bytes are implemented as str.
+ - Schema ints are implemented as int.
+ - Schema longs are implemented as long.
+ - Schema floats are implemented as float.
+ - Schema doubles are implemented as float.
+ - Schema booleans are implemented as bool.
+"""
+
+import logging
+import sys
+
+from ..avro import schema
+
+from .avro_io import STRUCT_FLOAT, STRUCT_DOUBLE, SchemaResolutionException
+
+PY3 = sys.version_info[0] == 3
+
+logger = logging.getLogger(__name__)
+
+# ------------------------------------------------------------------------------
+# Decoder
+
+
+class AsyncBinaryDecoder(object):
+    """Read leaf values."""
+
+    def __init__(self, reader):
+        """
+        reader is a Python object on which we can call read, seek, and tell.
+        """
+        self._reader = reader
+
+    @property
+    def reader(self):
+        """Reports the reader used by this decoder."""
+        return self._reader
+
+    async def read(self, n):
+        """Read n bytes.
+
+        :param int n: Number of bytes to read.
+        :returns: The next n bytes from the input.
+        :rtype: bytes
+        """
+        assert (n >= 0), n
+        input_bytes = await self.reader.read(n)
+        if n > 0 and not input_bytes:
+            raise StopAsyncIteration
+        assert (len(input_bytes) == n), input_bytes
+        return input_bytes
+
+    @staticmethod
+    def read_null():
+        """
+        null is written as zero bytes
+        """
+        return None
+
+    async def read_boolean(self):
+        """
+        a boolean is written as a single byte
+        whose value is either 0 (false) or 1 (true).
+        """
+        b = ord(await self.read(1))
+        if b == 1:
+            return True
+        if b == 0:
+            return False
+        fail_msg = f"Invalid value for boolean: {b}"
+        raise schema.AvroException(fail_msg)
+
+    async def read_int(self):
+        """
+        int and long values are written using variable-length, zig-zag coding.
+        """
+        return await self.read_long()
+
+    async def read_long(self):
+        """
+        int and long values are written using variable-length, zig-zag coding.
+        """
+        b = ord(await self.read(1))
+        n = b & 0x7F
+        shift = 7
+        while (b & 0x80) != 0:
+            b = ord(await self.read(1))
+            n |= (b & 0x7F) << shift
+            shift += 7
+        datum = (n >> 1) ^ -(n & 1)
+        return datum
+
+    async def read_float(self):
+        """
+        A float is written as 4 bytes.
+        The float is converted into a 32-bit integer using a method equivalent to
+        Java's floatToIntBits and then encoded in little-endian format.
+        """
+        return STRUCT_FLOAT.unpack(await self.read(4))[0]
+
+    async def read_double(self):
+        """
+        A double is written as 8 bytes.
+        The double is converted into a 64-bit integer using a method equivalent to
+        Java's doubleToLongBits and then encoded in little-endian format.
+        """
+        return STRUCT_DOUBLE.unpack(await self.read(8))[0]
+
+    async def read_bytes(self):
+        """
+        Bytes are encoded as a long followed by that many bytes of data.
+        """
+        nbytes = await self.read_long()
+        assert (nbytes >= 0), nbytes
+        return await self.read(nbytes)
+
+    async def read_utf8(self):
+        """
+        A string is encoded as a long followed by
+        that many bytes of UTF-8 encoded character data.
+        """
+        input_bytes = await self.read_bytes()
+        if PY3:
+            try:
+                return input_bytes.decode('utf-8')
+            except UnicodeDecodeError as exn:
+                logger.error('Invalid UTF-8 input bytes: %r', input_bytes)
+                raise exn
+        else:
+            # PY2
+            return unicode(input_bytes, "utf-8") # pylint: disable=undefined-variable
+
+    def skip_null(self):
+        pass
+
+    async def skip_boolean(self):
+        await self.skip(1)
+
+    async def skip_int(self):
+        await self.skip_long()
+
+    async def skip_long(self):
+        b = ord(await self.read(1))
+        while (b & 0x80) != 0:
+            b = ord(await self.read(1))
+
+    async def skip_float(self):
+        await self.skip(4)
+
+    async def skip_double(self):
+        await self.skip(8)
+
+    async def skip_bytes(self):
+        await self.skip(await self.read_long())
+
+    async def skip_utf8(self):
+        await self.skip_bytes()
+
+    async def skip(self, n):
+        await self.reader.seek(await self.reader.tell() + n)
+
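+# Illustrative note (a sketch, not part of the module): AsyncBinaryDecoder expects
+# a reader whose read/seek/tell are awaitable, e.g. a thin async wrapper:
+#
+#     class AsyncBytesReader:
+#         def __init__(self, data): self._buf = BytesIO(data)
+#         async def read(self, n): return self._buf.read(n)
+#         async def seek(self, pos): return self._buf.seek(pos)
+#         async def tell(self): return self._buf.tell()
+#
+#     decoder = AsyncBinaryDecoder(AsyncBytesReader(b'\x04'))
+#     assert await decoder.read_long() == 2   # run inside a coroutine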
+
+# ------------------------------------------------------------------------------
+# DatumReader
+
+
+class AsyncDatumReader(object):
+    """Deserialize Avro-encoded data into a Python data structure."""
+
+    def __init__(self, writer_schema=None):
+        """
+        As defined in the Avro specification, we call the schema encoded
+        in the data the "writer's schema", and the schema expected by the
+        reader the "reader's schema".
+        """
+        self._writer_schema = writer_schema
+
+    # read/write properties
+    def set_writer_schema(self, writer_schema):
+        self._writer_schema = writer_schema
+
+    writer_schema = property(lambda self: self._writer_schema,
+                             set_writer_schema)
+
+    async def read(self, decoder):
+        return await self.read_data(self.writer_schema, decoder)
+
+    async def read_data(self, writer_schema, decoder):
+        # function dispatch for reading data based on type of writer's schema
+        if writer_schema.type == 'null':
+            result = decoder.read_null()
+        elif writer_schema.type == 'boolean':
+            result = await decoder.read_boolean()
+        elif writer_schema.type == 'string':
+            result = await decoder.read_utf8()
+        elif writer_schema.type == 'int':
+            result = await decoder.read_int()
+        elif writer_schema.type == 'long':
+            result = await decoder.read_long()
+        elif writer_schema.type == 'float':
+            result = await decoder.read_float()
+        elif writer_schema.type == 'double':
+            result = await decoder.read_double()
+        elif writer_schema.type == 'bytes':
+            result = await decoder.read_bytes()
+        elif writer_schema.type == 'fixed':
+            result = await self.read_fixed(writer_schema, decoder)
+        elif writer_schema.type == 'enum':
+            result = await self.read_enum(writer_schema, decoder)
+        elif writer_schema.type == 'array':
+            result = await self.read_array(writer_schema, decoder)
+        elif writer_schema.type == 'map':
+            result = await self.read_map(writer_schema, decoder)
+        elif writer_schema.type in ['union', 'error_union']:
+            result = await self.read_union(writer_schema, decoder)
+        elif writer_schema.type in ['record', 'error', 'request']:
+            result = await self.read_record(writer_schema, decoder)
+        else:
+            fail_msg = f"Cannot read unknown schema type: {writer_schema.type}"
+            raise schema.AvroException(fail_msg)
+        return result
+
+    async def skip_data(self, writer_schema, decoder):
+        if writer_schema.type == 'null':
+            result = decoder.skip_null()
+        elif writer_schema.type == 'boolean':
+            result = await decoder.skip_boolean()
+        elif writer_schema.type == 'string':
+            result = await decoder.skip_utf8()
+        elif writer_schema.type == 'int':
+            result = await decoder.skip_int()
+        elif writer_schema.type == 'long':
+            result = await decoder.skip_long()
+        elif writer_schema.type == 'float':
+            result = await decoder.skip_float()
+        elif writer_schema.type == 'double':
+            result = await decoder.skip_double()
+        elif writer_schema.type == 'bytes':
+            result = await decoder.skip_bytes()
+        elif writer_schema.type == 'fixed':
+            result = await self.skip_fixed(writer_schema, decoder)
+        elif writer_schema.type == 'enum':
+            result = await self.skip_enum(decoder)
+        elif writer_schema.type == 'array':
+            await self.skip_array(writer_schema, decoder)
+            result = None
+        elif writer_schema.type == 'map':
+            await self.skip_map(writer_schema, decoder)
+            result = None
+        elif writer_schema.type in ['union', 'error_union']:
+            result = await self.skip_union(writer_schema, decoder)
+        elif writer_schema.type in ['record', 'error', 'request']:
+            await self.skip_record(writer_schema, decoder)
+            result = None
+        else:
+            fail_msg = f"Unknown schema type: {writer_schema.type}"
+            raise schema.AvroException(fail_msg)
+        return result
+
+    # Fixed instances are encoded using the number of bytes declared in the schema.
+    @staticmethod
+    async def read_fixed(writer_schema, decoder):
+        return await decoder.read(writer_schema.size)
+
+    @staticmethod
+    async def skip_fixed(writer_schema, decoder):
+        return await decoder.skip(writer_schema.size)
+
+    # An enum is encoded by an int, representing the zero-based position of the symbol in the schema.
+    @staticmethod
+    async def read_enum(writer_schema, decoder):
+        # read data
+        index_of_symbol = await decoder.read_int()
+        if index_of_symbol >= len(writer_schema.symbols):
+            fail_msg = f"Can't access enum index {index_of_symbol} for enum with {len(writer_schema.symbols)} symbols"
+            raise SchemaResolutionException(fail_msg, writer_schema)
+        read_symbol = writer_schema.symbols[index_of_symbol]
+        return read_symbol
+
+    @staticmethod
+    async def skip_enum(decoder):
+        return await decoder.skip_int()
+
+    # Arrays are encoded as a series of blocks.
+
+    # Each block consists of a long count value, followed by that many array items.
+    # A block with count zero indicates the end of the array. Each item is encoded per the array's item schema.
+
+    # If a block's count is negative, then the count is followed immediately by a long block size,
+    # indicating the number of bytes in the block.
+    # The actual count in this case is the absolute value of the count written.
+    async def read_array(self, writer_schema, decoder):
+        read_items = []
+        block_count = await decoder.read_long()
+        while block_count != 0:
+            if block_count < 0:
+                block_count = -block_count
+                await decoder.read_long()
+            for _ in range(block_count):
+                read_items.append(await self.read_data(writer_schema.items, decoder))
+            block_count = await decoder.read_long()
+        return read_items
+
+    async def skip_array(self, writer_schema, decoder):
+        block_count = await decoder.read_long()
+        while block_count != 0:
+            if block_count < 0:
+                block_size = await decoder.read_long()
+                await decoder.skip(block_size)
+            else:
+                for _ in range(block_count):
+                    await self.skip_data(writer_schema.items, decoder)
+            block_count = await decoder.read_long()
+
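+    # Illustrative sketch of the block framing described above, assuming the
+    # standard Avro zigzag/varint encoding that read_long decodes:
+    #
+    #     def _enc_long(n):  # zigzag, then base-128 varint
+    #         z = (n << 1) ^ (n >> 63)
+    #         out = bytearray()
+    #         while True:
+    #             out.append((z & 0x7F) | (0x80 if z >> 7 else 0))
+    #             z >>= 7
+    #             if not z:
+    #                 return bytes(out)
+    #
+    #     # The long array [1, 2]: count 2, the two items, then the 0 terminator:
+    #     _enc_long(2) + _enc_long(1) + _enc_long(2) + _enc_long(0)  # b'\x04\x02\x04\x00'
+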
+    # Maps are encoded as a series of blocks.
+
+    # Each block consists of a long count value, followed by that many key/value pairs.
+    # A block with count zero indicates the end of the map. Each item is encoded per the map's value schema.
+
+    # If a block's count is negative, then the count is followed immediately by a long block size,
+    # indicating the number of bytes in the block.
+    # The actual count in this case is the absolute value of the count written.
+    async def read_map(self, writer_schema, decoder):
+        read_items = {}
+        block_count = await decoder.read_long()
+        while block_count != 0:
+            if block_count < 0:
+                block_count = -block_count
+                await decoder.read_long()
+            for _ in range(block_count):
+                key = await decoder.read_utf8()
+                read_items[key] = await self.read_data(writer_schema.values, decoder)
+            block_count = await decoder.read_long()
+        return read_items
+
+    async def skip_map(self, writer_schema, decoder):
+        block_count = await decoder.read_long()
+        while block_count != 0:
+            if block_count < 0:
+                block_size = await decoder.read_long()
+                await decoder.skip(block_size)
+            else:
+                for _ in range(block_count):
+                    await decoder.skip_utf8()
+                    await self.skip_data(writer_schema.values, decoder)
+            block_count = await decoder.read_long()
+
+    # A union is encoded by first writing a long value indicating
+    # the zero-based position within the union of the schema of its value.
+    # The value is then encoded per the indicated schema within the union.
+    async def read_union(self, writer_schema, decoder):
+        # schema resolution
+        index_of_schema = int(await decoder.read_long())
+        if index_of_schema >= len(writer_schema.schemas):
+            fail_msg = (f"Can't access branch index {index_of_schema} "
+                    f"for union with {len(writer_schema.schemas)} branches")
+            raise SchemaResolutionException(fail_msg, writer_schema)
+        selected_writer_schema = writer_schema.schemas[index_of_schema]
+
+        # read data
+        return await self.read_data(selected_writer_schema, decoder)
+
+    async def skip_union(self, writer_schema, decoder):
+        index_of_schema = int(await decoder.read_long())
+        if index_of_schema >= len(writer_schema.schemas):
+            fail_msg = (f"Can't access branch index {index_of_schema} "
+                    f"for union with {len(writer_schema.schemas)} branches")
+            raise SchemaResolutionException(fail_msg, writer_schema)
+        return await self.skip_data(writer_schema.schemas[index_of_schema], decoder)
+
+    # A record is encoded by encoding the values of its fields
+    # in the order that they are declared. In other words, a record
+    # is encoded as just the concatenation of the encodings of its fields.
+    # Field values are encoded per their schema.
+
+    # Schema Resolution:
+    #     * the ordering of fields may be different: fields are matched by name.
+    #     * schemas for fields with the same name in both records are resolved
+    #     recursively.
+    #     * if the writer's record contains a field with a name not present in the
+    #     reader's record, the writer's value for that field is ignored.
+    #     * if the reader's record schema has a field that contains a default value,
+    #     and writer's schema does not have a field with the same name, then the
+    #     reader should use the default value from its field.
+    #     * if the reader's record schema has a field with no default value, and
+    #     writer's schema does not have a field with the same name, then the
+    #     field's value is unset.
+    async def read_record(self, writer_schema, decoder):
+        # schema resolution
+        read_record = {}
+        for field in writer_schema.fields:
+            field_val = await self.read_data(field.type, decoder)
+            read_record[field.name] = field_val
+        return read_record
+
+    async def skip_record(self, writer_schema, decoder):
+        for field in writer_schema.fields:
+            await self.skip_data(field.type, decoder)
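+
+    # Illustrative sketch of the wire format described above: a record with
+    # fields (a: long, b: string) and value {'a': 1, 'b': 'hi'} is encoded as
+    # long(1) + string('hi') = b'\x02' + b'\x04hi' -- just the concatenation of
+    # the field encodings, with no field names or delimiters on the wire.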
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/blob/_shared/avro/datafile.py b/.venv/lib/python3.12/site-packages/azure/storage/blob/_shared/avro/datafile.py
new file mode 100644
index 00000000..757e0329
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/blob/_shared/avro/datafile.py
@@ -0,0 +1,257 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+# pylint: disable=docstring-missing-return, docstring-missing-rtype
+
+"""Read/Write Avro File Object Containers."""
+
+import io
+import logging
+import sys
+import zlib
+
+from ..avro import avro_io
+from ..avro import schema
+
+PY3 = sys.version_info[0] == 3
+
+logger = logging.getLogger(__name__)
+
+# ------------------------------------------------------------------------------
+# Constants
+
+# Version of the container file:
+VERSION = 1
+
+if PY3:
+    MAGIC = b'Obj' + bytes([VERSION])
+    MAGIC_SIZE = len(MAGIC)
+else:
+    MAGIC = 'Obj' + chr(VERSION)
+    MAGIC_SIZE = len(MAGIC)
+
+# Size of the synchronization marker, in number of bytes:
+SYNC_SIZE = 16
+
+# Schema of the container header:
+META_SCHEMA = schema.parse("""
+{
+  "type": "record", "name": "org.apache.avro.file.Header",
+  "fields": [{
+    "name": "magic",
+    "type": {"type": "fixed", "name": "magic", "size": %(magic_size)d}
+  }, {
+    "name": "meta",
+    "type": {"type": "map", "values": "bytes"}
+  }, {
+    "name": "sync",
+    "type": {"type": "fixed", "name": "sync", "size": %(sync_size)d}
+  }]
+}
+""" % {
+    'magic_size': MAGIC_SIZE,
+    'sync_size': SYNC_SIZE,
+})
+
+# Codecs supported by container files:
+VALID_CODECS = frozenset(['null', 'deflate'])
+
+# Metadata key associated to the schema:
+SCHEMA_KEY = "avro.schema"
+
+
+# ------------------------------------------------------------------------------
+# Exceptions
+
+
+class DataFileException(schema.AvroException):
+    """Problem reading or writing file object containers."""
+
+# ------------------------------------------------------------------------------
+
+
+class DataFileReader(object):  # pylint: disable=too-many-instance-attributes
+    """Read files written by DataFileWriter."""
+
+    def __init__(self, reader, datum_reader, **kwargs):
+        """Initializes a new data file reader.
+
+        Args:
+          reader: Open file to read from.
+          datum_reader: Avro datum reader.
+        """
+        self._reader = reader
+        self._raw_decoder = avro_io.BinaryDecoder(reader)
+        self._header_reader = kwargs.pop('header_reader', None)
+        self._header_decoder = None if self._header_reader is None else avro_io.BinaryDecoder(self._header_reader)
+        self._datum_decoder = None  # Maybe reset at every block.
+        self._datum_reader = datum_reader
+
+        # In case self._reader only has partial content (without the header),
+        # seek(0, 0) to make sure we read the (partial) content from the beginning.
+        self._reader.seek(0, 0)
+
+        # read the header: magic, meta, sync
+        self._read_header()
+
+        # ensure codec is valid
+        avro_codec_raw = self.get_meta('avro.codec')
+        if avro_codec_raw is None:
+            self.codec = "null"
+        else:
+            self.codec = avro_codec_raw.decode('utf-8')
+        if self.codec not in VALID_CODECS:
+            raise DataFileException(f"Unknown codec: {self.codec}.")
+
+        # get ready to read
+        self._block_count = 0
+
+        # object_position supports resuming a future read from the current position,
+        # so there is no need to download the avro file from the beginning.
+        if hasattr(self._reader, 'object_position'):
+            self.reader.track_object_position()
+
+        self._cur_object_index = 0
+        # header_reader indicates the reader only has partial content. That reader has no
+        # block header, so we use the block count stored from the previous read.
+        # Also, ChangeFeed only uses codec==null, so _raw_decoder is sufficient.
+        if self._header_reader is not None:
+            self._datum_decoder = self._raw_decoder
+
+        self.datum_reader.writer_schema = (
+            schema.parse(self.get_meta(SCHEMA_KEY).decode('utf-8')))
+
+    def __enter__(self):
+        return self
+
+    def __exit__(self, data_type, value, traceback):
+        # Perform a close if there's no exception
+        if data_type is None:
+            self.close()
+
+    def __iter__(self):
+        return self
+
+    # read-only properties
+    @property
+    def reader(self):
+        return self._reader
+
+    @property
+    def raw_decoder(self):
+        return self._raw_decoder
+
+    @property
+    def datum_decoder(self):
+        return self._datum_decoder
+
+    @property
+    def datum_reader(self):
+        return self._datum_reader
+
+    @property
+    def sync_marker(self):
+        return self._sync_marker
+
+    @property
+    def meta(self):
+        return self._meta
+
+    # read/write properties
+    @property
+    def block_count(self):
+        return self._block_count
+
+    def get_meta(self, key):
+        """Reports the value of a given metadata key.
+
+        :param str key: Metadata key to report the value of.
+        :returns: Value associated to the metadata key, as bytes.
+        :rtype: bytes
+        """
+        return self._meta.get(key)
+
+    def _read_header(self):
+        header_reader = self._header_reader if self._header_reader else self._reader
+        header_decoder = self._header_decoder if self._header_decoder else self._raw_decoder
+
+        # seek to the beginning of the file to get magic block
+        header_reader.seek(0, 0)
+
+        # read header into a dict
+        header = self.datum_reader.read_data(META_SCHEMA, header_decoder)
+
+        # check magic number
+        if header.get('magic') != MAGIC:
+            fail_msg = f"Not an Avro data file: {header.get('magic')} doesn't match {MAGIC!r}."
+            raise schema.AvroException(fail_msg)
+
+        # set metadata
+        self._meta = header['meta']
+
+        # set sync marker
+        self._sync_marker = header['sync']
+
+    def _read_block_header(self):
+        self._block_count = self.raw_decoder.read_long()
+        if self.codec == "null":
+            # Skip a long; we don't need to use the length.
+            self.raw_decoder.skip_long()
+            self._datum_decoder = self._raw_decoder
+        elif self.codec == 'deflate':
+            # Compressed data is stored as (length, data), which
+            # corresponds to how the "bytes" type is encoded.
+            data = self.raw_decoder.read_bytes()
+            # -15 is the log of the window size; negative indicates
+            # "raw" (no zlib headers) decompression.  See zlib.h.
+            uncompressed = zlib.decompress(data, -15)
+            self._datum_decoder = avro_io.BinaryDecoder(io.BytesIO(uncompressed))
+        else:
+            raise DataFileException(f"Unknown codec: {self.codec!r}")
+
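+    # Illustrative round trip for the 'deflate' branch above (raw DEFLATE with
+    # no zlib header, hence wbits=-15):
+    #
+    #     c = zlib.compressobj(9, zlib.DEFLATED, -15)
+    #     payload = c.compress(b'avro block') + c.flush()
+    #     assert zlib.decompress(payload, -15) == b'avro block'
+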
+    def _skip_sync(self):
+        """
+        Read SYNC_SIZE bytes; raise StopIteration if the input is exhausted.
+        If the bytes read do not match the sync marker, seek back to where we started.
+        """
+        proposed_sync_marker = self.reader.read(SYNC_SIZE)
+        if SYNC_SIZE > 0 and not proposed_sync_marker:
+            raise StopIteration
+        if proposed_sync_marker != self.sync_marker:
+            self.reader.seek(-SYNC_SIZE, 1)
+
+    def __next__(self):
+        """Return the next datum in the file."""
+        if self.block_count == 0:
+            self._skip_sync()
+
+            # object_position supports resuming a future read from the current position,
+            # so there is no need to download the avro file from the beginning.
+            if hasattr(self._reader, 'object_position'):
+                self.reader.track_object_position()
+            self._cur_object_index = 0
+
+            self._read_block_header()
+
+        datum = self.datum_reader.read(self.datum_decoder)
+        self._block_count -= 1
+        self._cur_object_index += 1
+
+        # object_position supports resuming a future read from the current position:
+        # track the index of the next item to be read, and the offset before
+        # the next sync marker.
+        if hasattr(self._reader, 'object_position'):
+            if self.block_count == 0:
+                # The next event to be read is at index 0 in the new chunk of blocks.
+                self.reader.track_object_position()
+                self.reader.set_object_index(0)
+            else:
+                self.reader.set_object_index(self._cur_object_index)
+
+        return datum
+
+    def close(self):
+        """Close this reader."""
+        self.reader.close()
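+
+
+if __name__ == '__main__':  # pragma: no cover
+    # Hedged usage sketch, not part of the SDK surface. Assumes a hypothetical
+    # "example.avro" on disk and that avro_io provides a DatumReader, as in the
+    # upstream Avro implementation this module derives from.
+    with open('example.avro', 'rb') as fp:
+        with DataFileReader(fp, avro_io.DatumReader()) as avro_reader:
+            for record in avro_reader:
+                print(record)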
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/blob/_shared/avro/datafile_async.py b/.venv/lib/python3.12/site-packages/azure/storage/blob/_shared/avro/datafile_async.py
new file mode 100644
index 00000000..85dc5cb5
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/blob/_shared/avro/datafile_async.py
@@ -0,0 +1,210 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+# pylint: disable=docstring-missing-return, docstring-missing-rtype
+
+"""Read/Write Avro File Object Containers."""
+
+import logging
+import sys
+
+from ..avro import avro_io_async
+from ..avro import schema
+from .datafile import DataFileException
+from .datafile import MAGIC, SYNC_SIZE, META_SCHEMA, SCHEMA_KEY
+
+
+PY3 = sys.version_info[0] == 3
+
+logger = logging.getLogger(__name__)
+
+# ------------------------------------------------------------------------------
+# Constants
+
+# Codecs supported by container files:
+VALID_CODECS = frozenset(['null'])
+
+
+class AsyncDataFileReader(object):  # pylint: disable=too-many-instance-attributes
+    """Read files written by DataFileWriter."""
+
+    def __init__(self, reader, datum_reader, **kwargs):
+        """Initializes a new data file reader.
+
+        Args:
+          reader: Open file to read from.
+          datum_reader: Avro datum reader.
+        """
+        self._reader = reader
+        self._raw_decoder = avro_io_async.AsyncBinaryDecoder(reader)
+        self._header_reader = kwargs.pop('header_reader', None)
+        self._header_decoder = None if self._header_reader is None else \
+            avro_io_async.AsyncBinaryDecoder(self._header_reader)
+        self._datum_decoder = None  # Maybe reset at every block.
+        self._datum_reader = datum_reader
+        self.codec = "null"
+        self._block_count = 0
+        self._cur_object_index = 0
+        self._meta = None
+        self._sync_marker = None
+
+    async def init(self):
+        # In case self._reader only has partial content (without the header),
+        # seek(0, 0) to make sure we read the (partial) content from the beginning.
+        await self._reader.seek(0, 0)
+
+        # read the header: magic, meta, sync
+        await self._read_header()
+
+        # ensure codec is valid
+        avro_codec_raw = self.get_meta('avro.codec')
+        if avro_codec_raw is None:
+            self.codec = "null"
+        else:
+            self.codec = avro_codec_raw.decode('utf-8')
+        if self.codec not in VALID_CODECS:
+            raise DataFileException(f"Unknown codec: {self.codec}.")
+
+        # get ready to read
+        self._block_count = 0
+
+        # object_position supports resuming a future read from the current position,
+        # so there is no need to download the avro file from the beginning.
+        if hasattr(self._reader, 'object_position'):
+            self.reader.track_object_position()
+
+        # header_reader indicates the reader only has partial content. That reader has no
+        # block header, so we use the block count stored from the previous read.
+        # Also, ChangeFeed only uses codec==null, so _raw_decoder is sufficient.
+        if self._header_reader is not None:
+            self._datum_decoder = self._raw_decoder
+        self.datum_reader.writer_schema = (
+            schema.parse(self.get_meta(SCHEMA_KEY).decode('utf-8')))
+        return self
+
+    async def __aenter__(self):
+        return self
+
+    async def __aexit__(self, data_type, value, traceback):
+        # Perform a close if there's no exception
+        if data_type is None:
+            self.close()
+
+    def __aiter__(self):
+        return self
+
+    # read-only properties
+    @property
+    def reader(self):
+        return self._reader
+
+    @property
+    def raw_decoder(self):
+        return self._raw_decoder
+
+    @property
+    def datum_decoder(self):
+        return self._datum_decoder
+
+    @property
+    def datum_reader(self):
+        return self._datum_reader
+
+    @property
+    def sync_marker(self):
+        return self._sync_marker
+
+    @property
+    def meta(self):
+        return self._meta
+
+    # read/write properties
+    @property
+    def block_count(self):
+        return self._block_count
+
+    def get_meta(self, key):
+        """Reports the value of a given metadata key.
+
+        :param str key: Metadata key to report the value of.
+        :returns: Value associated to the metadata key, as bytes.
+        :rtype: bytes
+        """
+        return self._meta.get(key)
+
+    async def _read_header(self):
+        header_reader = self._header_reader if self._header_reader else self._reader
+        header_decoder = self._header_decoder if self._header_decoder else self._raw_decoder
+
+        # seek to the beginning of the file to get magic block
+        await header_reader.seek(0, 0)
+
+        # read header into a dict
+        header = await self.datum_reader.read_data(META_SCHEMA, header_decoder)
+
+        # check magic number
+        if header.get('magic') != MAGIC:
+            fail_msg = f"Not an Avro data file: {header.get('magic')} doesn't match {MAGIC!r}."
+            raise schema.AvroException(fail_msg)
+
+        # set metadata
+        self._meta = header['meta']
+
+        # set sync marker
+        self._sync_marker = header['sync']
+
+    async def _read_block_header(self):
+        self._block_count = await self.raw_decoder.read_long()
+        if self.codec == "null":
+            # Skip a long; we don't need to use the length.
+            await self.raw_decoder.skip_long()
+            self._datum_decoder = self._raw_decoder
+        else:
+            raise DataFileException(f"Unknown codec: {self.codec!r}")
+
+    async def _skip_sync(self):
+        """
+        Read SYNC_SIZE bytes; raise StopAsyncIteration if the input is exhausted.
+        If the bytes read do not match the sync marker, seek back to where we started.
+        """
+        proposed_sync_marker = await self.reader.read(SYNC_SIZE)
+        if SYNC_SIZE > 0 and not proposed_sync_marker:
+            raise StopAsyncIteration
+        if proposed_sync_marker != self.sync_marker:
+            await self.reader.seek(-SYNC_SIZE, 1)
+
+    async def __anext__(self):
+        """Return the next datum in the file."""
+        if self.block_count == 0:
+            await self._skip_sync()
+
+            # object_position supports resuming a future read from the current position,
+            # so there is no need to download the avro file from the beginning.
+            if hasattr(self._reader, 'object_position'):
+                await self.reader.track_object_position()
+            self._cur_object_index = 0
+
+            await self._read_block_header()
+
+        datum = await self.datum_reader.read(self.datum_decoder)
+        self._block_count -= 1
+        self._cur_object_index += 1
+
+        # object_position supports resuming a future read from the current position:
+        # track the index of the next item to be read, and the offset before
+        # the next sync marker.
+        if hasattr(self._reader, 'object_position'):
+            if self.block_count == 0:
+                # The next event to be read is at index 0 in the new chunk of blocks.
+                await self.reader.track_object_position()
+                await self.reader.set_object_index(0)
+            else:
+                await self.reader.set_object_index(self._cur_object_index)
+
+        return datum
+
+    def close(self):
+        """Close this reader."""
+        self.reader.close()
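+
+
+if __name__ == '__main__':  # pragma: no cover
+    # Hedged usage sketch, not part of the SDK surface. Assumes an async
+    # file-like object `stream` (awaitable read/seek) and that avro_io_async
+    # provides an AsyncDatumReader mirroring the sync DatumReader.
+    async def _dump(stream):
+        reader = AsyncDataFileReader(stream, avro_io_async.AsyncDatumReader())
+        await reader.init()  # reads the header, codec and writer schema first
+        async for record in reader:
+            print(record)
+
+    # asyncio.run(_dump(my_stream))  # my_stream: hypothetical async stream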
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/blob/_shared/avro/schema.py b/.venv/lib/python3.12/site-packages/azure/storage/blob/_shared/avro/schema.py
new file mode 100644
index 00000000..d5484abc
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/blob/_shared/avro/schema.py
@@ -0,0 +1,1178 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+# pylint: disable=docstring-missing-return, docstring-missing-rtype, too-many-lines
+
+"""Representation of Avro schemas.
+
+A schema may be one of:
+ - A record, mapping field names to field value data;
+ - An error, equivalent to a record;
+ - An enum, containing one of a small set of symbols;
+ - An array of values, all of the same schema;
+ - A map containing string/value pairs, each of a declared schema;
+ - A union of other schemas;
+ - A fixed sized binary object;
+ - A unicode string;
+ - A sequence of bytes;
+ - A 32-bit signed int;
+ - A 64-bit signed long;
+ - A 32-bit floating-point float;
+ - A 64-bit floating-point double;
+ - A boolean;
+ - Null.
+"""
+
+import abc
+import json
+import logging
+import re
+logger = logging.getLogger(__name__)
+
+# ------------------------------------------------------------------------------
+# Constants
+
+# Log level more verbose than DEBUG=10, INFO=20, etc.
+DEBUG_VERBOSE = 5
+
+NULL = 'null'
+BOOLEAN = 'boolean'
+STRING = 'string'
+BYTES = 'bytes'
+INT = 'int'
+LONG = 'long'
+FLOAT = 'float'
+DOUBLE = 'double'
+FIXED = 'fixed'
+ENUM = 'enum'
+RECORD = 'record'
+ERROR = 'error'
+ARRAY = 'array'
+MAP = 'map'
+UNION = 'union'
+
+# Request and error unions are part of Avro protocols:
+REQUEST = 'request'
+ERROR_UNION = 'error_union'
+
+PRIMITIVE_TYPES = frozenset([
+    NULL,
+    BOOLEAN,
+    STRING,
+    BYTES,
+    INT,
+    LONG,
+    FLOAT,
+    DOUBLE,
+])
+
+NAMED_TYPES = frozenset([
+    FIXED,
+    ENUM,
+    RECORD,
+    ERROR,
+])
+
+VALID_TYPES = frozenset.union(
+    PRIMITIVE_TYPES,
+    NAMED_TYPES,
+    [
+        ARRAY,
+        MAP,
+        UNION,
+        REQUEST,
+        ERROR_UNION,
+    ],
+)
+
+SCHEMA_RESERVED_PROPS = frozenset([
+    'type',
+    'name',
+    'namespace',
+    'fields',  # Record
+    'items',  # Array
+    'size',  # Fixed
+    'symbols',  # Enum
+    'values',  # Map
+    'doc',
+])
+
+FIELD_RESERVED_PROPS = frozenset([
+    'default',
+    'name',
+    'doc',
+    'order',
+    'type',
+])
+
+VALID_FIELD_SORT_ORDERS = frozenset([
+    'ascending',
+    'descending',
+    'ignore',
+])
+
+
+# ------------------------------------------------------------------------------
+# Exceptions
+
+
+class Error(Exception):
+    """Base class for errors in this module."""
+
+
+class AvroException(Error):
+    """Generic Avro schema error."""
+
+
+class SchemaParseException(AvroException):
+    """Error while parsing a JSON schema descriptor."""
+
+
+class Schema(metaclass=abc.ABCMeta):
+    """Abstract base class for all Schema classes."""
+
+    def __init__(self, data_type, other_props=None):
+        """Initializes a new schema object.
+
+        Args:
+          data_type: Type of the schema to initialize.
+          other_props: Optional dictionary of additional properties.
+        """
+        if data_type not in VALID_TYPES:
+            raise SchemaParseException(f'{data_type!r} is not a valid Avro type.')
+
+        # All properties of this schema, as a map: property name -> property value
+        self._props = {}
+
+        self._props['type'] = data_type
+        self._type = data_type
+
+        if other_props:
+            self._props.update(other_props)
+
+    @property
+    def namespace(self):
+        """Returns: the namespace this schema belongs to, if any, or None."""
+        return self._props.get('namespace', None)
+
+    @property
+    def type(self):
+        """Returns: the type of this schema."""
+        return self._type
+
+    @property
+    def doc(self):
+        """Returns: the documentation associated to this schema, if any, or None."""
+        return self._props.get('doc', None)
+
+    @property
+    def props(self):
+        """Reports all the properties of this schema.
+
+        Includes all properties, reserved and non-reserved.
+        JSON properties of this schema are directly generated from this dict.
+
+        Returns:
+          A dictionary of properties associated to this schema.
+        """
+        return self._props
+
+    @property
+    def other_props(self):
+        """Returns: the dictionary of non-reserved properties."""
+        return dict(filter_keys_out(items=self._props, keys=SCHEMA_RESERVED_PROPS))
+
+    def __str__(self):
+        """Returns: the JSON representation of this schema."""
+        return json.dumps(self.to_json(names=None))
+
+    # Converts the schema object into its AVRO specification representation.
+
+    # Schema types that have names (records, enums, and fixed) must be aware of not
+    # re-defining schemas that are already listed in the parameter names.
+    @abc.abstractmethod
+    def to_json(self, names):
+        ...
+
+
+# ------------------------------------------------------------------------------
+
+
+_RE_NAME = re.compile(r'[A-Za-z_][A-Za-z0-9_]*')
+
+_RE_FULL_NAME = re.compile(
+    r'^'
+    r'[.]?(?:[A-Za-z_][A-Za-z0-9_]*[.])*'  # optional namespace
+    r'([A-Za-z_][A-Za-z0-9_]*)'  # name
+    r'$'
+)
+
+
+class Name(object):
+    """Representation of an Avro name."""
+
+    def __init__(self, name, namespace=None):
+        """Parses an Avro name.
+
+        Args:
+          name: Avro name to parse (relative or absolute).
+          namespace: Optional explicit namespace if the name is relative.
+        """
+        # Normalize: namespace is always defined as a string, possibly empty.
+        if namespace is None:
+            namespace = ''
+
+        if '.' in name:
+            # name is absolute, namespace is ignored:
+            self._fullname = name
+
+            match = _RE_FULL_NAME.match(self._fullname)
+            if match is None:
+                raise SchemaParseException(
+                    f'Invalid absolute schema name: {self._fullname!r}.')
+
+            self._name = match.group(1)
+            self._namespace = self._fullname[:-(len(self._name) + 1)]
+
+        else:
+            # name is relative, combine with explicit namespace:
+            self._name = name
+            self._namespace = namespace
+            self._fullname = (self._name
+                              if (not self._namespace) else
+                              f'{self._namespace}.{self._name}')
+
+            # Validate the fullname:
+            if _RE_FULL_NAME.match(self._fullname) is None:
+                raise SchemaParseException(f"Invalid schema name {self._fullname!r} inferred from "
+                                           f"name {self._name!r} and namespace {self._namespace!r}.")
+
+    def __eq__(self, other):
+        if not isinstance(other, Name):
+            return NotImplemented
+        return self.fullname == other.fullname
+
+    @property
+    def simple_name(self):
+        """Returns: the simple name part of this name."""
+        return self._name
+
+    @property
+    def namespace(self):
+        """Returns: this name's namespace, possible the empty string."""
+        return self._namespace
+
+    @property
+    def fullname(self):
+        """Returns: the full name."""
+        return self._fullname
+
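+# Illustrative sketch: absolute names carry their own namespace, while
+# relative names combine with the explicit one. E.g.:
+#
+#     Name('a.b.MyRecord').namespace             == 'a.b'
+#     Name('MyRecord', namespace='a.b').fullname == 'a.b.MyRecord'
+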
+
+# ------------------------------------------------------------------------------
+
+
+class Names(object):
+    """Tracks Avro named schemas and default namespace during parsing."""
+
+    def __init__(self, default_namespace=None, names=None):
+        """Initializes a new name tracker.
+
+        Args:
+          default_namespace: Optional default namespace.
+          names: Optional initial mapping of known named schemas.
+        """
+        if names is None:
+            names = {}
+        self._names = names
+        self._default_namespace = default_namespace
+
+    @property
+    def names(self):
+        """Returns: the mapping of known named schemas."""
+        return self._names
+
+    @property
+    def default_namespace(self):
+        """Returns: the default namespace, if any, or None."""
+        return self._default_namespace
+
+    def new_with_default_namespace(self, namespace):
+        """Creates a new name tracker from this tracker, but with a new default ns.
+
+        :param Any namespace: New default namespace to use.
+        :returns: New name tracker with the specified default namespace.
+        :rtype: Names
+        """
+        return Names(names=self._names, default_namespace=namespace)
+
+    def get_name(self, name, namespace=None):
+        """Resolves the Avro name according to this name tracker's state.
+
+        :param Any name: Name to resolve (absolute or relative).
+        :param Optional[Any] namespace: Optional explicit namespace.
+        :returns: The specified name, resolved according to this tracker.
+        :rtype: Name
+        """
+        if namespace is None:
+            namespace = self._default_namespace
+        return Name(name=name, namespace=namespace)
+
+    def get_schema(self, name, namespace=None):
+        """Resolves an Avro schema by name.
+
+        :param Any name: Name (absolute or relative) of the Avro schema to look up.
+        :param Optional[Any] namespace: Optional explicit namespace.
+        :returns: The schema with the specified name, if any, or None
+        :rtype: Union[Any, None]
+        """
+        avro_name = self.get_name(name=name, namespace=namespace)
+        return self._names.get(avro_name.fullname, None)
+
+    # Given a properties dict, return it with the namespace removed if it matches our own default namespace.
+    def prune_namespace(self, properties):
+        if self.default_namespace is None:
+            # No default namespace -- no change.
+            return properties
+        if 'namespace' not in properties:
+            # The properties have no namespace -- no change.
+            return properties
+        if properties['namespace'] != self.default_namespace:
+            # The namespaces differ -- leave the properties alone.
+            return properties
+        # Both namespaces match, so the property is redundant; delete it.
+        prunable = properties.copy()
+        del prunable['namespace']
+        return prunable
+
+    def register(self, schema):
+        """Registers a new named schema in this tracker.
+
+        :param Any schema: Named Avro schema to register in this tracker.
+        """
+        if schema.fullname in VALID_TYPES:
+            raise SchemaParseException(
+                f'{schema.fullname} is a reserved type name.')
+        if schema.fullname in self.names:
+            raise SchemaParseException(
+                f'Avro name {schema.fullname!r} already exists.')
+
+        logger.log(DEBUG_VERBOSE, 'Register new name for %r', schema.fullname)
+        self._names[schema.fullname] = schema
+
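+
+# Illustrative usage sketch for the tracker above: a "namespace" property equal
+# to the tracker's default namespace is redundant in JSON output and is pruned.
+#
+#     _names = Names(default_namespace='a.b')
+#     _names.prune_namespace({'name': 'X', 'namespace': 'a.b'})  # -> {'name': 'X'}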
+
+# ------------------------------------------------------------------------------
+
+
+class NamedSchema(Schema):
+    """Abstract base class for named schemas.
+
+    Named schemas are enumerated in NAMED_TYPES.
+    """
+
+    def __init__(
+            self,
+            data_type,
+            name=None,
+            namespace=None,
+            names=None,
+            other_props=None,
+    ):
+        """Initializes a new named schema object.
+
+        Args:
+          data_type: Type of the named schema.
+          name: Name (absolute or relative) of the schema.
+          namespace: Optional explicit namespace if name is relative.
+          names: Tracker to resolve and register Avro names.
+          other_props: Optional map of additional properties of the schema.
+        """
+        assert (data_type in NAMED_TYPES), (f'Invalid named type: {data_type!r}')
+        self._avro_name = names.get_name(name=name, namespace=namespace)
+
+        super(NamedSchema, self).__init__(data_type, other_props)
+
+        names.register(self)
+
+        self._props['name'] = self.name
+        if self.namespace:
+            self._props['namespace'] = self.namespace
+
+    @property
+    def avro_name(self):
+        """Returns: the Name object describing this schema's name."""
+        return self._avro_name
+
+    @property
+    def name(self):
+        return self._avro_name.simple_name
+
+    @property
+    def namespace(self):
+        return self._avro_name.namespace
+
+    @property
+    def fullname(self):
+        return self._avro_name.fullname
+
+    def name_ref(self, names):
+        """Reports this schema name relative to the specified name tracker.
+
+        :param Any names: Avro name tracker to relativize this schema name against.
+        :returns: This schema name, relativized against the specified name tracker.
+        :rtype: Any
+        """
+        if self.namespace == names.default_namespace:
+            return self.name
+        return self.fullname
+
+    # Converts the schema object into its AVRO specification representation.
+
+    # Schema types that have names (records, enums, and fixed) must be aware
+    # of not re-defining schemas that are already listed in the parameter names.
+    @abc.abstractmethod
+    def to_json(self, names):
+        ...
+
+# ------------------------------------------------------------------------------
+
+
+_NO_DEFAULT = object()
+
+
+class Field(object):
+    """Representation of the schema of a field in a record."""
+
+    def __init__(
+            self,
+            data_type,
+            name,
+            index,
+            has_default,
+            default=_NO_DEFAULT,
+            order=None,
+            doc=None,
+            other_props=None
+    ):
+        """Initializes a new Field object.
+
+        Args:
+          data_type: Avro schema of the field.
+          name: Name of the field.
+          index: 0-based position of the field.
+          has_default:
+          default:
+          order:
+          doc:
+          other_props:
+        """
+        if (not isinstance(name, str)) or (not name):
+            raise SchemaParseException(f'Invalid record field name: {name!r}.')
+        if (order is not None) and (order not in VALID_FIELD_SORT_ORDERS):
+            raise SchemaParseException(f'Invalid record field order: {order!r}.')
+
+        # All properties of this record field:
+        self._props = {}
+
+        self._has_default = has_default
+        if other_props:
+            self._props.update(other_props)
+
+        self._index = index
+        self._type = self._props['type'] = data_type
+        self._name = self._props['name'] = name
+
+        if has_default:
+            self._props['default'] = default
+
+        if order is not None:
+            self._props['order'] = order
+
+        if doc is not None:
+            self._props['doc'] = doc
+
+    @property
+    def type(self):
+        """Returns: the schema of this field."""
+        return self._type
+
+    @property
+    def name(self):
+        """Returns: this field name."""
+        return self._name
+
+    @property
+    def index(self):
+        """Returns: the 0-based index of this field in the record."""
+        return self._index
+
+    @property
+    def default(self):
+        return self._props['default']
+
+    @property
+    def has_default(self):
+        return self._has_default
+
+    @property
+    def order(self):
+        return self._props.get('order', None)
+
+    @property
+    def doc(self):
+        return self._props.get('doc', None)
+
+    @property
+    def props(self):
+        return self._props
+
+    @property
+    def other_props(self):
+        return filter_keys_out(items=self._props, keys=FIELD_RESERVED_PROPS)
+
+    def __str__(self):
+        return json.dumps(self.to_json())
+
+    def to_json(self, names=None):
+        if names is None:
+            names = Names()
+        to_dump = self.props.copy()
+        to_dump['type'] = self.type.to_json(names)
+        return to_dump
+
+    def __eq__(self, that):
+        to_cmp = json.loads(str(self))
+        return to_cmp == json.loads(str(that))
+
+
+# ------------------------------------------------------------------------------
+# Primitive Types
+
+
+class PrimitiveSchema(Schema):
+    """Schema of a primitive Avro type.
+
+    Valid primitive types are defined in PRIMITIVE_TYPES.
+    """
+
+    def __init__(self, data_type, other_props=None):
+        """Initializes a new schema object for the specified primitive type.
+
+        Args:
+          data_type: Type of the schema to construct. Must be primitive.
+        """
+        if data_type not in PRIMITIVE_TYPES:
+            raise AvroException(f'{data_type!r} is not a valid primitive type.')
+        super(PrimitiveSchema, self).__init__(data_type, other_props=other_props)
+
+    @property
+    def name(self):
+        """Returns: the simple name of this schema."""
+        # The name of a primitive type is the type itself.
+        return self.type
+
+    @property
+    def fullname(self):
+        """Returns: the fully qualified name of this schema."""
+        # The full name is the simple name for primitive schema.
+        return self.name
+
+    def to_json(self, names=None):
+        if len(self.props) == 1:
+            return self.fullname
+        return self.props
+
+    def __eq__(self, that):
+        return self.props == that.props
+
+
+# ------------------------------------------------------------------------------
+# Complex Types (non-recursive)
+
+
+class FixedSchema(NamedSchema):
+    def __init__(
+            self,
+            name,
+            namespace,
+            size,
+            names=None,
+            other_props=None,
+    ):
+        # Ensure valid ctor args
+        if not isinstance(size, int):
+            fail_msg = 'Fixed Schema requires a valid integer for size property.'
+            raise AvroException(fail_msg)
+
+        super(FixedSchema, self).__init__(
+            data_type=FIXED,
+            name=name,
+            namespace=namespace,
+            names=names,
+            other_props=other_props,
+        )
+        self._props['size'] = size
+
+    @property
+    def size(self):
+        """Returns: the size of this fixed schema, in bytes."""
+        return self._props['size']
+
+    def to_json(self, names=None):
+        if names is None:
+            names = Names()
+        if self.fullname in names.names:
+            return self.name_ref(names)
+        names.names[self.fullname] = self
+        return names.prune_namespace(self.props)
+
+    def __eq__(self, that):
+        return self.props == that.props
+
+
+# ------------------------------------------------------------------------------
+
+
+class EnumSchema(NamedSchema):
+    def __init__(
+            self,
+            name,
+            namespace,
+            symbols,
+            names=None,
+            doc=None,
+            other_props=None,
+    ):
+        """Initializes a new enumeration schema object.
+
+        Args:
+          name: Simple name of this enumeration.
+          namespace: Optional namespace.
+          symbols: Ordered list of symbols defined in this enumeration.
+          names:
+          doc:
+          other_props:
+        """
+        symbols = tuple(symbols)
+        symbol_set = frozenset(symbols)
+        if (len(symbol_set) != len(symbols)
+                or not all(map(lambda symbol: isinstance(symbol, str), symbols))):
+            raise AvroException(
+                f'Invalid symbols for enum schema: {symbols!r}.')
+
+        super(EnumSchema, self).__init__(
+            data_type=ENUM,
+            name=name,
+            namespace=namespace,
+            names=names,
+            other_props=other_props,
+        )
+
+        self._props['symbols'] = symbols
+        if doc is not None:
+            self._props['doc'] = doc
+
+    @property
+    def symbols(self):
+        """Returns: the symbols defined in this enum."""
+        return self._props['symbols']
+
+    def to_json(self, names=None):
+        if names is None:
+            names = Names()
+        if self.fullname in names.names:
+            return self.name_ref(names)
+        names.names[self.fullname] = self
+        return names.prune_namespace(self.props)
+
+    def __eq__(self, that):
+        return self.props == that.props
+
+
+# ------------------------------------------------------------------------------
+# Complex Types (recursive)
+
+
+class ArraySchema(Schema):
+    """Schema of an array."""
+
+    def __init__(self, items, other_props=None):
+        """Initializes a new array schema object.
+
+        Args:
+          items: Avro schema of the array items.
+          other_props:
+        """
+        super(ArraySchema, self).__init__(
+            data_type=ARRAY,
+            other_props=other_props,
+        )
+        self._items_schema = items
+        self._props['items'] = items
+
+    @property
+    def items(self):
+        """Returns: the schema of the items in this array."""
+        return self._items_schema
+
+    def to_json(self, names=None):
+        if names is None:
+            names = Names()
+        to_dump = self.props.copy()
+        item_schema = self.items
+        to_dump['items'] = item_schema.to_json(names)
+        return to_dump
+
+    def __eq__(self, that):
+        to_cmp = json.loads(str(self))
+        return to_cmp == json.loads(str(that))
+
+
+# ------------------------------------------------------------------------------
+
+
+class MapSchema(Schema):
+    """Schema of a map."""
+
+    def __init__(self, values, other_props=None):
+        """Initializes a new map schema object.
+
+        Args:
+          values: Avro schema of the map values.
+          other_props:
+        """
+        super(MapSchema, self).__init__(
+            data_type=MAP,
+            other_props=other_props,
+        )
+        self._values_schema = values
+        self._props['values'] = values
+
+    @property
+    def values(self):
+        """Returns: the schema of the values in this map."""
+        return self._values_schema
+
+    def to_json(self, names=None):
+        if names is None:
+            names = Names()
+        to_dump = self.props.copy()
+        to_dump['values'] = self.values.to_json(names)
+        return to_dump
+
+    def __eq__(self, that):
+        to_cmp = json.loads(str(self))
+        return to_cmp == json.loads(str(that))
+
+
+# ------------------------------------------------------------------------------
+
+
+class UnionSchema(Schema):
+    """Schema of a union."""
+
+    def __init__(self, schemas):
+        """Initializes a new union schema object.
+
+        Args:
+          schemas: Ordered collection of schema branches in the union.
+        """
+        super(UnionSchema, self).__init__(data_type=UNION)
+        self._schemas = tuple(schemas)
+
+        # Validate the schema branches:
+
+        # All named schema names are unique:
+        named_branches = tuple(
+            filter(lambda schema: schema.type in NAMED_TYPES, self._schemas))
+        unique_names = frozenset(map(lambda schema: schema.fullname, named_branches))
+        if len(unique_names) != len(named_branches):
+            schemas = ''.join(map(lambda schema: (f'\n\t - {schema}'), self._schemas))
+            raise AvroException(f'Invalid union branches with duplicate schema name:{schemas}')
+
+        # Types are unique within unnamed schemas, and union is not allowed:
+        unnamed_branches = tuple(
+            filter(lambda schema: schema.type not in NAMED_TYPES, self._schemas))
+        unique_types = frozenset(map(lambda schema: schema.type, unnamed_branches))
+        if UNION in unique_types:
+            schemas = ''.join(map(lambda schema: (f'\n\t - {schema}'), self._schemas))
+            raise AvroException(f'Invalid union branches contain other unions:{schemas}')
+        if len(unique_types) != len(unnamed_branches):
+            schemas = ''.join(map(lambda schema: (f'\n\t - {schema}'), self._schemas))
+            raise AvroException(f'Invalid union branches with duplicate type:{schemas}')
+
+    @property
+    def schemas(self):
+        """Returns: the ordered list of schema branches in the union."""
+        return self._schemas
+
+    def to_json(self, names=None):
+        if names is None:
+            names = Names()
+        to_dump = []
+        for schema in self.schemas:
+            to_dump.append(schema.to_json(names))
+        return to_dump
+
+    def __eq__(self, that):
+        to_cmp = json.loads(str(self))
+        return to_cmp == json.loads(str(that))
+
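+# Illustrative sketch of the branch validation above: duplicate unnamed types
+# (and nested unions) are rejected.
+#
+#     UnionSchema([PrimitiveSchema(NULL), PrimitiveSchema(STRING)])  # valid
+#     UnionSchema([PrimitiveSchema(NULL), PrimitiveSchema(NULL)])    # AvroException
+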
+
+# ------------------------------------------------------------------------------
+
+
+class ErrorUnionSchema(UnionSchema):
+    """Schema representing the declared errors of a protocol message."""
+
+    def __init__(self, schemas):
+        """Initializes an error-union schema.
+
+        Args:
+          schemas: Collection of error schemas.
+        """
+        # Prepend "string" to handle system errors
+        schemas = [PrimitiveSchema(data_type=STRING)] + list(schemas)
+        super(ErrorUnionSchema, self).__init__(schemas=schemas)
+
+    def to_json(self, names=None):
+        if names is None:
+            names = Names()
+        to_dump = []
+        for schema in self.schemas:
+            # Don't print the system error schema
+            if schema.type == STRING:
+                continue
+            to_dump.append(schema.to_json(names))
+        return to_dump
+
+
+# ------------------------------------------------------------------------------
+
+
+class RecordSchema(NamedSchema):
+    """Schema of a record."""
+
+    @staticmethod
+    def _make_field(index, field_desc, names):
+        """Builds field schemas from a list of field JSON descriptors.
+
+        :param int index: 0-based index of the field in the record.
+        :param Any field_desc: JSON descriptors of a record field.
+        :param Any names: The names for this schema.
+        :returns: The field schema.
+        :rtype: Field
+        """
+        field_schema = schema_from_json_data(
+            json_data=field_desc['type'],
+            names=names,
+        )
+        other_props = (
+            dict(filter_keys_out(items=field_desc, keys=FIELD_RESERVED_PROPS)))
+        return Field(
+            data_type=field_schema,
+            name=field_desc['name'],
+            index=index,
+            has_default=('default' in field_desc),
+            default=field_desc.get('default', _NO_DEFAULT),
+            order=field_desc.get('order', None),
+            doc=field_desc.get('doc', None),
+            other_props=other_props,
+        )
+
+    @staticmethod
+    def make_field_list(field_desc_list, names):
+        """Builds field schemas from a list of field JSON descriptors.
+        Field name unicity is enforced separately by _make_field_map.
+
+        :param Any field_desc_list: Collection of field JSON descriptors.
+        :param Any names: The names for this schema.
+        :returns: A generator of field schemas.
+        :rtype: Iterator[Field]
+        """
+        for index, field_desc in enumerate(field_desc_list):
+            yield RecordSchema._make_field(index, field_desc, names)
+
+    @staticmethod
+    def _make_field_map(fields):
+        """Builds the field map.
+        Guarantees field name unicity.
+
+        :param Any fields: Iterable of field schema.
+        :returns: A map of field schemas, indexed by name.
+        :rtype: Dict[Any, Any]
+        """
+        field_map = {}
+        for field in fields:
+            if field.name in field_map:
+                raise SchemaParseException(
+                    f'Duplicate record field name {field.name!r}.')
+            field_map[field.name] = field
+        return field_map
+
+    def __init__(
+            self,
+            name,
+            namespace,
+            fields=None,
+            make_fields=None,
+            names=None,
+            record_type=RECORD,
+            doc=None,
+            other_props=None
+    ):
+        """Initializes a new record schema object.
+
+        Args:
+          name: Name of the record (absolute or relative).
+          namespace: Optional namespace the record belongs to, if name is relative.
+          fields: collection of fields to add to this record.
+              Exactly one of fields or make_fields must be specified.
+          make_fields: function creating the fields that belong to the record.
+              The function signature is: make_fields(names) -> ordered field list.
+              Exactly one of fields or make_fields must be specified.
+          names:
+          record_type: Type of the record: one of RECORD, ERROR or REQUEST.
+              Protocol requests are not named.
+          doc:
+          other_props:
+        """
+        if record_type == REQUEST:
+            # Protocol requests are not named:
+            super(RecordSchema, self).__init__(
+                data_type=REQUEST,
+                other_props=other_props,
+            )
+        elif record_type in [RECORD, ERROR]:
+            # Register this record name in the tracker:
+            super(RecordSchema, self).__init__(
+                data_type=record_type,
+                name=name,
+                namespace=namespace,
+                names=names,
+                other_props=other_props,
+            )
+        else:
+            raise SchemaParseException(
+                f'Invalid record type: {record_type!r}.')
+
+        nested_names = []
+        if record_type in [RECORD, ERROR]:
+            avro_name = names.get_name(name=name, namespace=namespace)
+            nested_names = names.new_with_default_namespace(namespace=avro_name.namespace)
+        elif record_type == REQUEST:
+            # Protocol request has no name: no need to change default namespace:
+            nested_names = names
+
+        if fields is None:
+            fields = make_fields(names=nested_names)
+        else:
+            assert make_fields is None
+        self._fields = tuple(fields)
+
+        self._field_map = RecordSchema._make_field_map(self._fields)
+
+        self._props['fields'] = fields
+        if doc is not None:
+            self._props['doc'] = doc
+
+    @property
+    def fields(self):
+        """Returns: the field schemas, as an ordered tuple."""
+        return self._fields
+
+    @property
+    def field_map(self):
+        """Returns: a read-only map of the field schemas index by field names."""
+        return self._field_map
+
+    def to_json(self, names=None):
+        if names is None:
+            names = Names()
+        # Request records don't have names
+        if self.type == REQUEST:
+            return [f.to_json(names) for f in self.fields]
+
+        if self.fullname in names.names:
+            return self.name_ref(names)
+        names.names[self.fullname] = self
+
+        to_dump = names.prune_namespace(self.props.copy())
+        to_dump['fields'] = [f.to_json(names) for f in self.fields]
+        return to_dump
+
+    def __eq__(self, that):
+        to_cmp = json.loads(str(self))
+        return to_cmp == json.loads(str(that))
+
+
+# ------------------------------------------------------------------------------
+# Module functions
+
+
+def filter_keys_out(items, keys):
+    """Filters a collection of (key, value) items.
+    Exclude any item whose key belongs to keys.
+
+    :param Dict[Any, Any] items: Dictionary of items to filter the keys out of.
+    :param Any keys: Collection of keys to exclude.
+    :returns: Generator yielding the filtered (key, value) items.
+    :rtype: Iterator[Tuple[Any, Any]]
+    """
+    for key, value in items.items():
+        if key in keys:
+            continue
+        yield key, value
+
+
+# ------------------------------------------------------------------------------
+
+
+def _schema_from_json_string(json_string, names):
+    if json_string in PRIMITIVE_TYPES:
+        return PrimitiveSchema(data_type=json_string)
+
+    # Look for a known named schema:
+    schema = names.get_schema(name=json_string)
+    if schema is None:
+        raise SchemaParseException(f"Unknown named schema {json_string!r}, known names: {sorted(names.names)!r}.")
+    return schema
+
+
+def _schema_from_json_array(json_array, names):
+    def MakeSchema(desc):
+        return schema_from_json_data(json_data=desc, names=names)
+
+    return UnionSchema(map(MakeSchema, json_array))
+
+
+def _schema_from_json_object(json_object, names):
+    data_type = json_object.get('type')
+    if data_type is None:
+        raise SchemaParseException(
+            f'Avro schema JSON descriptor has no "type" property: {json_object!r}')
+
+    other_props = dict(
+        filter_keys_out(items=json_object, keys=SCHEMA_RESERVED_PROPS))
+
+    if data_type in PRIMITIVE_TYPES:
+        # FIXME should not ignore other properties
+        result = PrimitiveSchema(data_type, other_props=other_props)
+
+    elif data_type in NAMED_TYPES:
+        name = json_object.get('name')
+        namespace = json_object.get('namespace', names.default_namespace)
+        if data_type == FIXED:
+            size = json_object.get('size')
+            result = FixedSchema(name, namespace, size, names, other_props)
+        elif data_type == ENUM:
+            symbols = json_object.get('symbols')
+            doc = json_object.get('doc')
+            result = EnumSchema(name, namespace, symbols, names, doc, other_props)
+
+        elif data_type in [RECORD, ERROR]:
+            field_desc_list = json_object.get('fields', ())
+
+            def MakeFields(names):
+                return tuple(RecordSchema.make_field_list(field_desc_list, names))
+
+            result = RecordSchema(
+                name=name,
+                namespace=namespace,
+                make_fields=MakeFields,
+                names=names,
+                record_type=data_type,
+                doc=json_object.get('doc'),
+                other_props=other_props,
+            )
+        else:
+            raise ValueError(f'Internal error: unknown type {data_type!r}.')
+
+    elif data_type in VALID_TYPES:
+        # Unnamed, non-primitive Avro type:
+
+        if data_type == ARRAY:
+            items_desc = json_object.get('items')
+            if items_desc is None:
+                raise SchemaParseException(f'Invalid array schema descriptor with no "items": {json_object!r}.')
+            result = ArraySchema(
+                items=schema_from_json_data(items_desc, names),
+                other_props=other_props,
+            )
+
+        elif data_type == MAP:
+            values_desc = json_object.get('values')
+            if values_desc is None:
+                raise SchemaParseException(f'Invalid map schema descriptor with no "values": {json_object!r}.')
+            result = MapSchema(
+                values=schema_from_json_data(values_desc, names=names),
+                other_props=other_props,
+            )
+
+        elif data_type == ERROR_UNION:
+            error_desc_list = json_object.get('declared_errors')
+            assert error_desc_list is not None
+            error_schemas = map(
+                lambda desc: schema_from_json_data(desc, names=names),
+                error_desc_list)
+            result = ErrorUnionSchema(schemas=error_schemas)
+
+        else:
+            raise ValueError(f'Internal error: unknown type {data_type!r}.')
+    else:
+        raise SchemaParseException(f'Invalid JSON descriptor for an Avro schema: {json_object!r}')
+    return result
+
+
+# Parsers for the JSON data types:
+_JSONDataParserTypeMap = {
+    str: _schema_from_json_string,
+    list: _schema_from_json_array,
+    dict: _schema_from_json_object,
+}
+
+
+def schema_from_json_data(json_data, names=None):
+    """Builds an Avro Schema from its JSON descriptor.
+    Raises SchemaParseException if the descriptor is invalid.
+
+    :param Any json_data: JSON data representing the descriptor of the Avro schema.
+    :param Any names: Optional tracker for Avro named schemas.
+    :returns: The Avro schema parsed from the JSON descriptor.
+    :rtype: Any
+    """
+    if names is None:
+        names = Names()
+
+    # Select the appropriate parser based on the JSON data type:
+    parser = _JSONDataParserTypeMap.get(type(json_data))
+    if parser is None:
+        raise SchemaParseException(
+            f'Invalid JSON descriptor for an Avro schema: {json_data!r}.')
+    return parser(json_data, names=names)
+
+
+# ------------------------------------------------------------------------------
+
+
+def parse(json_string):
+    """Constructs a Schema from its JSON descriptor in text form.
+    Raises SchemaParseException if a JSON parsing error is met, or if the JSON descriptor is invalid.
+
+    :param str json_string: String representation of the JSON descriptor of the schema.
+    :returns: The parsed schema.
+    :rtype: Any
+    """
+    try:
+        json_data = json.loads(json_string)
+    except Exception as exn:
+        raise SchemaParseException(
+            f'Error parsing schema from JSON: {json_string!r}. '
+            f'Error message: {exn!r}.') from exn
+
+    # Initialize the names object
+    names = Names()
+
+    # construct the Avro Schema object
+    return schema_from_json_data(json_data, names)
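+
+# A minimal usage sketch (editorial addition): round-tripping a record schema.
+# Field objects are assumed to expose a `name` property, as defined earlier in
+# this module.
+#
+#     >>> s = parse('{"type": "record", "name": "User",'
+#     ...           ' "fields": [{"name": "id", "type": "long"}]}')
+#     >>> [f.name for f in s.fields]
+#     ['id']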
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/blob/_shared/base_client.py b/.venv/lib/python3.12/site-packages/azure/storage/blob/_shared/base_client.py
new file mode 100644
index 00000000..9dc8d2ec
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/blob/_shared/base_client.py
@@ -0,0 +1,458 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+import logging
+import uuid
+from typing import (
+    Any,
+    cast,
+    Dict,
+    Iterator,
+    Optional,
+    Tuple,
+    TYPE_CHECKING,
+    Union,
+)
+from urllib.parse import parse_qs, quote
+
+from azure.core.credentials import AzureSasCredential, AzureNamedKeyCredential, TokenCredential
+from azure.core.exceptions import HttpResponseError
+from azure.core.pipeline import Pipeline
+from azure.core.pipeline.transport import HttpTransport, RequestsTransport  # pylint: disable=non-abstract-transport-import, no-name-in-module
+from azure.core.pipeline.policies import (
+    AzureSasCredentialPolicy,
+    ContentDecodePolicy,
+    DistributedTracingPolicy,
+    HttpLoggingPolicy,
+    ProxyPolicy,
+    RedirectPolicy,
+    UserAgentPolicy,
+)
+
+from .authentication import SharedKeyCredentialPolicy
+from .constants import CONNECTION_TIMEOUT, DEFAULT_OAUTH_SCOPE, READ_TIMEOUT, SERVICE_HOST_BASE, STORAGE_OAUTH_SCOPE
+from .models import LocationMode, StorageConfiguration
+from .policies import (
+    ExponentialRetry,
+    QueueMessagePolicy,
+    StorageBearerTokenCredentialPolicy,
+    StorageContentValidation,
+    StorageHeadersPolicy,
+    StorageHosts,
+    StorageLoggingPolicy,
+    StorageRequestHook,
+    StorageResponseHook,
+)
+from .request_handlers import serialize_batch_body, _get_batch_request_delimiter
+from .response_handlers import PartialBatchErrorException, process_storage_error
+from .shared_access_signature import QueryStringConstants
+from .._version import VERSION
+from .._shared_access_signature import _is_credential_sastoken
+
+if TYPE_CHECKING:
+    from azure.core.credentials_async import AsyncTokenCredential
+    from azure.core.pipeline.transport import HttpRequest, HttpResponse  # pylint: disable=C4756
+
+_LOGGER = logging.getLogger(__name__)
+_SERVICE_PARAMS = {
+    "blob": {"primary": "BLOBENDPOINT", "secondary": "BLOBSECONDARYENDPOINT"},
+    "queue": {"primary": "QUEUEENDPOINT", "secondary": "QUEUESECONDARYENDPOINT"},
+    "file": {"primary": "FILEENDPOINT", "secondary": "FILESECONDARYENDPOINT"},
+    "dfs": {"primary": "BLOBENDPOINT", "secondary": "BLOBENDPOINT"},
+}
+
+
+class StorageAccountHostsMixin(object):
+    _client: Any
+    def __init__(
+        self,
+        parsed_url: Any,
+        service: str,
+        credential: Optional[Union[str, Dict[str, str], AzureNamedKeyCredential, AzureSasCredential, "AsyncTokenCredential", TokenCredential]] = None,  # pylint: disable=line-too-long
+        **kwargs: Any
+    ) -> None:
+        self._location_mode = kwargs.get("_location_mode", LocationMode.PRIMARY)
+        self._hosts = kwargs.get("_hosts")
+        self.scheme = parsed_url.scheme
+        self._is_localhost = False
+
+        if service not in ["blob", "queue", "file-share", "dfs"]:
+            raise ValueError(f"Invalid service: {service}")
+        service_name = service.split('-')[0]
+        account = parsed_url.netloc.split(f".{service_name}.core.")
+
+        self.account_name = account[0] if len(account) > 1 else None
+        if not self.account_name and (parsed_url.netloc.startswith("localhost")
+                or parsed_url.netloc.startswith("127.0.0.1")):
+            self._is_localhost = True
+            self.account_name = parsed_url.path.strip("/")
+
+        self.credential = _format_shared_key_credential(self.account_name, credential)
+        if self.scheme.lower() != "https" and hasattr(self.credential, "get_token"):
+            raise ValueError("Token credential is only supported with HTTPS.")
+
+        secondary_hostname = None
+        if hasattr(self.credential, "account_name"):
+            self.account_name = self.credential.account_name
+            secondary_hostname = f"{self.credential.account_name}-secondary.{service_name}.{SERVICE_HOST_BASE}"
+
+        if not self._hosts:
+            if len(account) > 1:
+                secondary_hostname = parsed_url.netloc.replace(account[0], account[0] + "-secondary")
+            if kwargs.get("secondary_hostname"):
+                secondary_hostname = kwargs["secondary_hostname"]
+            primary_hostname = (parsed_url.netloc + parsed_url.path).rstrip('/')
+            self._hosts = {LocationMode.PRIMARY: primary_hostname, LocationMode.SECONDARY: secondary_hostname}
+
+        self._sdk_moniker = f"storage-{service}/{VERSION}"
+        self._config, self._pipeline = self._create_pipeline(self.credential, sdk_moniker=self._sdk_moniker, **kwargs)
+
+    def __enter__(self):
+        self._client.__enter__()
+        return self
+
+    def __exit__(self, *args):
+        self._client.__exit__(*args)
+
+    def close(self):
+        """ This method is to close the sockets opened by the client.
+        It need not be called when the client is used as a context manager.
+        """
+        self._client.close()
+
+    @property
+    def url(self):
+        """The full endpoint URL to this entity, including SAS token if used.
+
+        This could be either the primary endpoint,
+        or the secondary endpoint depending on the current :func:`location_mode`.
+        :returns: The full endpoint URL to this entity, including SAS token if used.
+        :rtype: str
+        """
+        return self._format_url(self._hosts[self._location_mode])
+
+    @property
+    def primary_endpoint(self):
+        """The full primary endpoint URL.
+
+        :rtype: str
+        """
+        return self._format_url(self._hosts[LocationMode.PRIMARY])
+
+    @property
+    def primary_hostname(self):
+        """The hostname of the primary endpoint.
+
+        :rtype: str
+        """
+        return self._hosts[LocationMode.PRIMARY]
+
+    @property
+    def secondary_endpoint(self):
+        """The full secondary endpoint URL if configured.
+
+        If not available, a ValueError will be raised. To explicitly specify a secondary hostname, use the optional
+        `secondary_hostname` keyword argument on instantiation.
+
+        :rtype: str
+        :raise ValueError:
+        """
+        if not self._hosts[LocationMode.SECONDARY]:
+            raise ValueError("No secondary host configured.")
+        return self._format_url(self._hosts[LocationMode.SECONDARY])
+
+    @property
+    def secondary_hostname(self):
+        """The hostname of the secondary endpoint.
+
+        If not available this will be None. To explicitly specify a secondary hostname, use the optional
+        `secondary_hostname` keyword argument on instantiation.
+
+        :rtype: Optional[str]
+        """
+        return self._hosts[LocationMode.SECONDARY]
+
+    @property
+    def location_mode(self):
+        """The location mode that the client is currently using.
+
+        By default this will be "primary". Options include "primary" and "secondary".
+
+        :rtype: str
+        """
+
+        return self._location_mode
+
+    @location_mode.setter
+    def location_mode(self, value):
+        if self._hosts.get(value):
+            self._location_mode = value
+            self._client._config.url = self.url  # pylint: disable=protected-access
+        else:
+            raise ValueError(f"No host URL for location mode: {value}")
+
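+    # A usage sketch (editorial addition): on an RA-GRS account, reads can be
+    # retargeted at the secondary endpoint by flipping this property on a
+    # hypothetical client instance; a ValueError is raised when no secondary
+    # host is configured:
+    #
+    #     client.location_mode = LocationMode.SECONDARY
+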
+    @property
+    def api_version(self):
+        """The version of the Storage API used for requests.
+
+        :rtype: str
+        """
+        return self._client._config.version  # pylint: disable=protected-access
+
+    def _format_query_string(
+        self, sas_token: Optional[str],
+        credential: Optional[Union[str, Dict[str, str], "AzureNamedKeyCredential", "AzureSasCredential", TokenCredential]],  # pylint: disable=line-too-long
+        snapshot: Optional[str] = None,
+        share_snapshot: Optional[str] = None
+    ) -> Tuple[str, Optional[Union[str, Dict[str, str], "AzureNamedKeyCredential", "AzureSasCredential", TokenCredential]]]:  # pylint: disable=line-too-long
+        query_str = "?"
+        if snapshot:
+            query_str += f"snapshot={snapshot}&"
+        if share_snapshot:
+            query_str += f"sharesnapshot={share_snapshot}&"
+        if sas_token and isinstance(credential, AzureSasCredential):
+            raise ValueError(
+                "You cannot use AzureSasCredential when the resource URI also contains a Shared Access Signature.")
+        if _is_credential_sastoken(credential):
+            credential = cast(str, credential)
+            query_str += credential.lstrip("?")
+            credential = None
+        elif sas_token:
+            query_str += sas_token
+        return query_str.rstrip("?&"), credential
+
+    def _create_pipeline(
+        self, credential: Optional[Union[str, Dict[str, str], AzureNamedKeyCredential, AzureSasCredential, TokenCredential]] = None,  # pylint: disable=line-too-long
+        **kwargs: Any
+    ) -> Tuple[StorageConfiguration, Pipeline]:
+        self._credential_policy: Any = None
+        if hasattr(credential, "get_token"):
+            if kwargs.get('audience'):
+                audience = str(kwargs.pop('audience')).rstrip('/') + DEFAULT_OAUTH_SCOPE
+            else:
+                audience = STORAGE_OAUTH_SCOPE
+            self._credential_policy = StorageBearerTokenCredentialPolicy(cast(TokenCredential, credential), audience)
+        elif isinstance(credential, SharedKeyCredentialPolicy):
+            self._credential_policy = credential
+        elif isinstance(credential, AzureSasCredential):
+            self._credential_policy = AzureSasCredentialPolicy(credential)
+        elif credential is not None:
+            raise TypeError(f"Unsupported credential: {type(credential)}")
+
+        config = kwargs.get("_configuration") or create_configuration(**kwargs)
+        if kwargs.get("_pipeline"):
+            return config, kwargs["_pipeline"]
+        transport = kwargs.get("transport")
+        kwargs.setdefault("connection_timeout", CONNECTION_TIMEOUT)
+        kwargs.setdefault("read_timeout", READ_TIMEOUT)
+        if not transport:
+            transport = RequestsTransport(**kwargs)
+        policies = [
+            QueueMessagePolicy(),
+            config.proxy_policy,
+            config.user_agent_policy,
+            StorageContentValidation(),
+            ContentDecodePolicy(response_encoding="utf-8"),
+            RedirectPolicy(**kwargs),
+            StorageHosts(hosts=self._hosts, **kwargs),
+            config.retry_policy,
+            config.headers_policy,
+            StorageRequestHook(**kwargs),
+            self._credential_policy,
+            config.logging_policy,
+            StorageResponseHook(**kwargs),
+            DistributedTracingPolicy(**kwargs),
+            HttpLoggingPolicy(**kwargs)
+        ]
+        if kwargs.get("_additional_pipeline_policies"):
+            policies = policies + kwargs.get("_additional_pipeline_policies")  # type: ignore
+        config.transport = transport  # type: ignore
+        return config, Pipeline(transport, policies=policies)
+
+    def _batch_send(
+        self,
+        *reqs: "HttpRequest",
+        **kwargs: Any
+    ) -> Iterator["HttpResponse"]:
+        """Given a series of request, do a Storage batch call.
+
+        :param HttpRequest reqs: A collection of HttpRequest objects.
+        :returns: An iterator of HttpResponse objects.
+        :rtype: Iterator[HttpResponse]
+        """
+        # Pop it here, so requests doesn't feel bad about additional kwarg
+        raise_on_any_failure = kwargs.pop("raise_on_any_failure", True)
+        batch_id = str(uuid.uuid1())
+
+        request = self._client._client.post(  # pylint: disable=protected-access
+            url=(
+                f'{self.scheme}://{self.primary_hostname}/'
+                f"{kwargs.pop('path', '')}?{kwargs.pop('restype', '')}"
+                f"comp=batch{kwargs.pop('sas', '')}{kwargs.pop('timeout', '')}"
+            ),
+            headers={
+                'x-ms-version': self.api_version,
+                "Content-Type": "multipart/mixed; boundary=" + _get_batch_request_delimiter(batch_id, False, False)
+            }
+        )
+
+        policies = [StorageHeadersPolicy()]
+        if self._credential_policy:
+            policies.append(self._credential_policy)
+
+        request.set_multipart_mixed(
+            *reqs,
+            policies=policies,
+            enforce_https=False
+        )
+
+        Pipeline._prepare_multipart_mixed_request(request)  # pylint: disable=protected-access
+        body = serialize_batch_body(request.multipart_mixed_info[0], batch_id)
+        request.set_bytes_body(body)
+
+        temp = request.multipart_mixed_info
+        request.multipart_mixed_info = None
+        pipeline_response = self._pipeline.run(
+            request, **kwargs
+        )
+        response = pipeline_response.http_response
+        request.multipart_mixed_info = temp
+
+        try:
+            if response.status_code not in [202]:
+                raise HttpResponseError(response=response)
+            parts = response.parts()
+            if raise_on_any_failure:
+                parts = list(parts)
+                if any(p for p in parts if not 200 <= p.status_code < 300):
+                    error = PartialBatchErrorException(
+                        message="There is a partial failure in the batch operation.",
+                        response=response, parts=parts
+                    )
+                    raise error
+                return iter(parts)
+            return parts  # type: ignore [no-any-return]
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+
+class TransportWrapper(HttpTransport):
+    """Wrapper class that ensures that an inner client created
+    by a `get_client` method does not close the outer transport for the parent
+    when used in a context manager.
+    """
+    def __init__(self, transport):
+        self._transport = transport
+
+    def send(self, request, **kwargs):
+        return self._transport.send(request, **kwargs)
+
+    def open(self):
+        pass
+
+    def close(self):
+        pass
+
+    def __enter__(self):
+        pass
+
+    def __exit__(self, *args):
+        pass
+
+
+def _format_shared_key_credential(
+    account_name: Optional[str],
+    credential: Optional[Union[str, Dict[str, str], AzureNamedKeyCredential, AzureSasCredential, "AsyncTokenCredential", TokenCredential]] = None  # pylint: disable=line-too-long
+) -> Any:
+    if isinstance(credential, str):
+        if not account_name:
+            raise ValueError("Unable to determine account name for shared key credential.")
+        credential = {"account_name": account_name, "account_key": credential}
+    if isinstance(credential, dict):
+        if "account_name" not in credential:
+            raise ValueError("Shared key credential missing 'account_name")
+        if "account_key" not in credential:
+            raise ValueError("Shared key credential missing 'account_key")
+        return SharedKeyCredentialPolicy(**credential)
+    if isinstance(credential, AzureNamedKeyCredential):
+        return SharedKeyCredentialPolicy(credential.named_key.name, credential.named_key.key)
+    return credential
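+
+# A minimal sketch (editorial addition): a bare account key string is paired
+# with the account name and normalized into a SharedKeyCredentialPolicy; the
+# account name and key below are placeholders.
+#
+#     >>> policy = _format_shared_key_credential("myaccount", "base64key==")
+#     >>> isinstance(policy, SharedKeyCredentialPolicy)
+#     True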
+
+
+def parse_connection_str(
+    conn_str: str,
+    credential: Optional[Union[str, Dict[str, str], AzureNamedKeyCredential, AzureSasCredential, TokenCredential]],
+    service: str
+) -> Tuple[str, Optional[str], Optional[Union[str, Dict[str, str], AzureNamedKeyCredential, AzureSasCredential, TokenCredential]]]:  # pylint: disable=line-too-long
+    conn_str = conn_str.rstrip(";")
+    conn_settings_list = [s.split("=", 1) for s in conn_str.split(";")]
+    if any(len(tup) != 2 for tup in conn_settings_list):
+        raise ValueError("Connection string is either blank or malformed.")
+    conn_settings = dict((key.upper(), val) for key, val in conn_settings_list)
+    endpoints = _SERVICE_PARAMS[service]
+    primary = None
+    secondary = None
+    if not credential:
+        try:
+            credential = {"account_name": conn_settings["ACCOUNTNAME"], "account_key": conn_settings["ACCOUNTKEY"]}
+        except KeyError:
+            credential = conn_settings.get("SHAREDACCESSSIGNATURE")
+    if endpoints["primary"] in conn_settings:
+        primary = conn_settings[endpoints["primary"]]
+        if endpoints["secondary"] in conn_settings:
+            secondary = conn_settings[endpoints["secondary"]]
+    else:
+        if endpoints["secondary"] in conn_settings:
+            raise ValueError("Connection string specifies only secondary endpoint.")
+        try:
+            primary = (
+                f"{conn_settings['DEFAULTENDPOINTSPROTOCOL']}://"
+                f"{conn_settings['ACCOUNTNAME']}.{service}.{conn_settings['ENDPOINTSUFFIX']}"
+            )
+            secondary = (
+                f"{conn_settings['ACCOUNTNAME']}-secondary."
+                f"{service}.{conn_settings['ENDPOINTSUFFIX']}"
+            )
+        except KeyError:
+            pass
+
+    if not primary:
+        try:
+            primary = (
+                f"https://{conn_settings['ACCOUNTNAME']}."
+                f"{service}.{conn_settings.get('ENDPOINTSUFFIX', SERVICE_HOST_BASE)}"
+            )
+        except KeyError as exc:
+            raise ValueError("Connection string missing required connection details.") from exc
+    if service == "dfs":
+        primary = primary.replace(".blob.", ".dfs.")
+        if secondary:
+            secondary = secondary.replace(".blob.", ".dfs.")
+    return primary, secondary, credential
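+
+# A minimal sketch (editorial addition): endpoints are derived from the
+# protocol, account name, and suffix when no explicit *ENDPOINT settings are
+# present; the account name and key below are placeholders.
+#
+#     >>> conn = ("DefaultEndpointsProtocol=https;AccountName=myaccount;"
+#     ...         "AccountKey=abc123;EndpointSuffix=core.windows.net")
+#     >>> primary, secondary, cred = parse_connection_str(conn, None, "blob")
+#     >>> primary
+#     'https://myaccount.blob.core.windows.net'
+#     >>> secondary
+#     'myaccount-secondary.blob.core.windows.net'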
+
+
+def create_configuration(**kwargs: Any) -> StorageConfiguration:
+    # Backwards compatibility if someone is not passing sdk_moniker
+    if not kwargs.get("sdk_moniker"):
+        kwargs["sdk_moniker"] = f"storage-{kwargs.pop('storage_sdk')}/{VERSION}"
+    config = StorageConfiguration(**kwargs)
+    config.headers_policy = StorageHeadersPolicy(**kwargs)
+    config.user_agent_policy = UserAgentPolicy(**kwargs)
+    config.retry_policy = kwargs.get("retry_policy") or ExponentialRetry(**kwargs)
+    config.logging_policy = StorageLoggingPolicy(**kwargs)
+    config.proxy_policy = ProxyPolicy(**kwargs)
+    return config
+
+
+def parse_query(query_str: str) -> Tuple[Optional[str], Optional[str]]:
+    sas_values = QueryStringConstants.to_list()
+    parsed_query = {k: v[0] for k, v in parse_qs(query_str).items()}
+    sas_params = [f"{k}={quote(v, safe='')}" for k, v in parsed_query.items() if k in sas_values]
+    sas_token = None
+    if sas_params:
+        sas_token = "&".join(sas_params)
+
+    snapshot = parsed_query.get("snapshot") or parsed_query.get("sharesnapshot")
+    return snapshot, sas_token
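+
+# A minimal sketch (editorial addition): snapshot and SAS parameters are split
+# apart; exactly which keys count as SAS parameters depends on
+# QueryStringConstants.to_list(), so the token below is illustrative.
+#
+#     >>> parse_query("snapshot=2024-01-01&sv=2021-08-06&sig=abc%3D")
+#     ('2024-01-01', 'sv=2021-08-06&sig=abc%3D')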
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/blob/_shared/base_client_async.py b/.venv/lib/python3.12/site-packages/azure/storage/blob/_shared/base_client_async.py
new file mode 100644
index 00000000..6186b29d
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/blob/_shared/base_client_async.py
@@ -0,0 +1,280 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+# mypy: disable-error-code="attr-defined"
+
+import logging
+from typing import Any, cast, Dict, Optional, Tuple, TYPE_CHECKING, Union
+
+from azure.core.async_paging import AsyncList
+from azure.core.credentials import AzureNamedKeyCredential, AzureSasCredential
+from azure.core.credentials_async import AsyncTokenCredential
+from azure.core.exceptions import HttpResponseError
+from azure.core.pipeline import AsyncPipeline
+from azure.core.pipeline.policies import (
+    AsyncRedirectPolicy,
+    AzureSasCredentialPolicy,
+    ContentDecodePolicy,
+    DistributedTracingPolicy,
+    HttpLoggingPolicy,
+)
+from azure.core.pipeline.transport import AsyncHttpTransport
+
+from .authentication import SharedKeyCredentialPolicy
+from .base_client import create_configuration
+from .constants import CONNECTION_TIMEOUT, DEFAULT_OAUTH_SCOPE, READ_TIMEOUT, SERVICE_HOST_BASE, STORAGE_OAUTH_SCOPE
+from .models import StorageConfiguration
+from .policies import (
+    QueueMessagePolicy,
+    StorageContentValidation,
+    StorageHeadersPolicy,
+    StorageHosts,
+    StorageRequestHook,
+)
+from .policies_async import AsyncStorageBearerTokenCredentialPolicy, AsyncStorageResponseHook
+from .response_handlers import PartialBatchErrorException, process_storage_error
+from .._shared_access_signature import _is_credential_sastoken
+
+if TYPE_CHECKING:
+    from azure.core.pipeline.transport import HttpRequest, HttpResponse  # pylint: disable=C4756
+_LOGGER = logging.getLogger(__name__)
+
+_SERVICE_PARAMS = {
+    "blob": {"primary": "BLOBENDPOINT", "secondary": "BLOBSECONDARYENDPOINT"},
+    "queue": {"primary": "QUEUEENDPOINT", "secondary": "QUEUESECONDARYENDPOINT"},
+    "file": {"primary": "FILEENDPOINT", "secondary": "FILESECONDARYENDPOINT"},
+    "dfs": {"primary": "BLOBENDPOINT", "secondary": "BLOBENDPOINT"},
+}
+
+
+class AsyncStorageAccountHostsMixin(object):
+
+    def __enter__(self):
+        raise TypeError("Async client only supports 'async with'.")
+
+    def __exit__(self, *args):
+        pass
+
+    async def __aenter__(self):
+        await self._client.__aenter__()
+        return self
+
+    async def __aexit__(self, *args):
+        await self._client.__aexit__(*args)
+
+    async def close(self):
+        """ This method is to close the sockets opened by the client.
+        It need not be called when the client is used as a context manager.
+        """
+        await self._client.close()
+
+    def _format_query_string(
+        self, sas_token: Optional[str],
+        credential: Optional[Union[str, Dict[str, str], "AzureNamedKeyCredential", "AzureSasCredential", AsyncTokenCredential]],  # pylint: disable=line-too-long
+        snapshot: Optional[str] = None,
+        share_snapshot: Optional[str] = None
+    ) -> Tuple[str, Optional[Union[str, Dict[str, str], "AzureNamedKeyCredential", "AzureSasCredential", AsyncTokenCredential]]]:  # pylint: disable=line-too-long
+        query_str = "?"
+        if snapshot:
+            query_str += f"snapshot={snapshot}&"
+        if share_snapshot:
+            query_str += f"sharesnapshot={share_snapshot}&"
+        if sas_token and isinstance(credential, AzureSasCredential):
+            raise ValueError(
+                "You cannot use AzureSasCredential when the resource URI also contains a Shared Access Signature.")
+        if _is_credential_sastoken(credential):
+            query_str += credential.lstrip("?")  # type: ignore [union-attr]
+            credential = None
+        elif sas_token:
+            query_str += sas_token
+        return query_str.rstrip("?&"), credential
+
+    def _create_pipeline(
+        self, credential: Optional[Union[str, Dict[str, str], AzureNamedKeyCredential, AzureSasCredential, AsyncTokenCredential]] = None, # pylint: disable=line-too-long
+        **kwargs: Any
+    ) -> Tuple[StorageConfiguration, AsyncPipeline]:
+        self._credential_policy: Optional[
+            Union[AsyncStorageBearerTokenCredentialPolicy,
+            SharedKeyCredentialPolicy,
+            AzureSasCredentialPolicy]] = None
+        if hasattr(credential, 'get_token'):
+            if kwargs.get('audience'):
+                audience = str(kwargs.pop('audience')).rstrip('/') + DEFAULT_OAUTH_SCOPE
+            else:
+                audience = STORAGE_OAUTH_SCOPE
+            self._credential_policy = AsyncStorageBearerTokenCredentialPolicy(
+                                        cast(AsyncTokenCredential, credential), audience)
+        elif isinstance(credential, SharedKeyCredentialPolicy):
+            self._credential_policy = credential
+        elif isinstance(credential, AzureSasCredential):
+            self._credential_policy = AzureSasCredentialPolicy(credential)
+        elif credential is not None:
+            raise TypeError(f"Unsupported credential: {type(credential)}")
+        config = kwargs.get('_configuration') or create_configuration(**kwargs)
+        if kwargs.get('_pipeline'):
+            return config, kwargs['_pipeline']
+        transport = kwargs.get('transport')
+        kwargs.setdefault("connection_timeout", CONNECTION_TIMEOUT)
+        kwargs.setdefault("read_timeout", READ_TIMEOUT)
+        if not transport:
+            try:
+                from azure.core.pipeline.transport import AioHttpTransport  # pylint: disable=non-abstract-transport-import
+            except ImportError as exc:
+                raise ImportError("Unable to create async transport. Please check aiohttp is installed.") from exc
+            transport = AioHttpTransport(**kwargs)
+        hosts = self._hosts
+        policies = [
+            QueueMessagePolicy(),
+            config.proxy_policy,
+            config.user_agent_policy,
+            StorageContentValidation(),
+            ContentDecodePolicy(response_encoding="utf-8"),
+            AsyncRedirectPolicy(**kwargs),
+            StorageHosts(hosts=hosts, **kwargs),
+            config.retry_policy,
+            config.headers_policy,
+            StorageRequestHook(**kwargs),
+            self._credential_policy,
+            config.logging_policy,
+            AsyncStorageResponseHook(**kwargs),
+            DistributedTracingPolicy(**kwargs),
+            HttpLoggingPolicy(**kwargs),
+        ]
+        if kwargs.get("_additional_pipeline_policies"):
+            policies = policies + kwargs.get("_additional_pipeline_policies")  #type: ignore
+        config.transport = transport #type: ignore
+        return config, AsyncPipeline(transport, policies=policies) #type: ignore
+
+    async def _batch_send(
+        self,
+        *reqs: "HttpRequest",
+        **kwargs: Any
+    ) -> AsyncList["HttpResponse"]:
+        """Given a series of request, do a Storage batch call.
+
+        :param HttpRequest reqs: A collection of HttpRequest objects.
+        :returns: An AsyncList of HttpResponse objects.
+        :rtype: AsyncList[HttpResponse]
+        """
+        # Pop it here, so requests doesn't feel bad about additional kwarg
+        raise_on_any_failure = kwargs.pop("raise_on_any_failure", True)
+        request = self._client._client.post(  # pylint: disable=protected-access
+            url=(
+                f'{self.scheme}://{self.primary_hostname}/'
+                f"{kwargs.pop('path', '')}?{kwargs.pop('restype', '')}"
+                f"comp=batch{kwargs.pop('sas', '')}{kwargs.pop('timeout', '')}"
+            ),
+            headers={
+                'x-ms-version': self.api_version
+            }
+        )
+
+        policies = [StorageHeadersPolicy()]
+        if self._credential_policy:
+            policies.append(self._credential_policy)  # type: ignore
+
+        request.set_multipart_mixed(
+            *reqs,
+            policies=policies,
+            enforce_https=False
+        )
+
+        pipeline_response = await self._pipeline.run(
+            request, **kwargs
+        )
+        response = pipeline_response.http_response
+
+        try:
+            if response.status_code not in [202]:
+                raise HttpResponseError(response=response)
+            parts = response.parts()  # Returns an AsyncIterator
+            if raise_on_any_failure:
+                parts_list = []
+                async for part in parts:
+                    parts_list.append(part)
+                if any(p for p in parts_list if not 200 <= p.status_code < 300):
+                    error = PartialBatchErrorException(
+                        message="There is a partial failure in the batch operation.",
+                        response=response, parts=parts_list
+                    )
+                    raise error
+                return AsyncList(parts_list)
+            return parts  # type: ignore [no-any-return]
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+
+def parse_connection_str(
+    conn_str: str,
+    credential: Optional[Union[str, Dict[str, str], AzureNamedKeyCredential, AzureSasCredential, AsyncTokenCredential]],
+    service: str
+) -> Tuple[str, Optional[str], Optional[Union[str, Dict[str, str], AzureNamedKeyCredential, AzureSasCredential, AsyncTokenCredential]]]: # pylint: disable=line-too-long
+    conn_str = conn_str.rstrip(";")
+    conn_settings_list = [s.split("=", 1) for s in conn_str.split(";")]
+    if any(len(tup) != 2 for tup in conn_settings_list):
+        raise ValueError("Connection string is either blank or malformed.")
+    conn_settings = dict((key.upper(), val) for key, val in conn_settings_list)
+    endpoints = _SERVICE_PARAMS[service]
+    primary = None
+    secondary = None
+    if not credential:
+        try:
+            credential = {"account_name": conn_settings["ACCOUNTNAME"], "account_key": conn_settings["ACCOUNTKEY"]}
+        except KeyError:
+            credential = conn_settings.get("SHAREDACCESSSIGNATURE")
+    if endpoints["primary"] in conn_settings:
+        primary = conn_settings[endpoints["primary"]]
+        if endpoints["secondary"] in conn_settings:
+            secondary = conn_settings[endpoints["secondary"]]
+    else:
+        if endpoints["secondary"] in conn_settings:
+            raise ValueError("Connection string specifies only secondary endpoint.")
+        try:
+            primary = (
+                f"{conn_settings['DEFAULTENDPOINTSPROTOCOL']}://"
+                f"{conn_settings['ACCOUNTNAME']}.{service}.{conn_settings['ENDPOINTSUFFIX']}"
+            )
+            secondary = (
+                f"{conn_settings['ACCOUNTNAME']}-secondary."
+                f"{service}.{conn_settings['ENDPOINTSUFFIX']}"
+            )
+        except KeyError:
+            pass
+
+    if not primary:
+        try:
+            primary = (
+                f"https://{conn_settings['ACCOUNTNAME']}."
+                f"{service}.{conn_settings.get('ENDPOINTSUFFIX', SERVICE_HOST_BASE)}"
+            )
+        except KeyError as exc:
+            raise ValueError("Connection string missing required connection details.") from exc
+    if service == "dfs":
+        primary = primary.replace(".blob.", ".dfs.")
+        if secondary:
+            secondary = secondary.replace(".blob.", ".dfs.")
+    return primary, secondary, credential
+
+
+class AsyncTransportWrapper(AsyncHttpTransport):
+    """Wrapper class that ensures that an inner client created
+    by a `get_client` method does not close the outer transport for the parent
+    when used in a context manager.
+    """
+    def __init__(self, async_transport):
+        self._transport = async_transport
+
+    async def send(self, request, **kwargs):
+        return await self._transport.send(request, **kwargs)
+
+    async def open(self):
+        pass
+
+    async def close(self):
+        pass
+
+    async def __aenter__(self):
+        pass
+
+    async def __aexit__(self, *args):
+        pass
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/blob/_shared/constants.py b/.venv/lib/python3.12/site-packages/azure/storage/blob/_shared/constants.py
new file mode 100644
index 00000000..0b4b029a
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/blob/_shared/constants.py
@@ -0,0 +1,19 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+
+from .._serialize import _SUPPORTED_API_VERSIONS
+
+
+X_MS_VERSION = _SUPPORTED_API_VERSIONS[-1]
+
+# Default socket timeouts, in seconds
+CONNECTION_TIMEOUT = 20
+READ_TIMEOUT = 60
+
+DEFAULT_OAUTH_SCOPE = "/.default"
+STORAGE_OAUTH_SCOPE = "https://storage.azure.com/.default"
+
+SERVICE_HOST_BASE = 'core.windows.net'
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/blob/_shared/models.py b/.venv/lib/python3.12/site-packages/azure/storage/blob/_shared/models.py
new file mode 100644
index 00000000..d78cd911
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/blob/_shared/models.py
@@ -0,0 +1,585 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+# pylint: disable=too-many-instance-attributes
+from enum import Enum
+from typing import Optional
+
+from azure.core import CaseInsensitiveEnumMeta
+from azure.core.configuration import Configuration
+from azure.core.pipeline.policies import UserAgentPolicy
+
+
+def get_enum_value(value):
+    if value is None or value in ["None", ""]:
+        return None
+    try:
+        return value.value
+    except AttributeError:
+        return value
+
+
+class StorageErrorCode(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+
+    # Generic storage values
+    ACCOUNT_ALREADY_EXISTS = "AccountAlreadyExists"
+    ACCOUNT_BEING_CREATED = "AccountBeingCreated"
+    ACCOUNT_IS_DISABLED = "AccountIsDisabled"
+    AUTHENTICATION_FAILED = "AuthenticationFailed"
+    AUTHORIZATION_FAILURE = "AuthorizationFailure"
+    NO_AUTHENTICATION_INFORMATION = "NoAuthenticationInformation"
+    CONDITION_HEADERS_NOT_SUPPORTED = "ConditionHeadersNotSupported"
+    CONDITION_NOT_MET = "ConditionNotMet"
+    EMPTY_METADATA_KEY = "EmptyMetadataKey"
+    INSUFFICIENT_ACCOUNT_PERMISSIONS = "InsufficientAccountPermissions"
+    INTERNAL_ERROR = "InternalError"
+    INVALID_AUTHENTICATION_INFO = "InvalidAuthenticationInfo"
+    INVALID_HEADER_VALUE = "InvalidHeaderValue"
+    INVALID_HTTP_VERB = "InvalidHttpVerb"
+    INVALID_INPUT = "InvalidInput"
+    INVALID_MD5 = "InvalidMd5"
+    INVALID_METADATA = "InvalidMetadata"
+    INVALID_QUERY_PARAMETER_VALUE = "InvalidQueryParameterValue"
+    INVALID_RANGE = "InvalidRange"
+    INVALID_RESOURCE_NAME = "InvalidResourceName"
+    INVALID_URI = "InvalidUri"
+    INVALID_XML_DOCUMENT = "InvalidXmlDocument"
+    INVALID_XML_NODE_VALUE = "InvalidXmlNodeValue"
+    MD5_MISMATCH = "Md5Mismatch"
+    METADATA_TOO_LARGE = "MetadataTooLarge"
+    MISSING_CONTENT_LENGTH_HEADER = "MissingContentLengthHeader"
+    MISSING_REQUIRED_QUERY_PARAMETER = "MissingRequiredQueryParameter"
+    MISSING_REQUIRED_HEADER = "MissingRequiredHeader"
+    MISSING_REQUIRED_XML_NODE = "MissingRequiredXmlNode"
+    MULTIPLE_CONDITION_HEADERS_NOT_SUPPORTED = "MultipleConditionHeadersNotSupported"
+    OPERATION_TIMED_OUT = "OperationTimedOut"
+    OUT_OF_RANGE_INPUT = "OutOfRangeInput"
+    OUT_OF_RANGE_QUERY_PARAMETER_VALUE = "OutOfRangeQueryParameterValue"
+    REQUEST_BODY_TOO_LARGE = "RequestBodyTooLarge"
+    RESOURCE_TYPE_MISMATCH = "ResourceTypeMismatch"
+    REQUEST_URL_FAILED_TO_PARSE = "RequestUrlFailedToParse"
+    RESOURCE_ALREADY_EXISTS = "ResourceAlreadyExists"
+    RESOURCE_NOT_FOUND = "ResourceNotFound"
+    SERVER_BUSY = "ServerBusy"
+    UNSUPPORTED_HEADER = "UnsupportedHeader"
+    UNSUPPORTED_XML_NODE = "UnsupportedXmlNode"
+    UNSUPPORTED_QUERY_PARAMETER = "UnsupportedQueryParameter"
+    UNSUPPORTED_HTTP_VERB = "UnsupportedHttpVerb"
+
+    # Blob values
+    APPEND_POSITION_CONDITION_NOT_MET = "AppendPositionConditionNotMet"
+    BLOB_ACCESS_TIER_NOT_SUPPORTED_FOR_ACCOUNT_TYPE = "BlobAccessTierNotSupportedForAccountType"
+    BLOB_ALREADY_EXISTS = "BlobAlreadyExists"
+    BLOB_NOT_FOUND = "BlobNotFound"
+    BLOB_OVERWRITTEN = "BlobOverwritten"
+    BLOB_TIER_INADEQUATE_FOR_CONTENT_LENGTH = "BlobTierInadequateForContentLength"
+    BLOCK_COUNT_EXCEEDS_LIMIT = "BlockCountExceedsLimit"
+    BLOCK_LIST_TOO_LONG = "BlockListTooLong"
+    CANNOT_CHANGE_TO_LOWER_TIER = "CannotChangeToLowerTier"
+    CANNOT_VERIFY_COPY_SOURCE = "CannotVerifyCopySource"
+    CONTAINER_ALREADY_EXISTS = "ContainerAlreadyExists"
+    CONTAINER_BEING_DELETED = "ContainerBeingDeleted"
+    CONTAINER_DISABLED = "ContainerDisabled"
+    CONTAINER_NOT_FOUND = "ContainerNotFound"
+    CONTENT_LENGTH_LARGER_THAN_TIER_LIMIT = "ContentLengthLargerThanTierLimit"
+    COPY_ACROSS_ACCOUNTS_NOT_SUPPORTED = "CopyAcrossAccountsNotSupported"
+    COPY_ID_MISMATCH = "CopyIdMismatch"
+    FEATURE_VERSION_MISMATCH = "FeatureVersionMismatch"
+    INCREMENTAL_COPY_BLOB_MISMATCH = "IncrementalCopyBlobMismatch"
+    INCREMENTAL_COPY_OF_EARLIER_VERSION_SNAPSHOT_NOT_ALLOWED = "IncrementalCopyOfEarlierVersionSnapshotNotAllowed"
+    #: Deprecated: Please use INCREMENTAL_COPY_OF_EARLIER_VERSION_SNAPSHOT_NOT_ALLOWED instead.
+    INCREMENTAL_COPY_OF_ERALIER_VERSION_SNAPSHOT_NOT_ALLOWED = "IncrementalCopyOfEarlierVersionSnapshotNotAllowed"
+    INCREMENTAL_COPY_SOURCE_MUST_BE_SNAPSHOT = "IncrementalCopySourceMustBeSnapshot"
+    INFINITE_LEASE_DURATION_REQUIRED = "InfiniteLeaseDurationRequired"
+    INVALID_BLOB_OR_BLOCK = "InvalidBlobOrBlock"
+    INVALID_BLOB_TIER = "InvalidBlobTier"
+    INVALID_BLOB_TYPE = "InvalidBlobType"
+    INVALID_BLOCK_ID = "InvalidBlockId"
+    INVALID_BLOCK_LIST = "InvalidBlockList"
+    INVALID_OPERATION = "InvalidOperation"
+    INVALID_PAGE_RANGE = "InvalidPageRange"
+    INVALID_SOURCE_BLOB_TYPE = "InvalidSourceBlobType"
+    INVALID_SOURCE_BLOB_URL = "InvalidSourceBlobUrl"
+    INVALID_VERSION_FOR_PAGE_BLOB_OPERATION = "InvalidVersionForPageBlobOperation"
+    LEASE_ALREADY_PRESENT = "LeaseAlreadyPresent"
+    LEASE_ALREADY_BROKEN = "LeaseAlreadyBroken"
+    LEASE_ID_MISMATCH_WITH_BLOB_OPERATION = "LeaseIdMismatchWithBlobOperation"
+    LEASE_ID_MISMATCH_WITH_CONTAINER_OPERATION = "LeaseIdMismatchWithContainerOperation"
+    LEASE_ID_MISMATCH_WITH_LEASE_OPERATION = "LeaseIdMismatchWithLeaseOperation"
+    LEASE_ID_MISSING = "LeaseIdMissing"
+    LEASE_IS_BREAKING_AND_CANNOT_BE_ACQUIRED = "LeaseIsBreakingAndCannotBeAcquired"
+    LEASE_IS_BREAKING_AND_CANNOT_BE_CHANGED = "LeaseIsBreakingAndCannotBeChanged"
+    LEASE_IS_BROKEN_AND_CANNOT_BE_RENEWED = "LeaseIsBrokenAndCannotBeRenewed"
+    LEASE_LOST = "LeaseLost"
+    LEASE_NOT_PRESENT_WITH_BLOB_OPERATION = "LeaseNotPresentWithBlobOperation"
+    LEASE_NOT_PRESENT_WITH_CONTAINER_OPERATION = "LeaseNotPresentWithContainerOperation"
+    LEASE_NOT_PRESENT_WITH_LEASE_OPERATION = "LeaseNotPresentWithLeaseOperation"
+    MAX_BLOB_SIZE_CONDITION_NOT_MET = "MaxBlobSizeConditionNotMet"
+    NO_PENDING_COPY_OPERATION = "NoPendingCopyOperation"
+    OPERATION_NOT_ALLOWED_ON_INCREMENTAL_COPY_BLOB = "OperationNotAllowedOnIncrementalCopyBlob"
+    PENDING_COPY_OPERATION = "PendingCopyOperation"
+    PREVIOUS_SNAPSHOT_CANNOT_BE_NEWER = "PreviousSnapshotCannotBeNewer"
+    PREVIOUS_SNAPSHOT_NOT_FOUND = "PreviousSnapshotNotFound"
+    PREVIOUS_SNAPSHOT_OPERATION_NOT_SUPPORTED = "PreviousSnapshotOperationNotSupported"
+    SEQUENCE_NUMBER_CONDITION_NOT_MET = "SequenceNumberConditionNotMet"
+    SEQUENCE_NUMBER_INCREMENT_TOO_LARGE = "SequenceNumberIncrementTooLarge"
+    SNAPSHOT_COUNT_EXCEEDED = "SnapshotCountExceeded"
+    SNAPSHOT_OPERATION_RATE_EXCEEDED = "SnapshotOperationRateExceeded"
+    #: Deprecated: Please use SNAPSHOT_OPERATION_RATE_EXCEEDED instead.
+    SNAPHOT_OPERATION_RATE_EXCEEDED = "SnapshotOperationRateExceeded"
+    SNAPSHOTS_PRESENT = "SnapshotsPresent"
+    SOURCE_CONDITION_NOT_MET = "SourceConditionNotMet"
+    SYSTEM_IN_USE = "SystemInUse"
+    TARGET_CONDITION_NOT_MET = "TargetConditionNotMet"
+    UNAUTHORIZED_BLOB_OVERWRITE = "UnauthorizedBlobOverwrite"
+    BLOB_BEING_REHYDRATED = "BlobBeingRehydrated"
+    BLOB_ARCHIVED = "BlobArchived"
+    BLOB_NOT_ARCHIVED = "BlobNotArchived"
+
+    # Queue values
+    INVALID_MARKER = "InvalidMarker"
+    MESSAGE_NOT_FOUND = "MessageNotFound"
+    MESSAGE_TOO_LARGE = "MessageTooLarge"
+    POP_RECEIPT_MISMATCH = "PopReceiptMismatch"
+    QUEUE_ALREADY_EXISTS = "QueueAlreadyExists"
+    QUEUE_BEING_DELETED = "QueueBeingDeleted"
+    QUEUE_DISABLED = "QueueDisabled"
+    QUEUE_NOT_EMPTY = "QueueNotEmpty"
+    QUEUE_NOT_FOUND = "QueueNotFound"
+
+    # File values
+    CANNOT_DELETE_FILE_OR_DIRECTORY = "CannotDeleteFileOrDirectory"
+    CLIENT_CACHE_FLUSH_DELAY = "ClientCacheFlushDelay"
+    DELETE_PENDING = "DeletePending"
+    DIRECTORY_NOT_EMPTY = "DirectoryNotEmpty"
+    FILE_LOCK_CONFLICT = "FileLockConflict"
+    FILE_SHARE_PROVISIONED_BANDWIDTH_DOWNGRADE_NOT_ALLOWED = "FileShareProvisionedBandwidthDowngradeNotAllowed"
+    FILE_SHARE_PROVISIONED_IOPS_DOWNGRADE_NOT_ALLOWED = "FileShareProvisionedIopsDowngradeNotAllowed"
+    INVALID_FILE_OR_DIRECTORY_PATH_NAME = "InvalidFileOrDirectoryPathName"
+    PARENT_NOT_FOUND = "ParentNotFound"
+    READ_ONLY_ATTRIBUTE = "ReadOnlyAttribute"
+    SHARE_ALREADY_EXISTS = "ShareAlreadyExists"
+    SHARE_BEING_DELETED = "ShareBeingDeleted"
+    SHARE_DISABLED = "ShareDisabled"
+    SHARE_NOT_FOUND = "ShareNotFound"
+    SHARING_VIOLATION = "SharingViolation"
+    SHARE_SNAPSHOT_IN_PROGRESS = "ShareSnapshotInProgress"
+    SHARE_SNAPSHOT_COUNT_EXCEEDED = "ShareSnapshotCountExceeded"
+    SHARE_SNAPSHOT_OPERATION_NOT_SUPPORTED = "ShareSnapshotOperationNotSupported"
+    SHARE_HAS_SNAPSHOTS = "ShareHasSnapshots"
+    CONTAINER_QUOTA_DOWNGRADE_NOT_ALLOWED = "ContainerQuotaDowngradeNotAllowed"
+
+    # DataLake values
+    CONTENT_LENGTH_MUST_BE_ZERO = 'ContentLengthMustBeZero'
+    PATH_ALREADY_EXISTS = 'PathAlreadyExists'
+    INVALID_FLUSH_POSITION = 'InvalidFlushPosition'
+    INVALID_PROPERTY_NAME = 'InvalidPropertyName'
+    INVALID_SOURCE_URI = 'InvalidSourceUri'
+    UNSUPPORTED_REST_VERSION = 'UnsupportedRestVersion'
+    FILE_SYSTEM_NOT_FOUND = 'FilesystemNotFound'
+    PATH_NOT_FOUND = 'PathNotFound'
+    RENAME_DESTINATION_PARENT_PATH_NOT_FOUND = 'RenameDestinationParentPathNotFound'
+    SOURCE_PATH_NOT_FOUND = 'SourcePathNotFound'
+    DESTINATION_PATH_IS_BEING_DELETED = 'DestinationPathIsBeingDeleted'
+    FILE_SYSTEM_ALREADY_EXISTS = 'FilesystemAlreadyExists'
+    FILE_SYSTEM_BEING_DELETED = 'FilesystemBeingDeleted'
+    INVALID_DESTINATION_PATH = 'InvalidDestinationPath'
+    INVALID_RENAME_SOURCE_PATH = 'InvalidRenameSourcePath'
+    INVALID_SOURCE_OR_DESTINATION_RESOURCE_TYPE = 'InvalidSourceOrDestinationResourceType'
+    LEASE_IS_ALREADY_BROKEN = 'LeaseIsAlreadyBroken'
+    LEASE_NAME_MISMATCH = 'LeaseNameMismatch'
+    PATH_CONFLICT = 'PathConflict'
+    SOURCE_PATH_IS_BEING_DELETED = 'SourcePathIsBeingDeleted'
+
+
+class DictMixin(object):
+
+    def __setitem__(self, key, item):
+        self.__dict__[key] = item
+
+    def __getitem__(self, key):
+        return self.__dict__[key]
+
+    def __repr__(self):
+        return str(self)
+
+    def __len__(self):
+        return len(self.keys())
+
+    def __delitem__(self, key):
+        self.__dict__[key] = None
+
+    # Compare objects by comparing all attributes.
+    def __eq__(self, other):
+        if isinstance(other, self.__class__):
+            return self.__dict__ == other.__dict__
+        return False
+
+    # Compare objects by comparing all attributes.
+    def __ne__(self, other):
+        return not self.__eq__(other)
+
+    def __str__(self):
+        return str({k: v for k, v in self.__dict__.items() if not k.startswith('_')})
+
+    def __contains__(self, key):
+        return key in self.__dict__
+
+    def has_key(self, k):
+        return k in self.__dict__
+
+    def update(self, *args, **kwargs):
+        return self.__dict__.update(*args, **kwargs)
+
+    def keys(self):
+        return [k for k in self.__dict__ if not k.startswith('_')]
+
+    def values(self):
+        return [v for k, v in self.__dict__.items() if not k.startswith('_')]
+
+    def items(self):
+        return [(k, v) for k, v in self.__dict__.items() if not k.startswith('_')]
+
+    def get(self, key, default=None):
+        if key in self.__dict__:
+            return self.__dict__[key]
+        return default
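+
+# A minimal sketch (editorial addition): subclasses expose their attribute
+# state through the mapping protocol, with underscore-prefixed attributes
+# hidden from keys()/values()/items().
+#
+#     >>> class Props(DictMixin):
+#     ...     def __init__(self):
+#     ...         self.name = "blob"
+#     ...         self._internal = 1
+#     >>> p = Props()
+#     >>> p["name"], p.keys()
+#     ('blob', ['name'])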
+
+
+class LocationMode(object):
+    """
+    Specifies the location the request should be sent to. This mode only applies
+    for RA-GRS accounts which allow secondary read access. All other account types
+    must use PRIMARY.
+    """
+
+    PRIMARY = 'primary'  #: Requests should be sent to the primary location.
+    SECONDARY = 'secondary'  #: Requests should be sent to the secondary location, if possible.
+
+
+class ResourceTypes(object):
+    """
+    Specifies the resource types that are accessible with the account SAS.
+
+    :param bool service:
+        Access to service-level APIs (e.g., Get/Set Service Properties,
+        Get Service Stats, List Containers/Queues/Shares)
+    :param bool container:
+        Access to container-level APIs (e.g., Create/Delete Container,
+        Create/Delete Queue, Create/Delete Share,
+        List Blobs/Files and Directories)
+    :param bool object:
+        Access to object-level APIs for blobs, queue messages, and
+        files (e.g. Put Blob, Query Entity, Get Messages, Create File, etc.)
+    """
+
+    service: bool = False
+    container: bool = False
+    object: bool = False
+    _str: str
+
+    def __init__(
+        self,
+        service: bool = False,
+        container: bool = False,
+        object: bool = False  # pylint: disable=redefined-builtin
+    ) -> None:
+        self.service = service
+        self.container = container
+        self.object = object
+        self._str = (('s' if self.service else '') +
+                ('c' if self.container else '') +
+                ('o' if self.object else ''))
+
+    def __str__(self):
+        return self._str
+
+    @classmethod
+    def from_string(cls, string):
+        """Create a ResourceTypes from a string.
+
+        To specify service, container, or object you need only
+        include the first letter of the word in the string. E.g. for service
+        and container, you would provide the string "sc".
+
+        :param str string: Specify service, container, or object
+            in the string with the first letter of the word.
+        :return: A ResourceTypes object
+        :rtype: ~azure.storage.blob.ResourceTypes
+        """
+        res_service = 's' in string
+        res_container = 'c' in string
+        res_object = 'o' in string
+
+        parsed = cls(res_service, res_container, res_object)
+        parsed._str = string
+        return parsed
+
+
+class AccountSasPermissions(object):
+    """
+    :class:`~AccountSasPermissions` is to be used with the generate_account_sas
+    function and for the AccessPolicies used with set_*_acl. There are two types of
+    SAS which may be used to grant resource access. One is to grant access to a
+    specific resource (resource-specific). Another is to grant access to the
+    entire service for a specific account and allow certain operations based on
+    the permissions found here.
+
+    :param bool read:
+        Valid for all signed resources types (Service, Container, and Object).
+        Permits read permissions to the specified resource type.
+    :param bool write:
+        Valid for all signed resources types (Service, Container, and Object).
+        Permits write permissions to the specified resource type.
+    :param bool delete:
+        Valid for Container and Object resource types, except for queue messages.
+    :param bool delete_previous_version:
+        Delete the previous blob version for versioning-enabled storage accounts.
+    :param bool list:
+        Valid for Service and Container resource types only.
+    :param bool add:
+        Valid for the following Object resource types only: queue messages, and append blobs.
+    :param bool create:
+        Valid for the following Object resource types only: blobs and files.
+        Users can create new blobs or files, but may not overwrite existing
+        blobs or files.
+    :param bool update:
+        Valid for the following Object resource types only: queue messages.
+    :param bool process:
+        Valid for the following Object resource type only: queue messages.
+    :keyword bool tag:
+        To enable set or get tags on the blobs in the container.
+    :keyword bool filter_by_tags:
+        To enable get blobs by tags, this should be used together with list permission.
+    :keyword bool set_immutability_policy:
+        To enable operations related to set/delete immutability policy.
+        To get immutability policy, you just need read permission.
+    :keyword bool permanent_delete:
+        To enable permanent delete of blobs.
+        Valid for the Object resource type of Blob only.
+    """
+
+    read: bool = False
+    write: bool = False
+    delete: bool = False
+    delete_previous_version: bool = False
+    list: bool = False
+    add: bool = False
+    create: bool = False
+    update: bool = False
+    process: bool = False
+    tag: bool = False
+    filter_by_tags: bool = False
+    set_immutability_policy: bool = False
+    permanent_delete: bool = False
+
+    def __init__(
+        self,
+        read: bool = False,
+        write: bool = False,
+        delete: bool = False,
+        list: bool = False,  # pylint: disable=redefined-builtin
+        add: bool = False,
+        create: bool = False,
+        update: bool = False,
+        process: bool = False,
+        delete_previous_version: bool = False,
+        **kwargs
+    ) -> None:
+        self.read = read
+        self.write = write
+        self.delete = delete
+        self.delete_previous_version = delete_previous_version
+        self.permanent_delete = kwargs.pop('permanent_delete', False)
+        self.list = list
+        self.add = add
+        self.create = create
+        self.update = update
+        self.process = process
+        self.tag = kwargs.pop('tag', False)
+        self.filter_by_tags = kwargs.pop('filter_by_tags', False)
+        self.set_immutability_policy = kwargs.pop('set_immutability_policy', False)
+        self._str = (('r' if self.read else '') +
+                     ('w' if self.write else '') +
+                     ('d' if self.delete else '') +
+                     ('x' if self.delete_previous_version else '') +
+                     ('y' if self.permanent_delete else '') +
+                     ('l' if self.list else '') +
+                     ('a' if self.add else '') +
+                     ('c' if self.create else '') +
+                     ('u' if self.update else '') +
+                     ('p' if self.process else '') +
+                     ('f' if self.filter_by_tags else '') +
+                     ('t' if self.tag else '') +
+                     ('i' if self.set_immutability_policy else '')
+                     )
+
+    def __str__(self):
+        return self._str
+
+    @classmethod
+    def from_string(cls, permission):
+        """Create AccountSasPermissions from a string.
+
+        To specify read, write, delete, etc. permissions you need only
+        include the first letter of the word in the string. E.g. for read and write
+        permissions you would provide the string "rw".
+
+        :param str permission: Specify permissions in
+            the string with the first letter of the word.
+        :return: An AccountSasPermissions object
+        :rtype: ~azure.storage.blob.AccountSasPermissions
+        """
+        p_read = 'r' in permission
+        p_write = 'w' in permission
+        p_delete = 'd' in permission
+        p_delete_previous_version = 'x' in permission
+        p_permanent_delete = 'y' in permission
+        p_list = 'l' in permission
+        p_add = 'a' in permission
+        p_create = 'c' in permission
+        p_update = 'u' in permission
+        p_process = 'p' in permission
+        p_tag = 't' in permission
+        p_filter_by_tags = 'f' in permission
+        p_set_immutability_policy = 'i' in permission
+        parsed = cls(read=p_read, write=p_write, delete=p_delete, delete_previous_version=p_delete_previous_version,
+                     list=p_list, add=p_add, create=p_create, update=p_update, process=p_process, tag=p_tag,
+                     filter_by_tags=p_filter_by_tags, set_immutability_policy=p_set_immutability_policy,
+                     permanent_delete=p_permanent_delete)
+
+        return parsed
+
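+# Example (illustrative sketch): the permission flags round-trip through the
+# short string form used in account SAS tokens, using the letters shown above.
+#
+#     perms = AccountSasPermissions(read=True, write=True, list=True)
+#     str(perms)                                     # -> 'rwl'
+#     str(AccountSasPermissions.from_string('rwl'))  # -> 'rwl'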
+
+class Services(object):
+    """Specifies the services accessible with the account SAS.
+
+    :keyword bool blob:
+        Access for the `~azure.storage.blob.BlobServiceClient`. Default is False.
+    :keyword bool queue:
+        Access for the `~azure.storage.queue.QueueServiceClient`. Default is False.
+    :keyword bool fileshare:
+        Access for the `~azure.storage.fileshare.ShareServiceClient`. Default is False.
+    """
+
+    def __init__(
+        self, *,
+        blob: bool = False,
+        queue: bool = False,
+        fileshare: bool = False
+    ) -> None:
+        self.blob = blob
+        self.queue = queue
+        self.fileshare = fileshare
+        self._str = (('b' if self.blob else '') +
+                ('q' if self.queue else '') +
+                ('f' if self.fileshare else ''))
+
+    def __str__(self):
+        return self._str
+
+    @classmethod
+    def from_string(cls, string):
+        """Create Services from a string.
+
+        To specify blob, queue, or fileshare you need only
+        include the first letter of the word in the string. E.g. for blob and queue
+        you would provide a string "bq".
+
+        :param str string: Specify blob, queue, or fileshare
+            in the string with the first letter of the word.
+        :return: A Services object
+        :rtype: ~azure.storage.blob.Services
+        """
+        res_blob = 'b' in string
+        res_queue = 'q' in string
+        res_file = 'f' in string
+
+        parsed = cls(blob=res_blob, queue=res_queue, fileshare=res_file)
+        parsed._str = string
+        return parsed
+
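+# Example (illustrative sketch): the service flags serialize to the one-letter
+# codes above, and from_string() parses them back.
+#
+#     str(Services(blob=True, queue=True))    # -> 'bq'
+#     Services.from_string('bq').fileshare    # -> False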
+
+class UserDelegationKey(object):
+    """
+    Represents a user delegation key, provided to the user by Azure Storage
+    based on their Azure Active Directory access token.
+
+    The fields are saved as simple strings since the user does not have to interact with this object;
+    to generate an identity SAS, the user can simply pass it to the right API.
+    """
+
+    signed_oid: Optional[str] = None
+    """Object ID of this token."""
+    signed_tid: Optional[str] = None
+    """Tenant ID of the tenant that issued this token."""
+    signed_start: Optional[str] = None
+    """The datetime this token becomes valid."""
+    signed_expiry: Optional[str] = None
+    """The datetime this token expires."""
+    signed_service: Optional[str] = None
+    """What service this key is valid for."""
+    signed_version: Optional[str] = None
+    """The version identifier of the REST service that created this token."""
+    value: Optional[str] = None
+    """The user delegation key."""
+
+    def __init__(self):
+        self.signed_oid = None
+        self.signed_tid = None
+        self.signed_start = None
+        self.signed_expiry = None
+        self.signed_service = None
+        self.signed_version = None
+        self.value = None
+
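+# Illustrative usage (a sketch; assumes an AD-authenticated BlobServiceClient):
+# a UserDelegationKey is typically obtained via
+# BlobServiceClient.get_user_delegation_key(key_start_time, key_expiry_time)
+# and then passed to the SAS-generation helpers.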
+
+class StorageConfiguration(Configuration):
+    """
+    Specifies the configurable values used in Azure Storage.
+
+    :param int max_single_put_size: If the blob size is less than or equal max_single_put_size, then the blob will be
+        uploaded with only one http PUT request. If the blob size is larger than max_single_put_size,
+        the blob will be uploaded in chunks. Defaults to 64*1024*1024, or 64MB.
+    :param int copy_polling_interval: The interval in seconds for polling copy operations.
+    :param int max_block_size: The maximum chunk size for uploading a block blob in chunks.
+        Defaults to 4*1024*1024, or 4MB.
+    :param int min_large_block_upload_threshold: The minimum chunk size required to use the memory efficient
+        algorithm when uploading a block blob.
+    :param bool use_byte_buffer: Use a byte buffer for block blob uploads. Defaults to False.
+    :param int max_page_size: The maximum chunk size for uploading a page blob. Defaults to 4*1024*1024, or 4MB.
+    :param int min_large_chunk_upload_threshold: The max size for a single put operation.
+    :param int max_single_get_size: The maximum size for a blob to be downloaded in a single call;
+        anything beyond this is downloaded in chunks (potentially in parallel). Defaults to 32*1024*1024, or 32MB.
+    :param int max_chunk_get_size: The maximum chunk size used for downloading a blob. Defaults to 4*1024*1024,
+        or 4MB.
+    :param int max_range_size: The maximum range size used for uploading file data.
+    """
+
+    max_single_put_size: int
+    copy_polling_interval: int
+    max_block_size: int
+    min_large_block_upload_threshold: int
+    use_byte_buffer: bool
+    max_page_size: int
+    min_large_chunk_upload_threshold: int
+    max_single_get_size: int
+    max_chunk_get_size: int
+    max_range_size: int
+    user_agent_policy: UserAgentPolicy
+
+    def __init__(self, **kwargs):
+        super(StorageConfiguration, self).__init__(**kwargs)
+        self.max_single_put_size = kwargs.pop('max_single_put_size', 64 * 1024 * 1024)
+        self.copy_polling_interval = 15
+        self.max_block_size = kwargs.pop('max_block_size', 4 * 1024 * 1024)
+        self.min_large_block_upload_threshold = kwargs.get('min_large_block_upload_threshold', 4 * 1024 * 1024 + 1)
+        self.use_byte_buffer = kwargs.pop('use_byte_buffer', False)
+        self.max_page_size = kwargs.pop('max_page_size', 4 * 1024 * 1024)
+        self.min_large_chunk_upload_threshold = kwargs.pop('min_large_chunk_upload_threshold', 100 * 1024 * 1024 + 1)
+        self.max_single_get_size = kwargs.pop('max_single_get_size', 32 * 1024 * 1024)
+        self.max_chunk_get_size = kwargs.pop('max_chunk_get_size', 4 * 1024 * 1024)
+        self.max_range_size = kwargs.pop('max_range_size', 4 * 1024 * 1024)
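+
+# Example (illustrative sketch): every size is read from kwargs with the
+# documented default, so callers can tune transfer behavior selectively.
+#
+#     config = StorageConfiguration(max_block_size=8 * 1024 * 1024,
+#                                   max_single_put_size=16 * 1024 * 1024)
+#     config.copy_polling_interval   # -> 15 (fixed in __init__, not a kwarg)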
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/blob/_shared/parser.py b/.venv/lib/python3.12/site-packages/azure/storage/blob/_shared/parser.py
new file mode 100644
index 00000000..112c1984
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/blob/_shared/parser.py
@@ -0,0 +1,53 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+
+from datetime import datetime, timezone
+from typing import Optional
+
+EPOCH_AS_FILETIME = 116444736000000000  # January 1, 1970 as MS filetime
+HUNDREDS_OF_NANOSECONDS = 10000000
+
+
+def _to_utc_datetime(value: datetime) -> str:
+    return value.strftime('%Y-%m-%dT%H:%M:%SZ')
+
+
+def _rfc_1123_to_datetime(rfc_1123: str) -> Optional[datetime]:
+    """Converts an RFC 1123 date string to a UTC datetime.
+
+    :param str rfc_1123: The time and date in RFC 1123 format.
+    :returns: The time and date in UTC datetime format.
+    :rtype: datetime
+    """
+    if not rfc_1123:
+        return None
+
+    return datetime.strptime(rfc_1123, "%a, %d %b %Y %H:%M:%S %Z")
+
+
+def _filetime_to_datetime(filetime: str) -> Optional[datetime]:
+    """Converts an MS filetime string to a UTC datetime. "0" indicates None.
+    If parsing MS Filetime fails, tries RFC 1123 as backup.
+
+    :param str filetime: The time and date in MS filetime format.
+    :returns: The time and date in UTC datetime format.
+    :rtype: datetime
+    """
+    if not filetime:
+        return None
+
+    # Try to convert to MS Filetime
+    try:
+        temp_filetime = int(filetime)
+        if temp_filetime == 0:
+            return None
+
+        return datetime.fromtimestamp((temp_filetime - EPOCH_AS_FILETIME) / HUNDREDS_OF_NANOSECONDS, tz=timezone.utc)
+    except ValueError:
+        pass
+
+    # Try RFC 1123 as backup
+    return _rfc_1123_to_datetime(filetime)
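+
+
+# Worked example (illustrative): EPOCH_AS_FILETIME is exactly the Unix epoch,
+# so that filetime maps to 1970-01-01 and "0" maps to None.
+#
+#     _filetime_to_datetime("116444736000000000")
+#     # -> datetime(1970, 1, 1, 0, 0, tzinfo=timezone.utc)
+#     _filetime_to_datetime("0")   # -> None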
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/blob/_shared/policies.py b/.venv/lib/python3.12/site-packages/azure/storage/blob/_shared/policies.py
new file mode 100644
index 00000000..ee75cd5a
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/blob/_shared/policies.py
@@ -0,0 +1,694 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+
+import base64
+import hashlib
+import logging
+import random
+import re
+import uuid
+from io import SEEK_SET, UnsupportedOperation
+from time import time
+from typing import Any, Dict, Optional, TYPE_CHECKING
+from urllib.parse import (
+    parse_qsl,
+    urlencode,
+    urlparse,
+    urlunparse,
+)
+from wsgiref.handlers import format_date_time
+
+from azure.core.exceptions import AzureError, ServiceRequestError, ServiceResponseError
+from azure.core.pipeline.policies import (
+    BearerTokenCredentialPolicy,
+    HeadersPolicy,
+    HTTPPolicy,
+    NetworkTraceLoggingPolicy,
+    RequestHistory,
+    SansIOHTTPPolicy
+)
+
+from .authentication import AzureSigningError, StorageHttpChallenge
+from .constants import DEFAULT_OAUTH_SCOPE
+from .models import LocationMode
+
+if TYPE_CHECKING:
+    from azure.core.credentials import TokenCredential
+    from azure.core.pipeline.transport import (  # pylint: disable=non-abstract-transport-import
+        PipelineRequest,
+        PipelineResponse
+    )
+
+
+_LOGGER = logging.getLogger(__name__)
+
+
+def encode_base64(data):
+    if isinstance(data, str):
+        data = data.encode('utf-8')
+    encoded = base64.b64encode(data)
+    return encoded.decode('utf-8')
+
+
+# Are we out of retries?
+def is_exhausted(settings):
+    retry_counts = (settings['total'], settings['connect'], settings['read'], settings['status'])
+    retry_counts = list(filter(None, retry_counts))
+    if not retry_counts:
+        return False
+    return min(retry_counts) < 0
+
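+# Worked example (illustrative): each counter starts at its configured maximum
+# and is decremented on failures; retries are exhausted once any tracked
+# counter drops below zero.
+#
+#     is_exhausted({'total': 1, 'connect': 3, 'read': 3, 'status': 3})   # -> False
+#     is_exhausted({'total': -1, 'connect': 3, 'read': 3, 'status': 3})  # -> True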
+
+def retry_hook(settings, **kwargs):
+    if settings['hook']:
+        settings['hook'](retry_count=settings['count'] - 1, location_mode=settings['mode'], **kwargs)
+
+
+# Is this method/status code retryable? (Based on allowlists and control
+# variables such as the number of total retries to allow, whether to
+# respect the Retry-After header, whether this header is present, and
+# whether the returned status code is on the list of status codes to
+# be retried upon on the presence of the aforementioned header)
+def is_retry(response, mode):
+    status = response.http_response.status_code
+    if 300 <= status < 500:
+        # An exception occurred, but in most cases it was expected. Examples could
+        # include a 409 Conflict or 412 Precondition Failed.
+        if status == 404 and mode == LocationMode.SECONDARY:
+            # Response code 404 should be retried if secondary was used.
+            return True
+        if status == 408:
+            # Response code 408 is a timeout and should be retried.
+            return True
+        return False
+    if status >= 500:
+        # Response codes above 500 with the exception of 501 Not Implemented and
+        # 505 Version Not Supported indicate a server issue and should be retried.
+        if status in [501, 505]:
+            return False
+        return True
+    return False
+
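+# Worked examples (illustrative status-code handling):
+#
+#     500 -> retry; 501/505 -> no retry (not implemented / version not supported)
+#     408 -> retry (timeout); other 3xx/4xx -> no retry, except
+#     404 -> retry only when the secondary endpoint was used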
+
+def is_checksum_retry(response):
+    # retry if invalid content md5
+    if response.context.get('validate_content', False) and response.http_response.headers.get('content-md5'):
+        computed_md5 = response.http_request.headers.get('content-md5', None) or \
+                            encode_base64(StorageContentValidation.get_content_md5(response.http_response.body()))
+        if response.http_response.headers['content-md5'] != computed_md5:
+            return True
+    return False
+
+
+def urljoin(base_url, stub_url):
+    parsed = urlparse(base_url)
+    parsed = parsed._replace(path=parsed.path + '/' + stub_url)
+    return parsed.geturl()
+
+
+class QueueMessagePolicy(SansIOHTTPPolicy):
+
+    def on_request(self, request):
+        message_id = request.context.options.pop('queue_message_id', None)
+        if message_id:
+            request.http_request.url = urljoin(
+                request.http_request.url,
+                message_id)
+
+
+class StorageHeadersPolicy(HeadersPolicy):
+    request_id_header_name = 'x-ms-client-request-id'
+
+    def on_request(self, request: "PipelineRequest") -> None:
+        super(StorageHeadersPolicy, self).on_request(request)
+        current_time = format_date_time(time())
+        request.http_request.headers['x-ms-date'] = current_time
+
+        custom_id = request.context.options.pop('client_request_id', None)
+        request.http_request.headers['x-ms-client-request-id'] = custom_id or str(uuid.uuid1())
+
+    # def on_response(self, request, response):
+    #     # raise exception if the echoed client request id from the service is not identical to the one we sent
+    #     if self.request_id_header_name in response.http_response.headers:
+
+    #         client_request_id = request.http_request.headers.get(self.request_id_header_name)
+
+    #         if response.http_response.headers[self.request_id_header_name] != client_request_id:
+    #             raise AzureError(
+    #                 "Echoed client request ID: {} does not match sent client request ID: {}.  "
+    #                 "Service request ID: {}".format(
+    #                     response.http_response.headers[self.request_id_header_name], client_request_id,
+    #                     response.http_response.headers['x-ms-request-id']),
+    #                 response=response.http_response
+    #             )
+
+
+class StorageHosts(SansIOHTTPPolicy):
+
+    def __init__(self, hosts=None, **kwargs):  # pylint: disable=unused-argument
+        self.hosts = hosts
+        super(StorageHosts, self).__init__()
+
+    def on_request(self, request: "PipelineRequest") -> None:
+        request.context.options['hosts'] = self.hosts
+        parsed_url = urlparse(request.http_request.url)
+
+        # Detect what location mode we're currently requesting with
+        location_mode = LocationMode.PRIMARY
+        for key, value in self.hosts.items():
+            if parsed_url.netloc == value:
+                location_mode = key
+
+        # See if a specific location mode has been specified, and if so, redirect
+        use_location = request.context.options.pop('use_location', None)
+        if use_location:
+            # Lock retries to the specific location
+            request.context.options['retry_to_secondary'] = False
+            if use_location not in self.hosts:
+                raise ValueError(f"Attempting to use undefined host location {use_location}")
+            if use_location != location_mode:
+                # Update request URL to use the specified location
+                updated = parsed_url._replace(netloc=self.hosts[use_location])
+                request.http_request.url = updated.geturl()
+                location_mode = use_location
+
+        request.context.options['location_mode'] = location_mode
+
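+# Illustrative sketch (the host names are assumptions for the example): the
+# 'hosts' mapping pairs each LocationMode with a netloc, e.g.
+#
+#     {LocationMode.PRIMARY: 'account.blob.core.windows.net',
+#      LocationMode.SECONDARY: 'account-secondary.blob.core.windows.net'}
+#
+# and a per-request 'use_location' option pins the request (and its retries)
+# to one of those hosts.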
+
+class StorageLoggingPolicy(NetworkTraceLoggingPolicy):
+    """A policy that logs HTTP request and response to the DEBUG logger.
+
+    This accepts both global configuration, and the per-request option "logging_enable".
+    """
+
+    def __init__(self, logging_enable: bool = False, **kwargs) -> None:
+        self.logging_body = kwargs.pop("logging_body", False)
+        super(StorageLoggingPolicy, self).__init__(logging_enable=logging_enable, **kwargs)
+
+    def on_request(self, request: "PipelineRequest") -> None:
+        http_request = request.http_request
+        options = request.context.options
+        self.logging_body = self.logging_body or options.pop("logging_body", False)
+        if options.pop("logging_enable", self.enable_http_logger):
+            request.context["logging_enable"] = True
+            if not _LOGGER.isEnabledFor(logging.DEBUG):
+                return
+
+            try:
+                log_url = http_request.url
+                query_params = http_request.query
+                if 'sig' in query_params:
+                    # scrub the signature value itself, keeping the 'sig=' key visible
+                    log_url = log_url.replace(query_params['sig'], "*****")
+                _LOGGER.debug("Request URL: %r", log_url)
+                _LOGGER.debug("Request method: %r", http_request.method)
+                _LOGGER.debug("Request headers:")
+                for header, value in http_request.headers.items():
+                    if header.lower() == 'authorization':
+                        value = '*****'
+                    elif header.lower() == 'x-ms-copy-source' and 'sig' in value:
+                        # take the url apart and scrub away the signed signature
+                        scheme, netloc, path, params, query, fragment = urlparse(value)
+                        parsed_qs = dict(parse_qsl(query))
+                        parsed_qs['sig'] = '*****'
+
+                        # the SAS needs to be put back together
+                        value = urlunparse((scheme, netloc, path, params, urlencode(parsed_qs), fragment))
+
+                    _LOGGER.debug("    %r: %r", header, value)
+                _LOGGER.debug("Request body:")
+
+                if self.logging_body:
+                    _LOGGER.debug(str(http_request.body))
+                else:
+                    # We don't want to log the binary data of a file upload.
+                    _LOGGER.debug("Hidden body, please use logging_body to show body")
+            except Exception as err:  # pylint: disable=broad-except
+                _LOGGER.debug("Failed to log request: %r", err)
+
+    def on_response(self, request: "PipelineRequest", response: "PipelineResponse") -> None:
+        if response.context.pop("logging_enable", self.enable_http_logger):
+            if not _LOGGER.isEnabledFor(logging.DEBUG):
+                return
+
+            try:
+                _LOGGER.debug("Response status: %r", response.http_response.status_code)
+                _LOGGER.debug("Response headers:")
+                for res_header, value in response.http_response.headers.items():
+                    _LOGGER.debug("    %r: %r", res_header, value)
+
+                # We don't want to log binary data if the response is a file.
+                _LOGGER.debug("Response content:")
+                pattern = re.compile(r'attachment; ?filename=["\w.]+', re.IGNORECASE)
+                header = response.http_response.headers.get('content-disposition')
+                resp_content_type = response.http_response.headers.get("content-type", "")
+
+                if header and pattern.match(header):
+                    filename = header.partition('=')[2]
+                    _LOGGER.debug("File attachments: %s", filename)
+                elif resp_content_type.endswith("octet-stream"):
+                    _LOGGER.debug("Body contains binary data.")
+                elif resp_content_type.startswith("image"):
+                    _LOGGER.debug("Body contains image data.")
+
+                if self.logging_body and resp_content_type.startswith("text"):
+                    _LOGGER.debug(response.http_response.text())
+                elif self.logging_body:
+                    try:
+                        _LOGGER.debug(response.http_response.body())
+                    except ValueError:
+                        _LOGGER.debug("Body is streamable")
+
+            except Exception as err:  # pylint: disable=broad-except
+                _LOGGER.debug("Failed to log response: %s", repr(err))
+
+
+class StorageRequestHook(SansIOHTTPPolicy):
+
+    def __init__(self, **kwargs):
+        self._request_callback = kwargs.get('raw_request_hook')
+        super(StorageRequestHook, self).__init__()
+
+    def on_request(self, request: "PipelineRequest") -> None:
+        request_callback = request.context.options.pop('raw_request_hook', self._request_callback)
+        if request_callback:
+            request_callback(request)
+
+
+class StorageResponseHook(HTTPPolicy):
+
+    def __init__(self, **kwargs):
+        self._response_callback = kwargs.get('raw_response_hook')
+        super(StorageResponseHook, self).__init__()
+
+    def send(self, request: "PipelineRequest") -> "PipelineResponse":
+        # Values could be 0
+        data_stream_total = request.context.get('data_stream_total')
+        if data_stream_total is None:
+            data_stream_total = request.context.options.pop('data_stream_total', None)
+        download_stream_current = request.context.get('download_stream_current')
+        if download_stream_current is None:
+            download_stream_current = request.context.options.pop('download_stream_current', None)
+        upload_stream_current = request.context.get('upload_stream_current')
+        if upload_stream_current is None:
+            upload_stream_current = request.context.options.pop('upload_stream_current', None)
+
+        response_callback = request.context.get('response_callback') or \
+            request.context.options.pop('raw_response_hook', self._response_callback)
+
+        response = self.next.send(request)
+
+        will_retry = is_retry(response, request.context.options.get('mode')) or is_checksum_retry(response)
+        # Auth error could come from Bearer challenge, in which case this request will be made again
+        is_auth_error = response.http_response.status_code == 401
+        should_update_counts = not (will_retry or is_auth_error)
+
+        if should_update_counts and download_stream_current is not None:
+            download_stream_current += int(response.http_response.headers.get('Content-Length', 0))
+            if data_stream_total is None:
+                content_range = response.http_response.headers.get('Content-Range')
+                if content_range:
+                    data_stream_total = int(content_range.split(' ', 1)[1].split('/', 1)[1])
+                else:
+                    data_stream_total = download_stream_current
+        elif should_update_counts and upload_stream_current is not None:
+            upload_stream_current += int(response.http_request.headers.get('Content-Length', 0))
+        for pipeline_obj in [request, response]:
+            if hasattr(pipeline_obj, 'context'):
+                pipeline_obj.context['data_stream_total'] = data_stream_total
+                pipeline_obj.context['download_stream_current'] = download_stream_current
+                pipeline_obj.context['upload_stream_current'] = upload_stream_current
+        if response_callback:
+            response_callback(response)
+            request.context['response_callback'] = response_callback
+        return response
+
+
+class StorageContentValidation(SansIOHTTPPolicy):
+    """A simple policy that sends the given headers
+    with the request.
+
+    This will overwrite any headers already defined in the request.
+    """
+    header_name = 'Content-MD5'
+
+    def __init__(self, **kwargs: Any) -> None:  # pylint: disable=unused-argument
+        super(StorageContentValidation, self).__init__()
+
+    @staticmethod
+    def get_content_md5(data):
+        # Since HTTP does not differentiate between no content and empty content,
+        # we have to perform a None check.
+        data = data or b""
+        md5 = hashlib.md5() # nosec
+        if isinstance(data, bytes):
+            md5.update(data)
+        elif hasattr(data, 'read'):
+            pos = 0
+            try:
+                pos = data.tell()
+            except:  # pylint: disable=bare-except
+                pass
+            for chunk in iter(lambda: data.read(4096), b""):
+                md5.update(chunk)
+            try:
+                data.seek(pos, SEEK_SET)
+            except (AttributeError, IOError) as exc:
+                raise ValueError("Data should be bytes or a seekable file-like object.") from exc
+        else:
+            raise ValueError("Data should be bytes or a seekable file-like object.")
+
+        return md5.digest()
+
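+    # Worked example (illustrative): the digest is base64-encoded before being
+    # compared with the service's content-md5 header.
+    #
+    #     encode_base64(StorageContentValidation.get_content_md5(b"hello"))
+    #     # -> 'XUFAKrxLKna5cZ2REBfFkg=='
+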
+    def on_request(self, request: "PipelineRequest") -> None:
+        validate_content = request.context.options.pop('validate_content', False)
+        if validate_content and request.http_request.method != 'GET':
+            computed_md5 = encode_base64(StorageContentValidation.get_content_md5(request.http_request.data))
+            request.http_request.headers[self.header_name] = computed_md5
+            request.context['validate_content_md5'] = computed_md5
+        request.context['validate_content'] = validate_content
+
+    def on_response(self, request: "PipelineRequest", response: "PipelineResponse") -> None:
+        if response.context.get('validate_content', False) and response.http_response.headers.get('content-md5'):
+            computed_md5 = request.context.get('validate_content_md5') or \
+                encode_base64(StorageContentValidation.get_content_md5(response.http_response.body()))
+            if response.http_response.headers['content-md5'] != computed_md5:
+                raise AzureError((
+                    f"MD5 mismatch. Expected value is '{response.http_response.headers['content-md5']}', "
+                    f"computed value is '{computed_md5}'."),
+                    response=response.http_response
+                )
+
+
+class StorageRetryPolicy(HTTPPolicy):
+    """
+    The base class for Exponential and Linear retries containing shared code.
+    """
+
+    total_retries: int
+    """The max number of retries."""
+    connect_retries: int
+    """The max number of connect retries."""
+    read_retries: int
+    """The max number of read retries."""
+    status_retries: int
+    """The max number of status retries."""
+    retry_to_secondary: bool
+    """Whether the secondary endpoint should be retried."""
+
+    def __init__(self, **kwargs: Any) -> None:
+        self.total_retries = kwargs.pop('retry_total', 10)
+        self.connect_retries = kwargs.pop('retry_connect', 3)
+        self.read_retries = kwargs.pop('retry_read', 3)
+        self.status_retries = kwargs.pop('retry_status', 3)
+        self.retry_to_secondary = kwargs.pop('retry_to_secondary', False)
+        super(StorageRetryPolicy, self).__init__()
+
+    def _set_next_host_location(self, settings: Dict[str, Any], request: "PipelineRequest") -> None:
+        """
+        A function which sets the next host location on the request, if applicable.
+
+        :param Dict[str, Any] settings: The configurable values pertaining to the next host location.
+        :param PipelineRequest request: A pipeline request object.
+        """
+        if settings['hosts'] and all(settings['hosts'].values()):
+            url = urlparse(request.url)
+            # If there's more than one possible location, retry to the alternative
+            if settings['mode'] == LocationMode.PRIMARY:
+                settings['mode'] = LocationMode.SECONDARY
+            else:
+                settings['mode'] = LocationMode.PRIMARY
+            updated = url._replace(netloc=settings['hosts'].get(settings['mode']))
+            request.url = updated.geturl()
+
+    def configure_retries(self, request: "PipelineRequest") -> Dict[str, Any]:
+        body_position = None
+        if hasattr(request.http_request.body, 'read'):
+            try:
+                body_position = request.http_request.body.tell()
+            except (AttributeError, UnsupportedOperation):
+                # if body position cannot be obtained, then retries will not work
+                pass
+        options = request.context.options
+        return {
+            'total': options.pop("retry_total", self.total_retries),
+            'connect': options.pop("retry_connect", self.connect_retries),
+            'read': options.pop("retry_read", self.read_retries),
+            'status': options.pop("retry_status", self.status_retries),
+            'retry_secondary': options.pop("retry_to_secondary", self.retry_to_secondary),
+            'mode': options.pop("location_mode", LocationMode.PRIMARY),
+            'hosts': options.pop("hosts", None),
+            'hook': options.pop("retry_hook", None),
+            'body_position': body_position,
+            'count': 0,
+            'history': []
+        }
+
+    def get_backoff_time(self, settings: Dict[str, Any]) -> float:  # pylint: disable=unused-argument
+        """ Formula for computing the current backoff.
+        Should be calculated by child class.
+
+        :param Dict[str, Any] settings: The configurable values pertaining to the backoff time.
+        :returns: The backoff time.
+        :rtype: float
+        """
+        return 0
+
+    def sleep(self, settings, transport):
+        backoff = self.get_backoff_time(settings)
+        if not backoff or backoff < 0:
+            return
+        transport.sleep(backoff)
+
+    def increment(
+        self, settings: Dict[str, Any],
+        request: "PipelineRequest",
+        response: Optional["PipelineResponse"] = None,
+        error: Optional[AzureError] = None
+    ) -> bool:
+        """Increment the retry counters.
+
+        :param Dict[str, Any] settings: The configurable values pertaining to the increment operation.
+        :param PipelineRequest request: A pipeline request object.
+        :param Optional[PipelineResponse] response: A pipeline response object.
+        :param Optional[AzureError] error: An error encountered during the request, or
+            None if the response was received successfully.
+        :returns: Whether the retry attempts are exhausted.
+        :rtype: bool
+        """
+        settings['total'] -= 1
+
+        if error and isinstance(error, ServiceRequestError):
+            # Errors when we're fairly sure that the server did not receive the
+            # request, so it should be safe to retry.
+            settings['connect'] -= 1
+            settings['history'].append(RequestHistory(request, error=error))
+
+        elif error and isinstance(error, ServiceResponseError):
+            # Errors that occur after the request has been started, so we should
+            # assume that the server began processing it.
+            settings['read'] -= 1
+            settings['history'].append(RequestHistory(request, error=error))
+
+        else:
+            # Incrementing because of a server error like a 500 in
+            # the status_forcelist, where the given method is in the allowlist.
+            if response:
+                settings['status'] -= 1
+                settings['history'].append(RequestHistory(request, http_response=response))
+
+        if not is_exhausted(settings):
+            if request.method not in ['PUT'] and settings['retry_secondary']:
+                self._set_next_host_location(settings, request)
+
+            # rewind the request body if it is a stream
+            if request.body and hasattr(request.body, 'read'):
+                # no position was saved, then retry would not work
+                if settings['body_position'] is None:
+                    return False
+                try:
+                    # attempt to rewind the body to the initial position
+                    request.body.seek(settings['body_position'], SEEK_SET)
+                except (UnsupportedOperation, ValueError):
+                    # if body is not seekable, then retry would not work
+                    return False
+            settings['count'] += 1
+            return True
+        return False
+
+    def send(self, request):
+        retries_remaining = True
+        response = None
+        retry_settings = self.configure_retries(request)
+        while retries_remaining:
+            try:
+                response = self.next.send(request)
+                if is_retry(response, retry_settings['mode']) or is_checksum_retry(response):
+                    retries_remaining = self.increment(
+                        retry_settings,
+                        request=request.http_request,
+                        response=response.http_response)
+                    if retries_remaining:
+                        retry_hook(
+                            retry_settings,
+                            request=request.http_request,
+                            response=response.http_response,
+                            error=None)
+                        self.sleep(retry_settings, request.context.transport)
+                        continue
+                break
+            except AzureError as err:
+                if isinstance(err, AzureSigningError):
+                    raise
+                retries_remaining = self.increment(
+                    retry_settings, request=request.http_request, error=err)
+                if retries_remaining:
+                    retry_hook(
+                        retry_settings,
+                        request=request.http_request,
+                        response=None,
+                        error=err)
+                    self.sleep(retry_settings, request.context.transport)
+                    continue
+                raise err
+        if retry_settings['history']:
+            response.context['history'] = retry_settings['history']
+        response.http_response.location_mode = retry_settings['mode']
+        return response
+
+
+class ExponentialRetry(StorageRetryPolicy):
+    """Exponential retry."""
+
+    initial_backoff: int
+    """The initial backoff interval, in seconds, for the first retry."""
+    increment_base: int
+    """The base, in seconds, to increment the initial_backoff by after the
+    first retry."""
+    random_jitter_range: int
+    """A number in seconds which indicates a range to jitter/randomize for the back-off interval."""
+
+    def __init__(
+        self, initial_backoff: int = 15,
+        increment_base: int = 3,
+        retry_total: int = 3,
+        retry_to_secondary: bool = False,
+        random_jitter_range: int = 3,
+        **kwargs: Any
+    ) -> None:
+        """
+        Constructs an Exponential retry object. The initial_backoff is used for
+        the first retry. Subsequent retries are retried after initial_backoff +
+        increment_base^retry_count seconds.
+
+        :param int initial_backoff:
+            The initial backoff interval, in seconds, for the first retry.
+        :param int increment_base:
+            The base, in seconds, to increment the initial_backoff by after the
+            first retry.
+        :param int retry_total:
+            The maximum number of retry attempts.
+        :param bool retry_to_secondary:
+            Whether the request should be retried to secondary, if able. This should
+            only be enabled if RA-GRS accounts are used and potentially stale data
+            can be handled.
+        :param int random_jitter_range:
+            A number in seconds which indicates a range to jitter/randomize for the back-off interval.
+            For example, a random_jitter_range of 3 results in the back-off interval x to vary between x+3 and x-3.
+        """
+        self.initial_backoff = initial_backoff
+        self.increment_base = increment_base
+        self.random_jitter_range = random_jitter_range
+        super(ExponentialRetry, self).__init__(
+            retry_total=retry_total, retry_to_secondary=retry_to_secondary, **kwargs)
+
+    def get_backoff_time(self, settings: Dict[str, Any]) -> float:
+        """
+        Calculates how long to sleep before retrying.
+
+        :param Dict[str, Any] settings: The configurable values pertaining to the backoff time.
+        :returns:
+            A float indicating how long to wait before retrying the request,
+            or None to indicate no retry should be performed.
+        :rtype: float
+        """
+        random_generator = random.Random()
+        backoff = self.initial_backoff + (0 if settings['count'] == 0 else pow(self.increment_base, settings['count']))
+        random_range_start = backoff - self.random_jitter_range if backoff > self.random_jitter_range else 0
+        random_range_end = backoff + self.random_jitter_range
+        return random_generator.uniform(random_range_start, random_range_end)
+
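+# Worked example (illustrative, default settings): increment() bumps 'count'
+# before the sleep, so the first retry computes its delay with count=1.
+#
+#     retry = ExponentialRetry()            # initial_backoff=15, increment_base=3
+#     retry.get_backoff_time({'count': 1})  # ~18s +/- 3s jitter (15 + 3**1)
+#     retry.get_backoff_time({'count': 2})  # ~24s +/- 3s jitter (15 + 3**2)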
+
+class LinearRetry(StorageRetryPolicy):
+    """Linear retry."""
+
+    backoff: int
+    """The backoff interval, in seconds, between retries."""
+    random_jitter_range: int
+    """A number in seconds which indicates a range to jitter/randomize for the back-off interval."""
+
+    def __init__(
+        self, backoff: int = 15,
+        retry_total: int = 3,
+        retry_to_secondary: bool = False,
+        random_jitter_range: int = 3,
+        **kwargs: Any
+    ) -> None:
+        """
+        Constructs a Linear retry object.
+
+        :param int backoff:
+            The backoff interval, in seconds, between retries.
+        :param int retry_total:
+            The maximum number of retry attempts.
+        :param bool retry_to_secondary:
+            Whether the request should be retried to secondary, if able. This should
+            only be enabled if RA-GRS accounts are used and potentially stale data
+            can be handled.
+        :param int random_jitter_range:
+            A number in seconds which indicates a range to jitter/randomize for the back-off interval.
+            For example, a random_jitter_range of 3 results in the back-off interval x to vary between x+3 and x-3.
+        """
+        self.backoff = backoff
+        self.random_jitter_range = random_jitter_range
+        super(LinearRetry, self).__init__(
+            retry_total=retry_total, retry_to_secondary=retry_to_secondary, **kwargs)
+
+    def get_backoff_time(self, settings: Dict[str, Any]) -> float:
+        """
+        Calculates how long to sleep before retrying.
+
+        :param Dict[str, Any] settings: The configurable values pertaining to the backoff time.
+        :returns:
+            A float indicating how long to wait before retrying the request,
+            or None to indicate no retry should be performed.
+        :rtype: float
+        """
+        random_generator = random.Random()
+        # the backoff interval normally does not change, however there is the possibility
+        # that it was modified by accessing the property directly after initializing the object
+        random_range_start = self.backoff - self.random_jitter_range \
+            if self.backoff > self.random_jitter_range else 0
+        random_range_end = self.backoff + self.random_jitter_range
+        return random_generator.uniform(random_range_start, random_range_end)
+
+
+class StorageBearerTokenCredentialPolicy(BearerTokenCredentialPolicy):
+    """ Custom Bearer token credential policy for following Storage Bearer challenges """
+
+    def __init__(self, credential: "TokenCredential", audience: str, **kwargs: Any) -> None:
+        super(StorageBearerTokenCredentialPolicy, self).__init__(credential, audience, **kwargs)
+
+    def on_challenge(self, request: "PipelineRequest", response: "PipelineResponse") -> bool:
+        try:
+            auth_header = response.http_response.headers.get("WWW-Authenticate")
+            challenge = StorageHttpChallenge(auth_header)
+        except ValueError:
+            return False
+
+        scope = challenge.resource_id + DEFAULT_OAUTH_SCOPE
+        self.authorize_request(request, scope, tenant_id=challenge.tenant_id)
+
+        return True
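+
+# Illustrative sketch (the header shape below is an assumption, not taken from
+# this module): a 401 challenge such as
+#
+#     WWW-Authenticate: Bearer authorization_uri=https://login.microsoftonline.com/<tenant>/oauth2/authorize resource_id=https://storage.azure.com
+#
+# is parsed by StorageHttpChallenge, and the request is re-authorized for the
+# scope challenge.resource_id + DEFAULT_OAUTH_SCOPE.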
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/blob/_shared/policies_async.py b/.venv/lib/python3.12/site-packages/azure/storage/blob/_shared/policies_async.py
new file mode 100644
index 00000000..86a4b4c0
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/blob/_shared/policies_async.py
@@ -0,0 +1,296 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+# pylint: disable=invalid-overridden-method
+
+import asyncio
+import logging
+import random
+from typing import Any, Dict, TYPE_CHECKING
+
+from azure.core.exceptions import AzureError, StreamClosedError, StreamConsumedError
+from azure.core.pipeline.policies import AsyncBearerTokenCredentialPolicy, AsyncHTTPPolicy
+
+from .authentication import AzureSigningError, StorageHttpChallenge
+from .constants import DEFAULT_OAUTH_SCOPE
+from .policies import encode_base64, is_retry, StorageContentValidation, StorageRetryPolicy
+
+if TYPE_CHECKING:
+    from azure.core.credentials_async import AsyncTokenCredential
+    from azure.core.pipeline.transport import (  # pylint: disable=non-abstract-transport-import
+        PipelineRequest,
+        PipelineResponse
+    )
+
+
+_LOGGER = logging.getLogger(__name__)
+
+
+async def retry_hook(settings, **kwargs):
+    if settings['hook']:
+        if asyncio.iscoroutinefunction(settings['hook']):
+            await settings['hook'](
+                retry_count=settings['count'] - 1,
+                location_mode=settings['mode'],
+                **kwargs)
+        else:
+            settings['hook'](
+                retry_count=settings['count'] - 1,
+                location_mode=settings['mode'],
+                **kwargs)
+
+
+async def is_checksum_retry(response):
+    # retry if invalid content md5
+    if response.context.get('validate_content', False) and response.http_response.headers.get('content-md5'):
+        try:
+            await response.http_response.load_body()  # Load the body in memory and close the socket
+        except (StreamClosedError, StreamConsumedError):
+            pass
+        computed_md5 = response.http_request.headers.get('content-md5', None) or \
+                       encode_base64(StorageContentValidation.get_content_md5(response.http_response.body()))
+        if response.http_response.headers['content-md5'] != computed_md5:
+            return True
+    return False
+
+
+class AsyncStorageResponseHook(AsyncHTTPPolicy):
+
+    def __init__(self, **kwargs):
+        self._response_callback = kwargs.get('raw_response_hook')
+        super(AsyncStorageResponseHook, self).__init__()
+
+    async def send(self, request: "PipelineRequest") -> "PipelineResponse":
+        # Values could be 0
+        data_stream_total = request.context.get('data_stream_total')
+        if data_stream_total is None:
+            data_stream_total = request.context.options.pop('data_stream_total', None)
+        download_stream_current = request.context.get('download_stream_current')
+        if download_stream_current is None:
+            download_stream_current = request.context.options.pop('download_stream_current', None)
+        upload_stream_current = request.context.get('upload_stream_current')
+        if upload_stream_current is None:
+            upload_stream_current = request.context.options.pop('upload_stream_current', None)
+
+        response_callback = request.context.get('response_callback') or \
+            request.context.options.pop('raw_response_hook', self._response_callback)
+
+        response = await self.next.send(request)
+        will_retry = is_retry(response, request.context.options.get('mode')) or await is_checksum_retry(response)
+
+        # Auth error could come from Bearer challenge, in which case this request will be made again
+        is_auth_error = response.http_response.status_code == 401
+        should_update_counts = not (will_retry or is_auth_error)
+
+        if should_update_counts and download_stream_current is not None:
+            download_stream_current += int(response.http_response.headers.get('Content-Length', 0))
+            if data_stream_total is None:
+                content_range = response.http_response.headers.get('Content-Range')
+                if content_range:
+                    data_stream_total = int(content_range.split(' ', 1)[1].split('/', 1)[1])
+                else:
+                    data_stream_total = download_stream_current
+        elif should_update_counts and upload_stream_current is not None:
+            upload_stream_current += int(response.http_request.headers.get('Content-Length', 0))
+        for pipeline_obj in [request, response]:
+            if hasattr(pipeline_obj, 'context'):
+                pipeline_obj.context['data_stream_total'] = data_stream_total
+                pipeline_obj.context['download_stream_current'] = download_stream_current
+                pipeline_obj.context['upload_stream_current'] = upload_stream_current
+        if response_callback:
+            if asyncio.iscoroutinefunction(response_callback):
+                await response_callback(response) # type: ignore
+            else:
+                response_callback(response)
+            request.context['response_callback'] = response_callback
+        return response
+
+
+class AsyncStorageRetryPolicy(StorageRetryPolicy):
+    """
+    The base class for Exponential and Linear retries containing shared code.
+    """
+
+    async def sleep(self, settings, transport):
+        backoff = self.get_backoff_time(settings)
+        if not backoff or backoff < 0:
+            return
+        await transport.sleep(backoff)
+
+    async def send(self, request):
+        retries_remaining = True
+        response = None
+        retry_settings = self.configure_retries(request)
+        while retries_remaining:
+            try:
+                response = await self.next.send(request)
+                if is_retry(response, retry_settings['mode']) or await is_checksum_retry(response):
+                    retries_remaining = self.increment(
+                        retry_settings,
+                        request=request.http_request,
+                        response=response.http_response)
+                    if retries_remaining:
+                        await retry_hook(
+                            retry_settings,
+                            request=request.http_request,
+                            response=response.http_response,
+                            error=None)
+                        await self.sleep(retry_settings, request.context.transport)
+                        continue
+                break
+            except AzureError as err:
+                if isinstance(err, AzureSigningError):
+                    raise
+                retries_remaining = self.increment(
+                    retry_settings, request=request.http_request, error=err)
+                if retries_remaining:
+                    await retry_hook(
+                        retry_settings,
+                        request=request.http_request,
+                        response=None,
+                        error=err)
+                    await self.sleep(retry_settings, request.context.transport)
+                    continue
+                raise err
+        if retry_settings['history']:
+            response.context['history'] = retry_settings['history']
+        response.http_response.location_mode = retry_settings['mode']
+        return response
+
+
+class ExponentialRetry(AsyncStorageRetryPolicy):
+    """Exponential retry."""
+
+    initial_backoff: int
+    """The initial backoff interval, in seconds, for the first retry."""
+    increment_base: int
+    """The base, in seconds, to increment the initial_backoff by after the
+    first retry."""
+    random_jitter_range: int
+    """A number in seconds which indicates a range to jitter/randomize for the back-off interval."""
+
+    def __init__(
+        self,
+        initial_backoff: int = 15,
+        increment_base: int = 3,
+        retry_total: int = 3,
+        retry_to_secondary: bool = False,
+        random_jitter_range: int = 3, **kwargs
+    ) -> None:
+        """
+        Constructs an Exponential retry object. The initial_backoff is used for
+        the first retry. Subsequent retries are retried after initial_backoff +
+        increment_base^retry_count seconds. For example, with the defaults the
+        first retry is delayed roughly (15+3^1) = 18 seconds and the second
+        roughly (15+3^2) = 24 seconds, before random jitter is applied.
+
+        :param int initial_backoff:
+            The initial backoff interval, in seconds, for the first retry.
+        :param int increment_base:
+            The base, in seconds, to increment the initial_backoff by after the
+            first retry.
+        :param int retry_total:
+            The maximum number of retry attempts.
+        :param bool retry_to_secondary:
+            Whether the request should be retried to secondary, if able. This should
+            only be enabled if RA-GRS accounts are used and potentially stale data
+            can be handled.
+        :param int random_jitter_range:
+            A number in seconds which indicates a range to jitter/randomize for the back-off interval.
+            For example, a random_jitter_range of 3 results in the back-off interval x to vary between x+3 and x-3.
+        """
+        self.initial_backoff = initial_backoff
+        self.increment_base = increment_base
+        self.random_jitter_range = random_jitter_range
+        super(ExponentialRetry, self).__init__(
+            retry_total=retry_total, retry_to_secondary=retry_to_secondary, **kwargs)
+
+    def get_backoff_time(self, settings: Dict[str, Any]) -> float:
+        """
+        Calculates how long to sleep before retrying.
+
+        :param Dict[str, Any] settings: The configurable values pertaining to the backoff time.
+        :return:
+            A float indicating how long to wait before retrying the request,
+            or None to indicate no retry should be performed.
+        :rtype: float
+        """
+        random_generator = random.Random()
+        backoff = self.initial_backoff + (0 if settings['count'] == 0 else pow(self.increment_base, settings['count']))
+        random_range_start = backoff - self.random_jitter_range if backoff > self.random_jitter_range else 0
+        random_range_end = backoff + self.random_jitter_range
+        return random_generator.uniform(random_range_start, random_range_end)
+
+
+class LinearRetry(AsyncStorageRetryPolicy):
+    """Linear retry."""
+
+    backoff: int
+    """The backoff interval, in seconds, between retries."""
+    random_jitter_range: int
+    """A number in seconds which indicates a range to jitter/randomize for the back-off interval."""
+
+    def __init__(
+        self, backoff: int = 15,
+        retry_total: int = 3,
+        retry_to_secondary: bool = False,
+        random_jitter_range: int = 3,
+        **kwargs: Any
+    ) -> None:
+        """
+        Constructs a Linear retry object.
+
+        :param int backoff:
+            The backoff interval, in seconds, between retries.
+        :param int retry_total:
+            The maximum number of retry attempts.
+        :param bool retry_to_secondary:
+            Whether the request should be retried to secondary, if able. This should
+            only be enabled if RA-GRS accounts are used and potentially stale data
+            can be handled.
+        :param int random_jitter_range:
+            A number in seconds which indicates a range to jitter/randomize for the back-off interval.
+            For example, a random_jitter_range of 3 results in the back-off interval x to vary between x+3 and x-3.
+        """
+        self.backoff = backoff
+        self.random_jitter_range = random_jitter_range
+        super(LinearRetry, self).__init__(
+            retry_total=retry_total, retry_to_secondary=retry_to_secondary, **kwargs)
+
+    def get_backoff_time(self, settings: Dict[str, Any]) -> float:
+        """
+        Calculates how long to sleep before retrying.
+
+        :param Dict[str, Any] settings: The configurable values pertaining to the backoff time.
+        :return:
+            A float indicating how long to wait before retrying the request,
+            or None to indicate no retry should be performed.
+        :rtype: float
+        """
+        random_generator = random.Random()
+        # the backoff interval normally does not change, however there is the possibility
+        # that it was modified by accessing the property directly after initializing the object
+        random_range_start = self.backoff - self.random_jitter_range \
+            if self.backoff > self.random_jitter_range else 0
+        random_range_end = self.backoff + self.random_jitter_range
+        return random_generator.uniform(random_range_start, random_range_end)
+
+
+class AsyncStorageBearerTokenCredentialPolicy(AsyncBearerTokenCredentialPolicy):
+    """ Custom Bearer token credential policy for following Storage Bearer challenges """
+
+    def __init__(self, credential: "AsyncTokenCredential", audience: str, **kwargs: Any) -> None:
+        super(AsyncStorageBearerTokenCredentialPolicy, self).__init__(credential, audience, **kwargs)
+
+    async def on_challenge(self, request: "PipelineRequest", response: "PipelineResponse") -> bool:
+        try:
+            auth_header = response.http_response.headers.get("WWW-Authenticate")
+            challenge = StorageHttpChallenge(auth_header)
+        except ValueError:
+            return False
+
+        scope = challenge.resource_id + DEFAULT_OAUTH_SCOPE
+        await self.authorize_request(request, scope, tenant_id=challenge.tenant_id)
+
+        return True
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/blob/_shared/request_handlers.py b/.venv/lib/python3.12/site-packages/azure/storage/blob/_shared/request_handlers.py
new file mode 100644
index 00000000..54927cc7
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/blob/_shared/request_handlers.py
@@ -0,0 +1,270 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+
+import logging
+import stat
+from io import (SEEK_END, SEEK_SET, UnsupportedOperation)
+from os import fstat
+from typing import Dict, Optional
+
+import isodate
+
+
+_LOGGER = logging.getLogger(__name__)
+
+_REQUEST_DELIMITER_PREFIX = "batch_"
+_HTTP1_1_IDENTIFIER = "HTTP/1.1"
+_HTTP_LINE_ENDING = "\r\n"
+
+
+def serialize_iso(attr):
+    """Serialize Datetime object into ISO-8601 formatted string.
+
+    :param Datetime attr: Object to be serialized.
+    :rtype: str
+    :raises: ValueError if format invalid.
+    """
+    if not attr:
+        return None
+    if isinstance(attr, str):
+        attr = isodate.parse_datetime(attr)
+    try:
+        utc = attr.utctimetuple()
+        if utc.tm_year > 9999 or utc.tm_year < 1:
+            raise OverflowError("Hit max or min date")
+
+        date = f"{utc.tm_year:04}-{utc.tm_mon:02}-{utc.tm_mday:02}T{utc.tm_hour:02}:{utc.tm_min:02}:{utc.tm_sec:02}"
+        return date + 'Z'
+    except (ValueError, OverflowError) as err:
+        raise ValueError("Unable to serialize datetime object.") from err
+    except AttributeError as err:
+        raise TypeError("ISO-8601 object must be valid datetime object.") from err
+
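+# Worked example (illustrative):
+#
+#     from datetime import datetime
+#     serialize_iso(datetime(2025, 3, 28, 21, 52, 21))   # -> '2025-03-28T21:52:21Z'
+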
+def get_length(data):
+    length = None
+    # Check if object implements the __len__ method, covers most input cases such as bytearray.
+    try:
+        length = len(data)
+    except:  # pylint: disable=bare-except
+        pass
+
+    if not length:
+        # Check if the stream is a file-like stream object.
+        # If so, calculate the size using the file descriptor.
+        try:
+            fileno = data.fileno()
+        except (AttributeError, UnsupportedOperation):
+            pass
+        else:
+            try:
+                mode = fstat(fileno).st_mode
+                if stat.S_ISREG(mode) or stat.S_ISLNK(mode):
+                    # st_size is only meaningful for regular files or symlinks; other
+                    # types, e.g. sockets, may return misleading sizes like 0.
+                    return fstat(fileno).st_size
+            except OSError:
+                # Not a valid fileno; it's possible 'requests' returned
+                # a socket number instead.
+                pass
+
+        # If the stream is seekable and tell() is implemented, calculate the stream size.
+        try:
+            current_position = data.tell()
+            data.seek(0, SEEK_END)
+            length = data.tell() - current_position
+            data.seek(current_position, SEEK_SET)
+        except (AttributeError, OSError, UnsupportedOperation):
+            pass
+
+    return length
+
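+# Illustrative sketch (not part of the upstream module): a BytesIO has no
+# usable fileno(), so get_length falls back to the seek/tell branch:
+#
+#     >>> from io import BytesIO
+#     >>> get_length(BytesIO(b"abc"))
+#     3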
+
+def read_length(data):
+    try:
+        if hasattr(data, 'read'):
+            read_data = b''
+            for chunk in iter(lambda: data.read(4096), b""):
+                read_data += chunk
+            return len(read_data), read_data
+        if hasattr(data, '__iter__'):
+            read_data = b''
+            for chunk in data:
+                read_data += chunk
+            return len(read_data), read_data
+    except:  # pylint: disable=bare-except
+        pass
+    raise ValueError("Unable to calculate content length, please specify.")
+
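+# Illustrative sketch (not part of the upstream module): read_length drains
+# the stream and returns both the byte count and the bytes it consumed:
+#
+#     >>> from io import BytesIO
+#     >>> read_length(BytesIO(b"hello"))
+#     (5, b'hello')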
+
+def validate_and_format_range_headers(
+        start_range, end_range, start_range_required=True,
+        end_range_required=True, check_content_md5=False, align_to_page=False):
+    # If end range is provided, start range must be provided
+    if (start_range_required or end_range is not None) and start_range is None:
+        raise ValueError("start_range value cannot be None.")
+    if end_range_required and end_range is None:
+        raise ValueError("end_range value cannot be None.")
+
+    # Page ranges must be 512 aligned
+    if align_to_page:
+        if start_range is not None and start_range % 512 != 0:
+            raise ValueError(f"Invalid page blob start_range: {start_range}. "
+                             "The size must be aligned to a 512-byte boundary.")
+        if end_range is not None and end_range % 512 != 511:
+            raise ValueError(f"Invalid page blob end_range: {end_range}. "
+                             "The size must be aligned to a 512-byte boundary.")
+
+    # Format based on whether end_range is present
+    range_header = None
+    if end_range is not None:
+        range_header = f'bytes={start_range}-{end_range}'
+    elif start_range is not None:
+        range_header = f"bytes={start_range}-"
+
+    # Content MD5 can only be provided for a complete range less than 4MB in size
+    range_validation = None
+    if check_content_md5:
+        if start_range is None or end_range is None:
+            raise ValueError("Both start and end range required for MD5 content validation.")
+        if end_range - start_range > 4 * 1024 * 1024:
+            raise ValueError("Getting content MD5 for a range greater than 4MB is not supported.")
+        range_validation = 'true'
+
+    return range_header, range_validation
+
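+# Illustrative sketch (not part of the upstream module): the first 512 bytes
+# of a page blob form a valid, MD5-checkable range:
+#
+#     >>> validate_and_format_range_headers(0, 511, align_to_page=True,
+#     ...                                   check_content_md5=True)
+#     ('bytes=0-511', 'true')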
+
+def add_metadata_headers(metadata=None):
+    # type: (Optional[Dict[str, str]]) -> Dict[str, str]
+    headers = {}
+    if metadata:
+        for key, value in metadata.items():
+            headers[f'x-ms-meta-{key.strip()}'] = value.strip() if value else value
+    return headers
+
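+# Illustrative sketch (not part of the upstream module): keys and values are
+# stripped and prefixed with the service's metadata header namespace:
+#
+#     >>> add_metadata_headers({' project ': ' gn-ai '})
+#     {'x-ms-meta-project': 'gn-ai'}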
+
+def serialize_batch_body(requests, batch_id):
+    """
+    --<delimiter>
+    <subrequest>
+    --<delimiter>
+    <subrequest>    (repeated as needed)
+    --<delimiter>--
+
+    Serializes the requests in this batch to a single multipart/mixed HTTP body.
+
+    :param List[~azure.core.pipeline.transport.HttpRequest] requests:
+        a list of sub-requests for the batch request
+    :param str batch_id:
+        the id to be embedded in the batch sub-request delimiter
+    :returns: The body bytes for this batch.
+    :rtype: bytes
+    """
+
+    if requests is None or len(requests) == 0:
+        raise ValueError('Please provide sub-request(s) for this batch request')
+
+    delimiter_bytes = (_get_batch_request_delimiter(batch_id, True, False) + _HTTP_LINE_ENDING).encode('utf-8')
+    newline_bytes = _HTTP_LINE_ENDING.encode('utf-8')
+    batch_body = []
+
+    content_index = 0
+    for request in requests:
+        request.headers.update({
+            "Content-ID": str(content_index),
+            "Content-Length": str(0)
+        })
+        batch_body.append(delimiter_bytes)
+        batch_body.append(_make_body_from_sub_request(request))
+        batch_body.append(newline_bytes)
+        content_index += 1
+
+    batch_body.append(_get_batch_request_delimiter(batch_id, True, True).encode('utf-8'))
+    # final line of body MUST have \r\n at the end, or it will not be properly read by the service
+    batch_body.append(newline_bytes)
+
+    return b"".join(batch_body)
+
+
+def _get_batch_request_delimiter(batch_id, is_prepend_dashes=False, is_append_dashes=False):
+    """
+    Gets the delimiter used for this batch request's multipart/mixed HTTP format.
+
+    :param str batch_id:
+        Randomly generated id
+    :param bool is_prepend_dashes:
+        Whether to include the starting dashes. Used in the body, but not when defining the delimiter.
+    :param bool is_append_dashes:
+        Whether to include the ending dashes. Used in the body on the closing delimiter only.
+    :returns: The delimiter, WITHOUT a trailing newline.
+    :rtype: str
+    """
+
+    prepend_dashes = '--' if is_prepend_dashes else ''
+    append_dashes = '--' if is_append_dashes else ''
+
+    return prepend_dashes + _REQUEST_DELIMITER_PREFIX + batch_id + append_dashes
+
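+# Illustrative sketch (not part of the upstream module): the opening delimiter
+# used before each sub-request, and the closing delimiter for the body:
+#
+#     >>> _get_batch_request_delimiter("7f1e", True, False)
+#     '--batch_7f1e'
+#     >>> _get_batch_request_delimiter("7f1e", True, True)
+#     '--batch_7f1e--'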
+
+def _make_body_from_sub_request(sub_request):
+    """
+     Content-Type: application/http
+     Content-ID: <sequential int ID>
+     Content-Transfer-Encoding: <value> (if present)
+
+     <verb> <path><query> HTTP/<version>
+     <header key>: <header value> (repeated as necessary)
+     Content-Length: <value>
+     (newline if content length > 0)
+     <body> (if content length > 0)
+
+     Serializes an HTTP request.
+
+     :param ~azure.core.pipeline.transport.HttpRequest sub_request:
+        Request to serialize.
+     :returns: The serialized sub-request in bytes
+     :rtype: bytes
+     """
+
+    # put the sub-request's headers into a list for efficient str concatenation
+    sub_request_body = []
+
+    # get headers for ease of manipulation; remove headers as they are used
+    headers = sub_request.headers
+
+    # append opening headers
+    sub_request_body.append("Content-Type: application/http")
+    sub_request_body.append(_HTTP_LINE_ENDING)
+
+    sub_request_body.append("Content-ID: ")
+    sub_request_body.append(headers.pop("Content-ID", ""))
+    sub_request_body.append(_HTTP_LINE_ENDING)
+
+    sub_request_body.append("Content-Transfer-Encoding: binary")
+    sub_request_body.append(_HTTP_LINE_ENDING)
+
+    # append blank line
+    sub_request_body.append(_HTTP_LINE_ENDING)
+
+    # append HTTP verb and path and query and HTTP version
+    sub_request_body.append(sub_request.method)
+    sub_request_body.append(' ')
+    sub_request_body.append(sub_request.url)
+    sub_request_body.append(' ')
+    sub_request_body.append(_HTTP1_1_IDENTIFIER)
+    sub_request_body.append(_HTTP_LINE_ENDING)
+
+    # append remaining headers (this includes the Content-Length, which was set on the sub-request earlier)
+    for header_name, header_value in headers.items():
+        if header_value is not None:
+            sub_request_body.append(header_name)
+            sub_request_body.append(": ")
+            sub_request_body.append(header_value)
+            sub_request_body.append(_HTTP_LINE_ENDING)
+
+    # append blank line
+    sub_request_body.append(_HTTP_LINE_ENDING)
+
+    return ''.join(sub_request_body).encode()
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/blob/_shared/response_handlers.py b/.venv/lib/python3.12/site-packages/azure/storage/blob/_shared/response_handlers.py
new file mode 100644
index 00000000..af9a2fcd
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/blob/_shared/response_handlers.py
@@ -0,0 +1,200 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+import logging
+from typing import NoReturn
+from xml.etree.ElementTree import Element
+
+from azure.core.exceptions import (
+    ClientAuthenticationError,
+    DecodeError,
+    HttpResponseError,
+    ResourceExistsError,
+    ResourceModifiedError,
+    ResourceNotFoundError,
+)
+from azure.core.pipeline.policies import ContentDecodePolicy
+
+from .authentication import AzureSigningError
+from .models import get_enum_value, StorageErrorCode, UserDelegationKey
+from .parser import _to_utc_datetime
+
+
+_LOGGER = logging.getLogger(__name__)
+
+
+class PartialBatchErrorException(HttpResponseError):
+    """There is a partial failure in batch operations.
+
+    :param str message: The message of the exception.
+    :param response: Server response to be deserialized.
+    :param list parts: A list of the parts in multipart response.
+    """
+
+    def __init__(self, message, response, parts):
+        self.parts = parts
+        super(PartialBatchErrorException, self).__init__(message=message, response=response)
+
+
+# Parses the blob length from the content range header: bytes 1-3/65537
+def parse_length_from_content_range(content_range):
+    if content_range is None:
+        return None
+
+    # First, split in space and take the second half: '1-3/65537'
+    # Next, split on slash and take the second half: '65537'
+    # Finally, convert to an int: 65537
+    return int(content_range.split(' ', 1)[1].split('/', 1)[1])
+
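+# Illustrative sketch (not part of the upstream module): the total blob length
+# is the value after the slash in the Content-Range header:
+#
+#     >>> parse_length_from_content_range('bytes 1-3/65537')
+#     65537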
+
+def normalize_headers(headers):
+    normalized = {}
+    for key, value in headers.items():
+        if key.startswith('x-ms-'):
+            key = key[5:]
+        normalized[key.lower().replace('-', '_')] = get_enum_value(value)
+    return normalized
+
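+# Illustrative sketch (not part of the upstream module; plain string values
+# pass through get_enum_value unchanged):
+#
+#     >>> normalize_headers({'x-ms-lease-state': 'available', 'ETag': '"0x1"'})
+#     {'lease_state': 'available', 'etag': '"0x1"'}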
+
+def deserialize_metadata(response, obj, headers):  # pylint: disable=unused-argument
+    try:
+        raw_metadata = {k: v for k, v in response.http_response.headers.items() if k.lower().startswith('x-ms-meta-')}
+    except AttributeError:
+        raw_metadata = {k: v for k, v in response.headers.items() if k.lower().startswith('x-ms-meta-')}
+    return {k[10:]: v for k, v in raw_metadata.items()}
+
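+# Illustrative sketch (not part of the upstream module): only 'x-ms-meta-*'
+# headers survive, with the 10-character prefix stripped from the key.
+# _Resp is a hypothetical minimal stand-in for a pipeline response:
+#
+#     >>> class _Resp:
+#     ...     headers = {'x-ms-meta-owner': 'darnell', 'ETag': '"0x1"'}
+#     >>> deserialize_metadata(_Resp(), None, None)
+#     {'owner': 'darnell'}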
+
+def return_response_headers(response, deserialized, response_headers):  # pylint: disable=unused-argument
+    return normalize_headers(response_headers)
+
+
+def return_headers_and_deserialized(response, deserialized, response_headers):  # pylint: disable=unused-argument
+    return normalize_headers(response_headers), deserialized
+
+
+def return_context_and_deserialized(response, deserialized, response_headers):  # pylint: disable=unused-argument
+    return response.http_response.location_mode, deserialized
+
+
+def return_raw_deserialized(response, *_):
+    return response.http_response.location_mode, response.context[ContentDecodePolicy.CONTEXT_NAME]
+
+
+def process_storage_error(storage_error) -> NoReturn: # type: ignore [misc] # pylint:disable=too-many-statements, too-many-branches
+    raise_error = HttpResponseError
+    serialized = False
+    if isinstance(storage_error, AzureSigningError):
+        storage_error.message = storage_error.message + \
+            '. This is likely due to an invalid shared key. Please check your shared key and try again.'
+    if not storage_error.response or storage_error.response.status_code in [200, 204]:
+        raise storage_error
+    # If it is one of these exception types, it has already been serialized by the generated layer.
+    if isinstance(storage_error, (PartialBatchErrorException,
+                                  ClientAuthenticationError, ResourceNotFoundError, ResourceExistsError)):
+        serialized = True
+    error_code = storage_error.response.headers.get('x-ms-error-code')
+    error_message = storage_error.message
+    additional_data = {}
+    error_dict = {}
+    try:
+        error_body = ContentDecodePolicy.deserialize_from_http_generics(storage_error.response)
+        try:
+            if error_body is None or len(error_body) == 0:
+                error_body = storage_error.response.reason
+        except AttributeError:
+            error_body = ''
+        # If it is an XML response
+        if isinstance(error_body, Element):
+            error_dict = {
+                child.tag.lower(): child.text
+                for child in error_body
+            }
+        # If it is a JSON response
+        elif isinstance(error_body, dict):
+            error_dict = error_body.get('error', {})
+        elif not error_code:
+            _LOGGER.warning(
+                'Unexpected return type %s from ContentDecodePolicy.deserialize_from_http_generics.', type(error_body))
+            error_dict = {'message': str(error_body)}
+
+        # If we extracted from a Json or XML response
+        # There is a chance error_dict is just a string
+        if error_dict and isinstance(error_dict, dict):
+            error_code = error_dict.get('code')
+            error_message = error_dict.get('message')
+            additional_data = {k: v for k, v in error_dict.items() if k not in {'code', 'message'}}
+    except DecodeError:
+        pass
+
+    try:
+        # This check would be unnecessary if we have already serialized the error
+        if error_code and not serialized:
+            error_code = StorageErrorCode(error_code)
+            if error_code in [StorageErrorCode.condition_not_met,
+                              StorageErrorCode.blob_overwritten]:
+                raise_error = ResourceModifiedError
+            if error_code in [StorageErrorCode.invalid_authentication_info,
+                              StorageErrorCode.authentication_failed]:
+                raise_error = ClientAuthenticationError
+            if error_code in [StorageErrorCode.resource_not_found,
+                              StorageErrorCode.cannot_verify_copy_source,
+                              StorageErrorCode.blob_not_found,
+                              StorageErrorCode.queue_not_found,
+                              StorageErrorCode.container_not_found,
+                              StorageErrorCode.parent_not_found,
+                              StorageErrorCode.share_not_found]:
+                raise_error = ResourceNotFoundError
+            if error_code in [StorageErrorCode.account_already_exists,
+                              StorageErrorCode.account_being_created,
+                              StorageErrorCode.resource_already_exists,
+                              StorageErrorCode.resource_type_mismatch,
+                              StorageErrorCode.blob_already_exists,
+                              StorageErrorCode.queue_already_exists,
+                              StorageErrorCode.container_already_exists,
+                              StorageErrorCode.container_being_deleted,
+                              StorageErrorCode.queue_being_deleted,
+                              StorageErrorCode.share_already_exists,
+                              StorageErrorCode.share_being_deleted]:
+                raise_error = ResourceExistsError
+    except ValueError:
+        # Got an unknown error code
+        pass
+
+    # Error message should include all the error properties
+    try:
+        error_message += f"\nErrorCode:{error_code.value}"
+    except AttributeError:
+        error_message += f"\nErrorCode:{error_code}"
+    for name, info in additional_data.items():
+        error_message += f"\n{name}:{info}"
+
+    # No need to create an instance if it has already been serialized by the generated layer
+    if serialized:
+        storage_error.message = error_message
+        error = storage_error
+    else:
+        error = raise_error(message=error_message, response=storage_error.response)
+    # Ensure these properties are stored in the error instance as well (not just the error message)
+    error.error_code = error_code
+    error.additional_info = additional_data
+    # error.args is what's surfaced on the traceback - show error message in all cases
+    error.args = (error.message,)
+    try:
+        # `from None` prevents us from double printing the exception (suppresses generated layer error context)
+        exec("raise error from None")   # pylint: disable=exec-used # nosec
+    except SyntaxError as exc:
+        raise error from exc
+
+
+def parse_to_internal_user_delegation_key(service_user_delegation_key):
+    internal_user_delegation_key = UserDelegationKey()
+    internal_user_delegation_key.signed_oid = service_user_delegation_key.signed_oid
+    internal_user_delegation_key.signed_tid = service_user_delegation_key.signed_tid
+    internal_user_delegation_key.signed_start = _to_utc_datetime(service_user_delegation_key.signed_start)
+    internal_user_delegation_key.signed_expiry = _to_utc_datetime(service_user_delegation_key.signed_expiry)
+    internal_user_delegation_key.signed_service = service_user_delegation_key.signed_service
+    internal_user_delegation_key.signed_version = service_user_delegation_key.signed_version
+    internal_user_delegation_key.value = service_user_delegation_key.value
+    return internal_user_delegation_key
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/blob/_shared/shared_access_signature.py b/.venv/lib/python3.12/site-packages/azure/storage/blob/_shared/shared_access_signature.py
new file mode 100644
index 00000000..df29222b
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/blob/_shared/shared_access_signature.py
@@ -0,0 +1,252 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+# pylint: disable=docstring-keyword-should-match-keyword-only
+
+from datetime import date
+
+from .parser import _to_utc_datetime
+from .constants import X_MS_VERSION
+from . import sign_string, url_quote
+
+# cspell:ignoreRegExp rsc.
+# cspell:ignoreRegExp s..?id
+class QueryStringConstants(object):
+    SIGNED_SIGNATURE = 'sig'
+    SIGNED_PERMISSION = 'sp'
+    SIGNED_START = 'st'
+    SIGNED_EXPIRY = 'se'
+    SIGNED_RESOURCE = 'sr'
+    SIGNED_IDENTIFIER = 'si'
+    SIGNED_IP = 'sip'
+    SIGNED_PROTOCOL = 'spr'
+    SIGNED_VERSION = 'sv'
+    SIGNED_CACHE_CONTROL = 'rscc'
+    SIGNED_CONTENT_DISPOSITION = 'rscd'
+    SIGNED_CONTENT_ENCODING = 'rsce'
+    SIGNED_CONTENT_LANGUAGE = 'rscl'
+    SIGNED_CONTENT_TYPE = 'rsct'
+    START_PK = 'spk'
+    START_RK = 'srk'
+    END_PK = 'epk'
+    END_RK = 'erk'
+    SIGNED_RESOURCE_TYPES = 'srt'
+    SIGNED_SERVICES = 'ss'
+    SIGNED_OID = 'skoid'
+    SIGNED_TID = 'sktid'
+    SIGNED_KEY_START = 'skt'
+    SIGNED_KEY_EXPIRY = 'ske'
+    SIGNED_KEY_SERVICE = 'sks'
+    SIGNED_KEY_VERSION = 'skv'
+    SIGNED_ENCRYPTION_SCOPE = 'ses'
+
+    # for ADLS
+    SIGNED_AUTHORIZED_OID = 'saoid'
+    SIGNED_UNAUTHORIZED_OID = 'suoid'
+    SIGNED_CORRELATION_ID = 'scid'
+    SIGNED_DIRECTORY_DEPTH = 'sdd'
+
+    @staticmethod
+    def to_list():
+        return [
+            QueryStringConstants.SIGNED_SIGNATURE,
+            QueryStringConstants.SIGNED_PERMISSION,
+            QueryStringConstants.SIGNED_START,
+            QueryStringConstants.SIGNED_EXPIRY,
+            QueryStringConstants.SIGNED_RESOURCE,
+            QueryStringConstants.SIGNED_IDENTIFIER,
+            QueryStringConstants.SIGNED_IP,
+            QueryStringConstants.SIGNED_PROTOCOL,
+            QueryStringConstants.SIGNED_VERSION,
+            QueryStringConstants.SIGNED_CACHE_CONTROL,
+            QueryStringConstants.SIGNED_CONTENT_DISPOSITION,
+            QueryStringConstants.SIGNED_CONTENT_ENCODING,
+            QueryStringConstants.SIGNED_CONTENT_LANGUAGE,
+            QueryStringConstants.SIGNED_CONTENT_TYPE,
+            QueryStringConstants.START_PK,
+            QueryStringConstants.START_RK,
+            QueryStringConstants.END_PK,
+            QueryStringConstants.END_RK,
+            QueryStringConstants.SIGNED_RESOURCE_TYPES,
+            QueryStringConstants.SIGNED_SERVICES,
+            QueryStringConstants.SIGNED_OID,
+            QueryStringConstants.SIGNED_TID,
+            QueryStringConstants.SIGNED_KEY_START,
+            QueryStringConstants.SIGNED_KEY_EXPIRY,
+            QueryStringConstants.SIGNED_KEY_SERVICE,
+            QueryStringConstants.SIGNED_KEY_VERSION,
+            QueryStringConstants.SIGNED_ENCRYPTION_SCOPE,
+            # for ADLS
+            QueryStringConstants.SIGNED_AUTHORIZED_OID,
+            QueryStringConstants.SIGNED_UNAUTHORIZED_OID,
+            QueryStringConstants.SIGNED_CORRELATION_ID,
+            QueryStringConstants.SIGNED_DIRECTORY_DEPTH,
+        ]
+
+
+class SharedAccessSignature(object):
+    '''
+    Provides a factory for creating account access
+    signature tokens with an account name and account key. Users can either
+    use the factory or can construct the appropriate service and use the
+    generate_*_shared_access_signature method directly.
+    '''
+
+    def __init__(self, account_name, account_key, x_ms_version=X_MS_VERSION):
+        '''
+        :param str account_name:
+            The storage account name used to generate the shared access signatures.
+        :param str account_key:
+            The access key used to generate the shared access signatures.
+        :param str x_ms_version:
+            The service version used to generate the shared access signatures.
+        '''
+        self.account_name = account_name
+        self.account_key = account_key
+        self.x_ms_version = x_ms_version
+
+    def generate_account(
+        self, services,
+        resource_types,
+        permission,
+        expiry,
+        start=None,
+        ip=None,
+        protocol=None,
+        sts_hook=None,
+        **kwargs
+    ) -> str:
+        '''
+        Generates a shared access signature for the account.
+        Use the returned signature with the sas_token parameter of the service
+        or to create a new account object.
+
+        :param Any services: The specified services associated with the shared access signature.
+        :param ResourceTypes resource_types:
+            Specifies the resource types that are accessible with the account
+            SAS. You can combine values to provide access to more than one
+            resource type.
+        :param AccountSasPermissions permission:
+            The permissions associated with the shared access signature. The
+            user is restricted to operations allowed by the permissions.
+            Required unless an id is given referencing a stored access policy
+            which contains this field. This field must be omitted if it has been
+            specified in an associated stored access policy. You can combine
+            values to provide more than one permission.
+        :param expiry:
+            The time at which the shared access signature becomes invalid.
+            Required unless an id is given referencing a stored access policy
+            which contains this field. This field must be omitted if it has
+            been specified in an associated stored access policy. Azure will always
+            convert values to UTC. If a date is passed in without timezone info, it
+            is assumed to be UTC.
+        :type expiry: datetime or str
+        :param start:
+            The time at which the shared access signature becomes valid. If
+            omitted, start time for this call is assumed to be the time when the
+            storage service receives the request. The provided datetime will always
+            be interpreted as UTC.
+        :type start: datetime or str
+        :param str ip:
+            Specifies an IP address or a range of IP addresses from which to accept requests.
+            If the IP address from which the request originates does not match the IP address
+            or address range specified on the SAS token, the request is not authenticated.
+            For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS
+            restricts the request to those IP addresses.
+        :param str protocol:
+            Specifies the protocol permitted for a request made. The default value
+            is https,http. See :class:`~azure.storage.common.models.Protocol` for possible values.
+        :keyword str encryption_scope:
+            Optional. If specified, this is the encryption scope to use when sending requests
+            authorized with this SAS URI.
+        :param sts_hook:
+            For debugging purposes only. If provided, the hook is called with the string to sign
+            that was used to generate the SAS.
+        :type sts_hook: Optional[Callable[[str], None]]
+        :returns: The generated SAS token for the account.
+        :rtype: str
+        '''
+        sas = _SharedAccessHelper()
+        sas.add_base(permission, expiry, start, ip, protocol, self.x_ms_version)
+        sas.add_account(services, resource_types)
+        sas.add_encryption_scope(**kwargs)
+        sas.add_account_signature(self.account_name, self.account_key)
+
+        if sts_hook is not None:
+            sts_hook(sas.string_to_sign)
+
+        return sas.get_token()
+
+
+class _SharedAccessHelper(object):
+    def __init__(self):
+        self.query_dict = {}
+        self.string_to_sign = ""
+
+    def _add_query(self, name, val):
+        if val:
+            # val is truthy here, so it can never be None
+            self.query_dict[name] = str(val)
+
+    def add_encryption_scope(self, **kwargs):
+        self._add_query(QueryStringConstants.SIGNED_ENCRYPTION_SCOPE, kwargs.pop('encryption_scope', None))
+
+    def add_base(self, permission, expiry, start, ip, protocol, x_ms_version):
+        if isinstance(start, date):
+            start = _to_utc_datetime(start)
+
+        if isinstance(expiry, date):
+            expiry = _to_utc_datetime(expiry)
+
+        self._add_query(QueryStringConstants.SIGNED_START, start)
+        self._add_query(QueryStringConstants.SIGNED_EXPIRY, expiry)
+        self._add_query(QueryStringConstants.SIGNED_PERMISSION, permission)
+        self._add_query(QueryStringConstants.SIGNED_IP, ip)
+        self._add_query(QueryStringConstants.SIGNED_PROTOCOL, protocol)
+        self._add_query(QueryStringConstants.SIGNED_VERSION, x_ms_version)
+
+    def add_resource(self, resource):
+        self._add_query(QueryStringConstants.SIGNED_RESOURCE, resource)
+
+    def add_id(self, policy_id):
+        self._add_query(QueryStringConstants.SIGNED_IDENTIFIER, policy_id)
+
+    def add_account(self, services, resource_types):
+        self._add_query(QueryStringConstants.SIGNED_SERVICES, services)
+        self._add_query(QueryStringConstants.SIGNED_RESOURCE_TYPES, resource_types)
+
+    def add_override_response_headers(self, cache_control,
+                                      content_disposition,
+                                      content_encoding,
+                                      content_language,
+                                      content_type):
+        self._add_query(QueryStringConstants.SIGNED_CACHE_CONTROL, cache_control)
+        self._add_query(QueryStringConstants.SIGNED_CONTENT_DISPOSITION, content_disposition)
+        self._add_query(QueryStringConstants.SIGNED_CONTENT_ENCODING, content_encoding)
+        self._add_query(QueryStringConstants.SIGNED_CONTENT_LANGUAGE, content_language)
+        self._add_query(QueryStringConstants.SIGNED_CONTENT_TYPE, content_type)
+
+    def add_account_signature(self, account_name, account_key):
+        def get_value_to_append(query):
+            return_value = self.query_dict.get(query) or ''
+            return return_value + '\n'
+
+        string_to_sign = \
+            (account_name + '\n' +
+             get_value_to_append(QueryStringConstants.SIGNED_PERMISSION) +
+             get_value_to_append(QueryStringConstants.SIGNED_SERVICES) +
+             get_value_to_append(QueryStringConstants.SIGNED_RESOURCE_TYPES) +
+             get_value_to_append(QueryStringConstants.SIGNED_START) +
+             get_value_to_append(QueryStringConstants.SIGNED_EXPIRY) +
+             get_value_to_append(QueryStringConstants.SIGNED_IP) +
+             get_value_to_append(QueryStringConstants.SIGNED_PROTOCOL) +
+             get_value_to_append(QueryStringConstants.SIGNED_VERSION) +
+             get_value_to_append(QueryStringConstants.SIGNED_ENCRYPTION_SCOPE))
+
+        self._add_query(QueryStringConstants.SIGNED_SIGNATURE,
+                        sign_string(account_key, string_to_sign))
+        self.string_to_sign = string_to_sign
+
+    def get_token(self) -> str:
+        return '&'.join([f'{n}={url_quote(v)}' for n, v in self.query_dict.items() if v is not None])
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/blob/_shared/uploads.py b/.venv/lib/python3.12/site-packages/azure/storage/blob/_shared/uploads.py
new file mode 100644
index 00000000..b31cfb32
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/blob/_shared/uploads.py
@@ -0,0 +1,604 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+
+from concurrent import futures
+from io import BytesIO, IOBase, SEEK_CUR, SEEK_END, SEEK_SET, UnsupportedOperation
+from itertools import islice
+from math import ceil
+from threading import Lock
+
+from azure.core.tracing.common import with_current_context
+
+from . import encode_base64, url_quote
+from .request_handlers import get_length
+from .response_handlers import return_response_headers
+
+
+_LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE = 4 * 1024 * 1024
+_ERROR_VALUE_SHOULD_BE_SEEKABLE_STREAM = "{0} should be a seekable file-like/io.IOBase type stream object."
+
+
+def _parallel_uploads(executor, uploader, pending, running):
+    range_ids = []
+    while True:
+        # Wait for some upload to finish before adding a new one
+        done, running = futures.wait(running, return_when=futures.FIRST_COMPLETED)
+        range_ids.extend([chunk.result() for chunk in done])
+        try:
+            for _ in range(0, len(done)):
+                next_chunk = next(pending)
+                running.add(executor.submit(with_current_context(uploader), next_chunk))
+        except StopIteration:
+            break
+
+    # Wait for the remaining uploads to finish
+    done, _running = futures.wait(running)
+    range_ids.extend([chunk.result() for chunk in done])
+    return range_ids
+
+
+def upload_data_chunks(
+        service=None,
+        uploader_class=None,
+        total_size=None,
+        chunk_size=None,
+        max_concurrency=None,
+        stream=None,
+        validate_content=None,
+        progress_hook=None,
+        **kwargs):
+
+    parallel = max_concurrency > 1
+    if parallel and 'modified_access_conditions' in kwargs:
+        # Access conditions do not work with parallelism
+        kwargs['modified_access_conditions'] = None
+
+    uploader = uploader_class(
+        service=service,
+        total_size=total_size,
+        chunk_size=chunk_size,
+        stream=stream,
+        parallel=parallel,
+        validate_content=validate_content,
+        progress_hook=progress_hook,
+        **kwargs)
+    if parallel:
+        with futures.ThreadPoolExecutor(max_concurrency) as executor:
+            upload_tasks = uploader.get_chunk_streams()
+            running_futures = [
+                executor.submit(with_current_context(uploader.process_chunk), u)
+                for u in islice(upload_tasks, 0, max_concurrency)
+            ]
+            range_ids = _parallel_uploads(executor, uploader.process_chunk, upload_tasks, running_futures)
+    else:
+        range_ids = [uploader.process_chunk(result) for result in uploader.get_chunk_streams()]
+    if any(range_ids):
+        return [r[1] for r in sorted(range_ids, key=lambda r: r[0])]
+    return uploader.response_headers
+
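+# Illustrative sketch (not part of the upstream module): block uploads return
+# (offset, block_id) pairs, which upload_data_chunks orders by offset before
+# handing back just the ids:
+#
+#     >>> range_ids = [(4194304, 'id-1'), (0, 'id-0')]
+#     >>> [r[1] for r in sorted(range_ids, key=lambda r: r[0])]
+#     ['id-0', 'id-1']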
+
+def upload_substream_blocks(
+        service=None,
+        uploader_class=None,
+        total_size=None,
+        chunk_size=None,
+        max_concurrency=None,
+        stream=None,
+        progress_hook=None,
+        **kwargs):
+    parallel = max_concurrency > 1
+    if parallel and 'modified_access_conditions' in kwargs:
+        # Access conditions do not work with parallelism
+        kwargs['modified_access_conditions'] = None
+    uploader = uploader_class(
+        service=service,
+        total_size=total_size,
+        chunk_size=chunk_size,
+        stream=stream,
+        parallel=parallel,
+        progress_hook=progress_hook,
+        **kwargs)
+
+    if parallel:
+        with futures.ThreadPoolExecutor(max_concurrency) as executor:
+            upload_tasks = uploader.get_substream_blocks()
+            running_futures = [
+                executor.submit(with_current_context(uploader.process_substream_block), u)
+                for u in islice(upload_tasks, 0, max_concurrency)
+            ]
+            range_ids = _parallel_uploads(executor, uploader.process_substream_block, upload_tasks, running_futures)
+    else:
+        range_ids = [uploader.process_substream_block(b) for b in uploader.get_substream_blocks()]
+    if any(range_ids):
+        return sorted(range_ids)
+    return []
+
+
+class _ChunkUploader(object):  # pylint: disable=too-many-instance-attributes
+
+    def __init__(
+            self, service,
+            total_size,
+            chunk_size,
+            stream,
+            parallel,
+            encryptor=None,
+            padder=None,
+            progress_hook=None,
+            **kwargs):
+        self.service = service
+        self.total_size = total_size
+        self.chunk_size = chunk_size
+        self.stream = stream
+        self.parallel = parallel
+
+        # Stream management
+        self.stream_lock = Lock() if parallel else None
+
+        # Progress feedback
+        self.progress_total = 0
+        self.progress_lock = Lock() if parallel else None
+        self.progress_hook = progress_hook
+
+        # Encryption
+        self.encryptor = encryptor
+        self.padder = padder
+        self.response_headers = None
+        self.etag = None
+        self.last_modified = None
+        self.request_options = kwargs
+
+    def get_chunk_streams(self):
+        index = 0
+        while True:
+            data = b""
+            read_size = self.chunk_size
+
+            # Buffer until we either reach the end of the stream or get a whole chunk.
+            while True:
+                if self.total_size:
+                    read_size = min(self.chunk_size - len(data), self.total_size - (index + len(data)))
+                temp = self.stream.read(read_size)
+                if not isinstance(temp, bytes):
+                    raise TypeError("Blob data should be of type bytes.")
+                data += temp or b""
+
+                # We have read an empty string and so are at the end
+                # of the buffer or we have read a full chunk.
+                if temp == b"" or len(data) == self.chunk_size:
+                    break
+
+            if len(data) == self.chunk_size:
+                if self.padder:
+                    data = self.padder.update(data)
+                if self.encryptor:
+                    data = self.encryptor.update(data)
+                yield index, data
+            else:
+                if self.padder:
+                    data = self.padder.update(data) + self.padder.finalize()
+                if self.encryptor:
+                    data = self.encryptor.update(data) + self.encryptor.finalize()
+                if data:
+                    yield index, data
+                break
+            index += len(data)
+
+    def process_chunk(self, chunk_data):
+        chunk_bytes = chunk_data[1]
+        chunk_offset = chunk_data[0]
+        return self._upload_chunk_with_progress(chunk_offset, chunk_bytes)
+
+    def _update_progress(self, length):
+        if self.progress_lock is not None:
+            with self.progress_lock:
+                self.progress_total += length
+        else:
+            self.progress_total += length
+
+        if self.progress_hook:
+            self.progress_hook(self.progress_total, self.total_size)
+
+    def _upload_chunk(self, chunk_offset, chunk_data):
+        raise NotImplementedError("Must be implemented by child class.")
+
+    def _upload_chunk_with_progress(self, chunk_offset, chunk_data):
+        range_id = self._upload_chunk(chunk_offset, chunk_data)
+        self._update_progress(len(chunk_data))
+        return range_id
+
+    def get_substream_blocks(self):
+        assert self.chunk_size is not None
+        lock = self.stream_lock
+        blob_length = self.total_size
+
+        if blob_length is None:
+            blob_length = get_length(self.stream)
+            if blob_length is None:
+                raise ValueError("Unable to determine content length of upload data.")
+
+        blocks = int(ceil(blob_length / (self.chunk_size * 1.0)))
+        last_block_size = self.chunk_size if blob_length % self.chunk_size == 0 else blob_length % self.chunk_size
+
+        for i in range(blocks):
+            index = i * self.chunk_size
+            length = last_block_size if i == blocks - 1 else self.chunk_size
+            yield index, SubStream(self.stream, index, length, lock)
+
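+    # Illustrative sketch (not part of the upstream module): a 10 MiB stream
+    # with 4 MiB chunks yields three blocks, the last one 2 MiB long:
+    #
+    #     >>> blob_length, chunk_size = 10 * 1024 * 1024, 4 * 1024 * 1024
+    #     >>> int(ceil(blob_length / (chunk_size * 1.0)))
+    #     3
+    #     >>> blob_length % chunk_size  # size of the final block
+    #     2097152
+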
+    def process_substream_block(self, block_data):
+        return self._upload_substream_block_with_progress(block_data[0], block_data[1])
+
+    def _upload_substream_block(self, index, block_stream):
+        raise NotImplementedError("Must be implemented by child class.")
+
+    def _upload_substream_block_with_progress(self, index, block_stream):
+        range_id = self._upload_substream_block(index, block_stream)
+        self._update_progress(len(block_stream))
+        return range_id
+
+    def set_response_properties(self, resp):
+        self.etag = resp.etag
+        self.last_modified = resp.last_modified
+
+
+class BlockBlobChunkUploader(_ChunkUploader):
+
+    def __init__(self, *args, **kwargs):
+        kwargs.pop("modified_access_conditions", None)
+        super(BlockBlobChunkUploader, self).__init__(*args, **kwargs)
+        self.current_length = None
+
+    def _upload_chunk(self, chunk_offset, chunk_data):
+        # TODO: This is incorrect, but works with recording.
+        index = f'{chunk_offset:032d}'
+        block_id = encode_base64(url_quote(encode_base64(index)))
+        self.service.stage_block(
+            block_id,
+            len(chunk_data),
+            chunk_data,
+            data_stream_total=self.total_size,
+            upload_stream_current=self.progress_total,
+            **self.request_options
+        )
+        return index, block_id
+
+    def _upload_substream_block(self, index, block_stream):
+        try:
+            block_id = f'BlockId{(index//self.chunk_size):05}'
+            self.service.stage_block(
+                block_id,
+                len(block_stream),
+                block_stream,
+                data_stream_total=self.total_size,
+                upload_stream_current=self.progress_total,
+                **self.request_options
+            )
+        finally:
+            block_stream.close()
+        return block_id
+
+
+class PageBlobChunkUploader(_ChunkUploader):
+
+    def _is_chunk_empty(self, chunk_data):
+        # read until non-zero byte is encountered
+        # if reached the end without returning, then chunk_data is all 0's
+        return not any(bytearray(chunk_data))
+
+    def _upload_chunk(self, chunk_offset, chunk_data):
+        # avoid uploading the empty pages
+        if not self._is_chunk_empty(chunk_data):
+            chunk_end = chunk_offset + len(chunk_data) - 1
+            content_range = f"bytes={chunk_offset}-{chunk_end}"
+            computed_md5 = None
+            self.response_headers = self.service.upload_pages(
+                body=chunk_data,
+                content_length=len(chunk_data),
+                transactional_content_md5=computed_md5,
+                range=content_range,
+                cls=return_response_headers,
+                data_stream_total=self.total_size,
+                upload_stream_current=self.progress_total,
+                **self.request_options
+            )
+
+            if not self.parallel and self.request_options.get('modified_access_conditions'):
+                self.request_options['modified_access_conditions'].if_match = self.response_headers['etag']
+
+    def _upload_substream_block(self, index, block_stream):
+        pass
+
+
+class AppendBlobChunkUploader(_ChunkUploader):
+
+    def __init__(self, *args, **kwargs):
+        super(AppendBlobChunkUploader, self).__init__(*args, **kwargs)
+        self.current_length = None
+
+    def _upload_chunk(self, chunk_offset, chunk_data):
+        if self.current_length is None:
+            self.response_headers = self.service.append_block(
+                body=chunk_data,
+                content_length=len(chunk_data),
+                cls=return_response_headers,
+                data_stream_total=self.total_size,
+                upload_stream_current=self.progress_total,
+                **self.request_options
+            )
+            self.current_length = int(self.response_headers["blob_append_offset"])
+        else:
+            self.request_options['append_position_access_conditions'].append_position = \
+                self.current_length + chunk_offset
+            self.response_headers = self.service.append_block(
+                body=chunk_data,
+                content_length=len(chunk_data),
+                cls=return_response_headers,
+                data_stream_total=self.total_size,
+                upload_stream_current=self.progress_total,
+                **self.request_options
+            )
+
+    def _upload_substream_block(self, index, block_stream):
+        pass
+
+
+class DataLakeFileChunkUploader(_ChunkUploader):
+
+    def _upload_chunk(self, chunk_offset, chunk_data):
+        # unlike page blobs, every chunk is appended here (no empty-chunk check)
+        self.response_headers = self.service.append_data(
+            body=chunk_data,
+            position=chunk_offset,
+            content_length=len(chunk_data),
+            cls=return_response_headers,
+            data_stream_total=self.total_size,
+            upload_stream_current=self.progress_total,
+            **self.request_options
+        )
+
+        if not self.parallel and self.request_options.get('modified_access_conditions'):
+            self.request_options['modified_access_conditions'].if_match = self.response_headers['etag']
+
+    def _upload_substream_block(self, index, block_stream):
+        try:
+            self.service.append_data(
+                body=block_stream,
+                position=index,
+                content_length=len(block_stream),
+                cls=return_response_headers,
+                data_stream_total=self.total_size,
+                upload_stream_current=self.progress_total,
+                **self.request_options
+            )
+        finally:
+            block_stream.close()
+
+
+class FileChunkUploader(_ChunkUploader):
+
+    def _upload_chunk(self, chunk_offset, chunk_data):
+        length = len(chunk_data)
+        chunk_end = chunk_offset + length - 1
+        response = self.service.upload_range(
+            chunk_data,
+            chunk_offset,
+            length,
+            data_stream_total=self.total_size,
+            upload_stream_current=self.progress_total,
+            **self.request_options
+        )
+        return f'bytes={chunk_offset}-{chunk_end}', response
+
+    # TODO: Implement this method.
+    def _upload_substream_block(self, index, block_stream):
+        pass
+
+
+class SubStream(IOBase):
+
+    def __init__(self, wrapped_stream, stream_begin_index, length, lockObj):
+        # Python 2.7: file-like objects created with open() typically support seek(), but are not
+        # derivations of io.IOBase and thus do not implement seekable().
+        # Python > 3.0: file-like objects created with open() are derived from io.IOBase.
+        try:
+            # only the main thread runs this, so there's no need to grab the lock
+            wrapped_stream.seek(0, SEEK_CUR)
+        except Exception as exc:
+            raise ValueError("Wrapped stream must support seek().") from exc
+
+        self._lock = lockObj
+        self._wrapped_stream = wrapped_stream
+        self._position = 0
+        self._stream_begin_index = stream_begin_index
+        self._length = length
+        self._buffer = BytesIO()
+
+        # we must avoid buffering more than necessary, and also not use up too much memory
+        # so the max buffer size is capped at 4MB
+        self._max_buffer_size = (
+            length if length < _LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE else _LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE
+        )
+        self._current_buffer_start = 0
+        self._current_buffer_size = 0
+        super(SubStream, self).__init__()
+
+    def __len__(self):
+        return self._length
+
+    def close(self):
+        if self._buffer:
+            self._buffer.close()
+        self._wrapped_stream = None
+        IOBase.close(self)
+
+    def fileno(self):
+        return self._wrapped_stream.fileno()
+
+    def flush(self):
+        pass
+
+    def read(self, size=None):
+        if self.closed:  # pylint: disable=using-constant-test
+            raise ValueError("Stream is closed.")
+
+        if size is None:
+            size = self._length - self._position
+
+        # adjust if out of bounds
+        if size + self._position >= self._length:
+            size = self._length - self._position
+
+        # return fast
+        if size == 0 or self._buffer.closed:
+            return b""
+
+        # attempt first read from the read buffer and update position
+        read_buffer = self._buffer.read(size)
+        bytes_read = len(read_buffer)
+        bytes_remaining = size - bytes_read
+        self._position += bytes_read
+
+        # repopulate the read buffer from the underlying stream to fulfill the request
+        # ensure the seek and read operations are done atomically (only if a lock is provided)
+        if bytes_remaining > 0:
+            with self._buffer:
+                # either read in the max buffer size specified on the class
+                # or read in just enough data for the current block/sub stream
+                current_max_buffer_size = min(self._max_buffer_size, self._length - self._position)
+
+                # lock is only defined if max_concurrency > 1 (parallel uploads)
+                if self._lock:
+                    with self._lock:
+                        # reposition the underlying stream to match the start of the data to read
+                        absolute_position = self._stream_begin_index + self._position
+                        self._wrapped_stream.seek(absolute_position, SEEK_SET)
+                        # If we can't seek to the right location, our read will be corrupted so fail fast.
+                        if self._wrapped_stream.tell() != absolute_position:
+                            raise IOError("Stream failed to seek to the desired location.")
+                        buffer_from_stream = self._wrapped_stream.read(current_max_buffer_size)
+                else:
+                    absolute_position = self._stream_begin_index + self._position
+                    # It's possible that there's connection problem during data transfer,
+                    # so when we retry we don't want to read from current position of wrapped stream,
+                    # instead we should seek to where we want to read from.
+                    if self._wrapped_stream.tell() != absolute_position:
+                        self._wrapped_stream.seek(absolute_position, SEEK_SET)
+
+                    buffer_from_stream = self._wrapped_stream.read(current_max_buffer_size)
+
+            if buffer_from_stream:
+                # update the buffer with new data from the wrapped stream
+                # we need to note down the start position and size of the buffer, in case seek is performed later
+                self._buffer = BytesIO(buffer_from_stream)
+                self._current_buffer_start = self._position
+                self._current_buffer_size = len(buffer_from_stream)
+
+                # read the remaining bytes from the new buffer and update position
+                second_read_buffer = self._buffer.read(bytes_remaining)
+                read_buffer += second_read_buffer
+                self._position += len(second_read_buffer)
+
+        return read_buffer
+
+    def readable(self):
+        return True
+
+    def readinto(self, b):
+        raise UnsupportedOperation
+
+    def seek(self, offset, whence=0):
+        if whence == SEEK_SET:
+            start_index = 0
+        elif whence == SEEK_CUR:
+            start_index = self._position
+        elif whence == SEEK_END:
+            start_index = self._length
+            offset = -offset
+        else:
+            raise ValueError("Invalid argument for the 'whence' parameter.")
+
+        pos = start_index + offset
+
+        if pos > self._length:
+            pos = self._length
+        elif pos < 0:
+            pos = 0
+
+        # check if buffer is still valid
+        # if not, drop buffer
+        if pos < self._current_buffer_start or pos >= self._current_buffer_start + self._current_buffer_size:
+            self._buffer.close()
+            self._buffer = BytesIO()
+        else:  # if yes seek to correct position
+            delta = pos - self._current_buffer_start
+            self._buffer.seek(delta, SEEK_SET)
+
+        self._position = pos
+        return pos
+
+    def seekable(self):
+        return True
+
+    def tell(self):
+        return self._position
+
+    def write(self, b):
+        raise UnsupportedOperation
+
+    def writelines(self, lines):
+        raise UnsupportedOperation
+
+    def writable(self):
+        # correct io.IOBase spelling so this override actually takes effect
+        return False
+
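+# Illustrative sketch (not part of the upstream module): a SubStream exposes a
+# fixed window of a parent stream; no lock is needed in single-threaded use:
+#
+#     >>> base = BytesIO(b"a" * 512 + b"b" * 512)
+#     >>> sub = SubStream(base, 512, 256, None)
+#     >>> sub.read() == b"b" * 256
+#     True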
+
+class IterStreamer(object):
+    """
+    File-like streaming iterator.
+    """
+
+    def __init__(self, generator, encoding="UTF-8"):
+        self.generator = generator
+        self.iterator = iter(generator)
+        self.leftover = b""
+        self.encoding = encoding
+
+    def __len__(self):
+        return self.generator.__len__()
+
+    def __iter__(self):
+        return self.iterator
+
+    def seekable(self):
+        return False
+
+    def __next__(self):
+        return next(self.iterator)
+
+    def tell(self, *args, **kwargs):
+        raise UnsupportedOperation("Data generator does not support tell.")
+
+    def seek(self, *args, **kwargs):
+        raise UnsupportedOperation("Data generator is not seekable.")
+
+    def read(self, size):
+        data = self.leftover
+        count = len(self.leftover)
+        try:
+            while count < size:
+                chunk = self.__next__()
+                if isinstance(chunk, str):
+                    chunk = chunk.encode(self.encoding)
+                data += chunk
+                count += len(chunk)
+        # This means count < size and what's leftover will be returned in this call.
+        except StopIteration:
+            self.leftover = b""
+
+        if count >= size:
+            self.leftover = data[size:]
+
+        return data[:size]
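+
+# Illustrative sketch (not part of the upstream module): leftover bytes from an
+# oversized chunk are carried into the next read:
+#
+#     >>> streamer = IterStreamer(iter([b"ab", b"cd"]))
+#     >>> streamer.read(3)
+#     b'abc'
+#     >>> streamer.read(3)
+#     b'd'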
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/blob/_shared/uploads_async.py b/.venv/lib/python3.12/site-packages/azure/storage/blob/_shared/uploads_async.py
new file mode 100644
index 00000000..3e102ec5
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/blob/_shared/uploads_async.py
@@ -0,0 +1,460 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+
+import asyncio
+import inspect
+import threading
+from asyncio import Lock
+from io import UnsupportedOperation
+from itertools import islice
+from math import ceil
+from typing import AsyncGenerator, Union
+
+from . import encode_base64, url_quote
+from .request_handlers import get_length
+from .response_handlers import return_response_headers
+from .uploads import SubStream, IterStreamer  # pylint: disable=unused-import
+
+
+async def _async_parallel_uploads(uploader, pending, running):
+    range_ids = []
+    while True:
+        # Wait for some upload to finish before adding a new one
+        done, running = await asyncio.wait(running, return_when=asyncio.FIRST_COMPLETED)
+        range_ids.extend([chunk.result() for chunk in done])
+        try:
+            for _ in range(0, len(done)):
+                next_chunk = await pending.__anext__()
+                running.add(asyncio.ensure_future(uploader(next_chunk)))
+        except StopAsyncIteration:
+            break
+
+    # Wait for the remaining uploads to finish
+    if running:
+        done, _running = await asyncio.wait(running)
+        range_ids.extend([chunk.result() for chunk in done])
+    return range_ids
+
+
+async def _parallel_uploads(uploader, pending, running):
+    range_ids = []
+    while True:
+        # Wait for some upload to finish before adding a new one
+        done, running = await asyncio.wait(running, return_when=asyncio.FIRST_COMPLETED)
+        range_ids.extend([chunk.result() for chunk in done])
+        try:
+            for _ in range(0, len(done)):
+                next_chunk = next(pending)
+                running.add(asyncio.ensure_future(uploader(next_chunk)))
+        except StopIteration:
+            break
+
+    # Wait for the remaining uploads to finish
+    if running:
+        done, _running = await asyncio.wait(running)
+        range_ids.extend([chunk.result() for chunk in done])
+    return range_ids
+
+
+async def upload_data_chunks(
+        service=None,
+        uploader_class=None,
+        total_size=None,
+        chunk_size=None,
+        max_concurrency=None,
+        stream=None,
+        progress_hook=None,
+        **kwargs):
+
+    parallel = max_concurrency > 1
+    if parallel and 'modified_access_conditions' in kwargs:
+        # Access conditions do not work with parallelism
+        kwargs['modified_access_conditions'] = None
+
+    uploader = uploader_class(
+        service=service,
+        total_size=total_size,
+        chunk_size=chunk_size,
+        stream=stream,
+        parallel=parallel,
+        progress_hook=progress_hook,
+        **kwargs)
+
+    if parallel:
+        upload_tasks = uploader.get_chunk_streams()
+        running_futures = []
+        for _ in range(max_concurrency):
+            try:
+                chunk = await upload_tasks.__anext__()
+                running_futures.append(asyncio.ensure_future(uploader.process_chunk(chunk)))
+            except StopAsyncIteration:
+                break
+
+        range_ids = await _async_parallel_uploads(uploader.process_chunk, upload_tasks, running_futures)
+    else:
+        range_ids = []
+        async for chunk in uploader.get_chunk_streams():
+            range_ids.append(await uploader.process_chunk(chunk))
+
+    if any(range_ids):
+        return [r[1] for r in sorted(range_ids, key=lambda r: r[0])]
+    return uploader.response_headers
+
+
+async def upload_substream_blocks(
+        service=None,
+        uploader_class=None,
+        total_size=None,
+        chunk_size=None,
+        max_concurrency=None,
+        stream=None,
+        progress_hook=None,
+        **kwargs):
+    parallel = max_concurrency > 1
+    if parallel and 'modified_access_conditions' in kwargs:
+        # Access conditions do not work with parallelism
+        kwargs['modified_access_conditions'] = None
+    uploader = uploader_class(
+        service=service,
+        total_size=total_size,
+        chunk_size=chunk_size,
+        stream=stream,
+        parallel=parallel,
+        progress_hook=progress_hook,
+        **kwargs)
+
+    if parallel:
+        upload_tasks = uploader.get_substream_blocks()
+        running_futures = [
+            asyncio.ensure_future(uploader.process_substream_block(u))
+            for u in islice(upload_tasks, 0, max_concurrency)
+        ]
+        range_ids = await _parallel_uploads(uploader.process_substream_block, upload_tasks, running_futures)
+    else:
+        range_ids = []
+        for block in uploader.get_substream_blocks():
+            range_ids.append(await uploader.process_substream_block(block))
+    if any(range_ids):
+        return sorted(range_ids)
+    return
+
+
+class _ChunkUploader(object):  # pylint: disable=too-many-instance-attributes
+
+    def __init__(
+            self, service,
+            total_size,
+            chunk_size,
+            stream,
+            parallel,
+            encryptor=None,
+            padder=None,
+            progress_hook=None,
+            **kwargs):
+        self.service = service
+        self.total_size = total_size
+        self.chunk_size = chunk_size
+        self.stream = stream
+        self.parallel = parallel
+
+        # Stream management
+        self.stream_lock = threading.Lock() if parallel else None
+
+        # Progress feedback
+        self.progress_total = 0
+        self.progress_lock = Lock() if parallel else None
+        self.progress_hook = progress_hook
+
+        # Encryption
+        self.encryptor = encryptor
+        self.padder = padder
+        self.response_headers = None
+        self.etag = None
+        self.last_modified = None
+        self.request_options = kwargs
+
+    async def get_chunk_streams(self):
+        index = 0
+        while True:
+            data = b''
+            read_size = self.chunk_size
+
+            # Buffer until we either reach the end of the stream or get a whole chunk.
+            while True:
+                if self.total_size:
+                    read_size = min(self.chunk_size - len(data), self.total_size - (index + len(data)))
+                temp = self.stream.read(read_size)
+                if inspect.isawaitable(temp):
+                    temp = await temp
+                if not isinstance(temp, bytes):
+                    raise TypeError('Blob data should be of type bytes.')
+                data += temp or b""
+
+                # We have read an empty string and so are at the end
+                # of the buffer or we have read a full chunk.
+                if temp == b'' or len(data) == self.chunk_size:
+                    break
+
+            if len(data) == self.chunk_size:
+                if self.padder:
+                    data = self.padder.update(data)
+                if self.encryptor:
+                    data = self.encryptor.update(data)
+                yield index, data
+            else:
+                if self.padder:
+                    data = self.padder.update(data) + self.padder.finalize()
+                if self.encryptor:
+                    data = self.encryptor.update(data) + self.encryptor.finalize()
+                if data:
+                    yield index, data
+                break
+            index += len(data)
+
+    async def process_chunk(self, chunk_data):
+        chunk_offset, chunk_bytes = chunk_data
+        return await self._upload_chunk_with_progress(chunk_offset, chunk_bytes)
+
+    async def _update_progress(self, length):
+        if self.progress_lock is not None:
+            async with self.progress_lock:
+                self.progress_total += length
+        else:
+            self.progress_total += length
+
+        if self.progress_hook:
+            await self.progress_hook(self.progress_total, self.total_size)
+
+    async def _upload_chunk(self, chunk_offset, chunk_data):
+        raise NotImplementedError("Must be implemented by child class.")
+
+    async def _upload_chunk_with_progress(self, chunk_offset, chunk_data):
+        range_id = await self._upload_chunk(chunk_offset, chunk_data)
+        await self._update_progress(len(chunk_data))
+        return range_id
+
+    def get_substream_blocks(self):
+        assert self.chunk_size is not None
+        lock = self.stream_lock
+        blob_length = self.total_size
+
+        if blob_length is None:
+            blob_length = get_length(self.stream)
+            if blob_length is None:
+                raise ValueError("Unable to determine content length of upload data.")
+
+        blocks = int(ceil(blob_length / (self.chunk_size * 1.0)))
+        last_block_size = self.chunk_size if blob_length % self.chunk_size == 0 else blob_length % self.chunk_size
+
+        for i in range(blocks):
+            index = i * self.chunk_size
+            length = last_block_size if i == blocks - 1 else self.chunk_size
+            yield index, SubStream(self.stream, index, length, lock)
+
+    async def process_substream_block(self, block_data):
+        return await self._upload_substream_block_with_progress(block_data[0], block_data[1])
+
+    async def _upload_substream_block(self, index, block_stream):
+        raise NotImplementedError("Must be implemented by child class.")
+
+    async def _upload_substream_block_with_progress(self, index, block_stream):
+        range_id = await self._upload_substream_block(index, block_stream)
+        await self._update_progress(len(block_stream))
+        return range_id
+
+    def set_response_properties(self, resp):
+        self.etag = resp.etag
+        self.last_modified = resp.last_modified
+
+
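+# A minimal sketch (illustrative only, not part of the SDK) of the surface a
+# _ChunkUploader subclass implements: _upload_chunk for buffered chunks and
+# _upload_substream_block for seekable sub-streams. `_ExampleChunkUploader`
+# and its return values are hypothetical.
+class _ExampleChunkUploader(_ChunkUploader):
+
+    async def _upload_chunk(self, chunk_offset, chunk_data):
+        # A real uploader calls the service here; this just echoes a range id.
+        return chunk_offset, f"example-{chunk_offset}"
+
+    async def _upload_substream_block(self, index, block_stream):
+        try:
+            return index, "example-block"
+        finally:
+            block_stream.close()
+
+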
+class BlockBlobChunkUploader(_ChunkUploader):
+
+    def __init__(self, *args, **kwargs):
+        kwargs.pop('modified_access_conditions', None)
+        super(BlockBlobChunkUploader, self).__init__(*args, **kwargs)
+        self.current_length = None
+
+    async def _upload_chunk(self, chunk_offset, chunk_data):
+        # TODO: This is incorrect, but works with recording.
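+        # Block IDs must be Base64 strings of equal length within a blob, so the
+        # chunk offset is zero-padded to 32 digits before being encoded.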
+        index = f'{chunk_offset:032d}'
+        block_id = encode_base64(url_quote(encode_base64(index)))
+        await self.service.stage_block(
+            block_id,
+            len(chunk_data),
+            body=chunk_data,
+            data_stream_total=self.total_size,
+            upload_stream_current=self.progress_total,
+            **self.request_options)
+        return index, block_id
+
+    async def _upload_substream_block(self, index, block_stream):
+        try:
+            block_id = f'BlockId{(index//self.chunk_size):05}'
+            await self.service.stage_block(
+                block_id,
+                len(block_stream),
+                block_stream,
+                data_stream_total=self.total_size,
+                upload_stream_current=self.progress_total,
+                **self.request_options)
+        finally:
+            block_stream.close()
+        return block_id
+
+
+class PageBlobChunkUploader(_ChunkUploader):
+
+    def _is_chunk_empty(self, chunk_data):
+        # Read until a non-zero byte is encountered; if the end is reached
+        # without returning, chunk_data is all zeros. Iterating over bytes
+        # yields ints in Python 3, so compare against the integer 0.
+        for each_byte in chunk_data:
+            if each_byte != 0:
+                return False
+        return True
+
+    async def _upload_chunk(self, chunk_offset, chunk_data):
+        # avoid uploading empty pages
+        if not self._is_chunk_empty(chunk_data):
+            chunk_end = chunk_offset + len(chunk_data) - 1
+            content_range = f'bytes={chunk_offset}-{chunk_end}'
+            computed_md5 = None
+            self.response_headers = await self.service.upload_pages(
+                body=chunk_data,
+                content_length=len(chunk_data),
+                transactional_content_md5=computed_md5,
+                range=content_range,
+                cls=return_response_headers,
+                data_stream_total=self.total_size,
+                upload_stream_current=self.progress_total,
+                **self.request_options)
+
+            if not self.parallel and self.request_options.get('modified_access_conditions'):
+                self.request_options['modified_access_conditions'].if_match = self.response_headers['etag']
+
+    async def _upload_substream_block(self, index, block_stream):
+        pass
+
+
+class AppendBlobChunkUploader(_ChunkUploader):
+
+    def __init__(self, *args, **kwargs):
+        super(AppendBlobChunkUploader, self).__init__(*args, **kwargs)
+        self.current_length = None
+
+    async def _upload_chunk(self, chunk_offset, chunk_data):
+        if self.current_length is None:
+            self.response_headers = await self.service.append_block(
+                body=chunk_data,
+                content_length=len(chunk_data),
+                cls=return_response_headers,
+                data_stream_total=self.total_size,
+                upload_stream_current=self.progress_total,
+                **self.request_options)
+            self.current_length = int(self.response_headers['blob_append_offset'])
+        else:
+            self.request_options['append_position_access_conditions'].append_position = \
+                self.current_length + chunk_offset
+            self.response_headers = await self.service.append_block(
+                body=chunk_data,
+                content_length=len(chunk_data),
+                cls=return_response_headers,
+                data_stream_total=self.total_size,
+                upload_stream_current=self.progress_total,
+                **self.request_options)
+
+    async def _upload_substream_block(self, index, block_stream):
+        pass
+
+
+class DataLakeFileChunkUploader(_ChunkUploader):
+
+    async def _upload_chunk(self, chunk_offset, chunk_data):
+        self.response_headers = await self.service.append_data(
+            body=chunk_data,
+            position=chunk_offset,
+            content_length=len(chunk_data),
+            cls=return_response_headers,
+            data_stream_total=self.total_size,
+            upload_stream_current=self.progress_total,
+            **self.request_options
+        )
+
+        if not self.parallel and self.request_options.get('modified_access_conditions'):
+            self.request_options['modified_access_conditions'].if_match = self.response_headers['etag']
+
+    async def _upload_substream_block(self, index, block_stream):
+        try:
+            await self.service.append_data(
+                body=block_stream,
+                position=index,
+                content_length=len(block_stream),
+                cls=return_response_headers,
+                data_stream_total=self.total_size,
+                upload_stream_current=self.progress_total,
+                **self.request_options
+            )
+        finally:
+            block_stream.close()
+
+
+class FileChunkUploader(_ChunkUploader):
+
+    async def _upload_chunk(self, chunk_offset, chunk_data):
+        length = len(chunk_data)
+        chunk_end = chunk_offset + length - 1
+        response = await self.service.upload_range(
+            chunk_data,
+            chunk_offset,
+            length,
+            data_stream_total=self.total_size,
+            upload_stream_current=self.progress_total,
+            **self.request_options
+        )
+        range_id = f'bytes={chunk_offset}-{chunk_end}'
+        return range_id, response
+
+    # TODO: Implement this method.
+    async def _upload_substream_block(self, index, block_stream):
+        pass
+
+
+class AsyncIterStreamer:
+    """
+    File-like streaming object for AsyncGenerators.
+    """
+    def __init__(self, generator: AsyncGenerator[Union[bytes, str], None], encoding: str = "UTF-8"):
+        self.iterator = generator.__aiter__()
+        self.leftover = b""
+        self.encoding = encoding
+
+    def seekable(self):
+        return False
+
+    def tell(self, *args, **kwargs):
+        raise UnsupportedOperation("Data generator does not support tell.")
+
+    def seek(self, *args, **kwargs):
+        raise UnsupportedOperation("Data generator is not seekable.")
+
+    async def read(self, size: int) -> bytes:
+        data = self.leftover
+        count = len(self.leftover)
+        try:
+            while count < size:
+                chunk = await self.iterator.__anext__()
+                if isinstance(chunk, str):
+                    chunk = chunk.encode(self.encoding)
+                data += chunk
+                count += len(chunk)
+        # This means count < size and what's leftover will be returned in this call.
+        except StopAsyncIteration:
+            self.leftover = b""
+
+        if count >= size:
+            self.leftover = data[size:]
+
+        return data[:size]
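+
+
+# Illustrative usage sketch (not part of the SDK): wrapping an async generator
+# so chunked-upload code can call ``await stream.read(n)`` on it. All names
+# below are hypothetical.
+async def _example_async_iter_streamer():
+    async def produce():
+        yield b"hello "
+        yield "world"  # str chunks are encoded with the configured encoding
+
+    stream = AsyncIterStreamer(produce())
+    first = await stream.read(5)   # b"hello"
+    rest = await stream.read(100)  # b" world", then the stream is exhausted
+    return first, rest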
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/blob/_shared_access_signature.py b/.venv/lib/python3.12/site-packages/azure/storage/blob/_shared_access_signature.py
new file mode 100644
index 00000000..a3005be2
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/blob/_shared_access_signature.py
@@ -0,0 +1,699 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+# pylint: disable=docstring-keyword-should-match-keyword-only
+
+from typing import (
+    Any, Callable, Optional, Union,
+    TYPE_CHECKING
+)
+from urllib.parse import parse_qs
+
+from ._shared import sign_string, url_quote
+from ._shared.constants import X_MS_VERSION
+from ._shared.models import Services, UserDelegationKey
+from ._shared.shared_access_signature import QueryStringConstants, SharedAccessSignature, _SharedAccessHelper
+
+if TYPE_CHECKING:
+    from datetime import datetime
+    from ..blob import AccountSasPermissions, BlobSasPermissions, ContainerSasPermissions, ResourceTypes
+
+
+class BlobQueryStringConstants(object):
+    SIGNED_TIMESTAMP = 'snapshot'
+
+
+class BlobSharedAccessSignature(SharedAccessSignature):
+    '''
+    Provides a factory for creating blob and container access
+    signature tokens with a common account name and account key. Users can
+    either use the factory or construct the appropriate service and use the
+    generate_*_shared_access_signature method directly.
+    '''
+
+    def __init__(
+        self, account_name: str,
+        account_key: Optional[str] = None,
+        user_delegation_key: Optional[UserDelegationKey] = None
+    ) -> None:
+        '''
+        :param str account_name:
+            The storage account name used to generate the shared access signatures.
+        :param Optional[str] account_key:
+            The access key to generate the shared access signatures.
+        :param Optional[~azure.storage.blob.models.UserDelegationKey] user_delegation_key:
+            Instead of an account key, the user could pass in a user delegation key.
+            A user delegation key can be obtained from the service by authenticating with an AAD identity;
+            this can be accomplished by calling get_user_delegation_key on any Blob service object.
+        '''
+        super(BlobSharedAccessSignature, self).__init__(account_name, account_key, x_ms_version=X_MS_VERSION)
+        self.user_delegation_key = user_delegation_key
+
+    def generate_blob(
+        self, container_name: str,
+        blob_name: str,
+        snapshot: Optional[str] = None,
+        version_id: Optional[str] = None,
+        permission: Optional[Union["BlobSasPermissions", str]] = None,
+        expiry: Optional[Union["datetime", str]] = None,
+        start: Optional[Union["datetime", str]] = None,
+        policy_id: Optional[str] = None,
+        ip: Optional[str] = None,
+        protocol: Optional[str] = None,
+        cache_control: Optional[str] = None,
+        content_disposition: Optional[str] = None,
+        content_encoding: Optional[str] = None,
+        content_language: Optional[str] = None,
+        content_type: Optional[str] = None,
+        sts_hook: Optional[Callable[[str], None]] = None,
+        **kwargs: Any
+    ) -> str:
+        '''
+        Generates a shared access signature for the blob or one of its snapshots.
+        Use the returned signature with the sas_token parameter of any BlobService.
+
+        :param str container_name:
+            Name of container.
+        :param str blob_name:
+            Name of blob.
+        :param str snapshot:
+            The snapshot parameter is an opaque datetime value that,
+            when present, specifies the blob snapshot for which to grant permission.
+        :param str version_id:
+            An optional blob version ID. This parameter is only applicable for versioning-enabled
+            Storage accounts. Note that the 'versionid' query parameter is not included in the output
+            SAS. Therefore, please provide the 'version_id' parameter to any APIs when using the output
+            SAS to operate on a specific version.
+        :param permission:
+            The permissions associated with the shared access signature. The
+            user is restricted to operations allowed by the permissions.
+            Permissions must be ordered racwdxytmei.
+            Required unless an id is given referencing a stored access policy
+            which contains this field. This field must be omitted if it has been
+            specified in an associated stored access policy.
+        :type permission: str or BlobSasPermissions
+        :param expiry:
+            The time at which the shared access signature becomes invalid.
+            Required unless an id is given referencing a stored access policy
+            which contains this field. This field must be omitted if it has
+            been specified in an associated stored access policy. Azure will always
+            convert values to UTC. If a date is passed in without timezone info, it
+            is assumed to be UTC.
+        :type expiry: datetime or str
+        :param start:
+            The time at which the shared access signature becomes valid. If
+            omitted, start time for this call is assumed to be the time when the
+            storage service receives the request. The provided datetime will always
+            be interpreted as UTC.
+        :type start: datetime or str
+        :param str policy_id:
+            A unique value up to 64 characters in length that correlates to a
+            stored access policy. To create a stored access policy, use
+            set_blob_service_properties.
+        :param str ip:
+            Specifies an IP address or a range of IP addresses from which to accept requests.
+            If the IP address from which the request originates does not match the IP address
+            or address range specified on the SAS token, the request is not authenticated.
+            For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS
+            restricts the request to those IP addresses.
+        :param str protocol:
+            Specifies the protocol permitted for a request made with the SAS. The default value
+            is https,http. See :class:`~azure.storage.common.models.Protocol` for possible values.
+        :param str cache_control:
+            Response header value for Cache-Control when resource is accessed
+            using this shared access signature.
+        :param str content_disposition:
+            Response header value for Content-Disposition when resource is accessed
+            using this shared access signature.
+        :param str content_encoding:
+            Response header value for Content-Encoding when resource is accessed
+            using this shared access signature.
+        :param str content_language:
+            Response header value for Content-Language when resource is accessed
+            using this shared access signature.
+        :param str content_type:
+            Response header value for Content-Type when resource is accessed
+            using this shared access signature.
+        :param sts_hook:
+            For debugging purposes only. If provided, the hook is called with the string to sign
+            that was used to generate the SAS.
+        :type sts_hook: Optional[Callable[[str], None]]
+        :return: A Shared Access Signature (sas) token.
+        :rtype: str
+        '''
+        resource_path = container_name + '/' + blob_name
+
+        sas = _BlobSharedAccessHelper()
+        sas.add_base(permission, expiry, start, ip, protocol, self.x_ms_version)
+        sas.add_id(policy_id)
+
+        resource = 'bs' if snapshot else 'b'
+        resource = 'bv' if version_id else resource
+        resource = 'd' if kwargs.pop("is_directory", None) else resource
+        sas.add_resource(resource)
+
+        sas.add_timestamp(snapshot or version_id)
+        sas.add_override_response_headers(cache_control, content_disposition,
+                                          content_encoding, content_language,
+                                          content_type)
+        sas.add_encryption_scope(**kwargs)
+        sas.add_info_for_hns_account(**kwargs)
+        sas.add_resource_signature(self.account_name, self.account_key, resource_path,
+                                   user_delegation_key=self.user_delegation_key)
+
+        if sts_hook is not None:
+            sts_hook(sas.string_to_sign)
+
+        return sas.get_token()
+
+    def generate_container(
+        self, container_name: str,
+        permission: Optional[Union["ContainerSasPermissions", str]] = None,
+        expiry: Optional[Union["datetime", str]] = None,
+        start: Optional[Union["datetime", str]] = None,
+        policy_id: Optional[str] = None,
+        ip: Optional[str] = None,
+        protocol: Optional[str] = None,
+        cache_control: Optional[str] = None,
+        content_disposition: Optional[str] = None,
+        content_encoding: Optional[str] = None,
+        content_language: Optional[str] = None,
+        content_type: Optional[str] = None,
+        sts_hook: Optional[Callable[[str], None]] = None,
+        **kwargs: Any
+    ) -> str:
+        '''
+        Generates a shared access signature for the container.
+        Use the returned signature with the sas_token parameter of any BlobService.
+
+        :param str container_name:
+            Name of container.
+        :param permission:
+            The permissions associated with the shared access signature. The
+            user is restricted to operations allowed by the permissions.
+            Permissions must be ordered racwdxyltfmei.
+            Required unless an id is given referencing a stored access policy
+            which contains this field. This field must be omitted if it has been
+            specified in an associated stored access policy.
+        :type permission: str or ContainerSasPermissions
+        :param expiry:
+            The time at which the shared access signature becomes invalid.
+            Required unless an id is given referencing a stored access policy
+            which contains this field. This field must be omitted if it has
+            been specified in an associated stored access policy. Azure will always
+            convert values to UTC. If a date is passed in without timezone info, it
+            is assumed to be UTC.
+        :type expiry: datetime or str
+        :param start:
+            The time at which the shared access signature becomes valid. If
+            omitted, start time for this call is assumed to be the time when the
+            storage service receives the request. The provided datetime will always
+            be interpreted as UTC.
+        :type start: datetime or str
+        :param str policy_id:
+            A unique value up to 64 characters in length that correlates to a
+            stored access policy. To create a stored access policy, use
+            set_blob_service_properties.
+        :param str ip:
+            Specifies an IP address or a range of IP addresses from which to accept requests.
+            If the IP address from which the request originates does not match the IP address
+            or address range specified on the SAS token, the request is not authenticated.
+            For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS
+            restricts the request to those IP addresses.
+        :param str protocol:
+            Specifies the protocol permitted for a request made with the SAS. The default value
+            is https,http. See :class:`~azure.storage.common.models.Protocol` for possible values.
+        :param str cache_control:
+            Response header value for Cache-Control when resource is accessed
+            using this shared access signature.
+        :param str content_disposition:
+            Response header value for Content-Disposition when resource is accessed
+            using this shared access signature.
+        :param str content_encoding:
+            Response header value for Content-Encoding when resource is accessed
+            using this shared access signature.
+        :param str content_language:
+            Response header value for Content-Language when resource is accessed
+            using this shared access signature.
+        :param str content_type:
+            Response header value for Content-Type when resource is accessed
+            using this shared access signature.
+        :param sts_hook:
+            For debugging purposes only. If provided, the hook is called with the string to sign
+            that was used to generate the SAS.
+        :type sts_hook: Optional[Callable[[str], None]]
+        :return: A Shared Access Signature (sas) token.
+        :rtype: str
+        '''
+        sas = _BlobSharedAccessHelper()
+        sas.add_base(permission, expiry, start, ip, protocol, self.x_ms_version)
+        sas.add_id(policy_id)
+        sas.add_resource('c')
+        sas.add_override_response_headers(cache_control, content_disposition,
+                                          content_encoding, content_language,
+                                          content_type)
+        sas.add_encryption_scope(**kwargs)
+        sas.add_info_for_hns_account(**kwargs)
+        sas.add_resource_signature(self.account_name, self.account_key, container_name,
+                                   user_delegation_key=self.user_delegation_key)
+
+        if sts_hook is not None:
+            sts_hook(sas.string_to_sign)
+
+        return sas.get_token()
+
+
+class _BlobSharedAccessHelper(_SharedAccessHelper):
+
+    def add_timestamp(self, timestamp):
+        self._add_query(BlobQueryStringConstants.SIGNED_TIMESTAMP, timestamp)
+
+    def add_info_for_hns_account(self, **kwargs):
+        self._add_query(QueryStringConstants.SIGNED_DIRECTORY_DEPTH, kwargs.pop('sdd', None))
+        self._add_query(QueryStringConstants.SIGNED_AUTHORIZED_OID, kwargs.pop('preauthorized_agent_object_id', None))
+        self._add_query(QueryStringConstants.SIGNED_UNAUTHORIZED_OID, kwargs.pop('agent_object_id', None))
+        self._add_query(QueryStringConstants.SIGNED_CORRELATION_ID, kwargs.pop('correlation_id', None))
+
+    def get_value_to_append(self, query):
+        return_value = self.query_dict.get(query) or ''
+        return return_value + '\n'
+
+    def add_resource_signature(self, account_name, account_key, path, user_delegation_key=None):
+        if path[0] != '/':
+            path = '/' + path
+
+        canonicalized_resource = '/blob/' + account_name + path + '\n'
+
+        # Form the string to sign from shared_access_policy and canonicalized
+        # resource. The order of values is important.
+        string_to_sign = \
+            (self.get_value_to_append(QueryStringConstants.SIGNED_PERMISSION) +
+             self.get_value_to_append(QueryStringConstants.SIGNED_START) +
+             self.get_value_to_append(QueryStringConstants.SIGNED_EXPIRY) +
+             canonicalized_resource)
+
+        if user_delegation_key is not None:
+            self._add_query(QueryStringConstants.SIGNED_OID, user_delegation_key.signed_oid)
+            self._add_query(QueryStringConstants.SIGNED_TID, user_delegation_key.signed_tid)
+            self._add_query(QueryStringConstants.SIGNED_KEY_START, user_delegation_key.signed_start)
+            self._add_query(QueryStringConstants.SIGNED_KEY_EXPIRY, user_delegation_key.signed_expiry)
+            self._add_query(QueryStringConstants.SIGNED_KEY_SERVICE, user_delegation_key.signed_service)
+            self._add_query(QueryStringConstants.SIGNED_KEY_VERSION, user_delegation_key.signed_version)
+
+            string_to_sign += \
+                (self.get_value_to_append(QueryStringConstants.SIGNED_OID) +
+                 self.get_value_to_append(QueryStringConstants.SIGNED_TID) +
+                 self.get_value_to_append(QueryStringConstants.SIGNED_KEY_START) +
+                 self.get_value_to_append(QueryStringConstants.SIGNED_KEY_EXPIRY) +
+                 self.get_value_to_append(QueryStringConstants.SIGNED_KEY_SERVICE) +
+                 self.get_value_to_append(QueryStringConstants.SIGNED_KEY_VERSION) +
+                 self.get_value_to_append(QueryStringConstants.SIGNED_AUTHORIZED_OID) +
+                 self.get_value_to_append(QueryStringConstants.SIGNED_UNAUTHORIZED_OID) +
+                 self.get_value_to_append(QueryStringConstants.SIGNED_CORRELATION_ID))
+        else:
+            string_to_sign += self.get_value_to_append(QueryStringConstants.SIGNED_IDENTIFIER)
+
+        string_to_sign += \
+            (self.get_value_to_append(QueryStringConstants.SIGNED_IP) +
+             self.get_value_to_append(QueryStringConstants.SIGNED_PROTOCOL) +
+             self.get_value_to_append(QueryStringConstants.SIGNED_VERSION) +
+             self.get_value_to_append(QueryStringConstants.SIGNED_RESOURCE) +
+             self.get_value_to_append(BlobQueryStringConstants.SIGNED_TIMESTAMP) +
+             self.get_value_to_append(QueryStringConstants.SIGNED_ENCRYPTION_SCOPE) +
+             self.get_value_to_append(QueryStringConstants.SIGNED_CACHE_CONTROL) +
+             self.get_value_to_append(QueryStringConstants.SIGNED_CONTENT_DISPOSITION) +
+             self.get_value_to_append(QueryStringConstants.SIGNED_CONTENT_ENCODING) +
+             self.get_value_to_append(QueryStringConstants.SIGNED_CONTENT_LANGUAGE) +
+             self.get_value_to_append(QueryStringConstants.SIGNED_CONTENT_TYPE))
+
+        # remove the trailing newline
+        if string_to_sign[-1] == '\n':
+            string_to_sign = string_to_sign[:-1]
+
+        self._add_query(QueryStringConstants.SIGNED_SIGNATURE,
+                        sign_string(account_key if user_delegation_key is None else user_delegation_key.value,
+                                    string_to_sign))
+        self.string_to_sign = string_to_sign
+
+    def get_token(self) -> str:
+        # a conscious decision was made to exclude the timestamp in the generated token
+        # this is to avoid having two snapshot ids in the query parameters when the user appends the snapshot timestamp
+        exclude = [BlobQueryStringConstants.SIGNED_TIMESTAMP]
+        return '&'.join([f'{n}={url_quote(v)}'
+                         for n, v in self.query_dict.items() if v is not None and n not in exclude])
+
+
+def generate_account_sas(
+    account_name: str,
+    account_key: str,
+    resource_types: Union["ResourceTypes", str],
+    permission: Union["AccountSasPermissions", str],
+    expiry: Union["datetime", str],
+    start: Optional[Union["datetime", str]] = None,
+    ip: Optional[str] = None,
+    *,
+    services: Union[Services, str] = Services(blob=True),
+    sts_hook: Optional[Callable[[str], None]] = None,
+    **kwargs: Any
+) -> str:
+    """Generates a shared access signature for the blob service.
+
+    Use the returned signature with the credential parameter of any BlobServiceClient,
+    ContainerClient or BlobClient.
+
+    :param str account_name:
+        The storage account name used to generate the shared access signature.
+    :param str account_key:
+        The account key, also called shared key or access key, to generate the shared access signature.
+    :param resource_types:
+        Specifies the resource types that are accessible with the account SAS.
+    :type resource_types: str or ~azure.storage.blob.ResourceTypes
+    :param permission:
+        The permissions associated with the shared access signature. The
+        user is restricted to operations allowed by the permissions.
+    :type permission: str or ~azure.storage.blob.AccountSasPermissions
+    :param expiry:
+        The time at which the shared access signature becomes invalid.
+        The provided datetime will always be interpreted as UTC.
+    :type expiry: ~datetime.datetime or str
+    :param start:
+        The time at which the shared access signature becomes valid. If
+        omitted, start time for this call is assumed to be the time when the
+        storage service receives the request. The provided datetime will always
+        be interpreted as UTC.
+    :type start: ~datetime.datetime or str
+    :param str ip:
+        Specifies an IP address or a range of IP addresses from which to accept requests.
+        If the IP address from which the request originates does not match the IP address
+        or address range specified on the SAS token, the request is not authenticated.
+        For example, specifying ip=168.1.5.65 or ip=168.1.5.60-168.1.5.70 on the SAS
+        restricts the request to those IP addresses.
+    :keyword Union[Services, str] services:
+        Specifies the services with which the Shared Access Signature (sas) token can be used.
+        Will default to only this package (i.e. blobs) if not provided.
+    :keyword str protocol:
+        Specifies the protocol permitted for a request made with the SAS. The default value is https.
+    :keyword str encryption_scope:
+        Specifies the encryption scope for a request made so that all write operations will be service encrypted.
+    :keyword sts_hook:
+        For debugging purposes only. If provided, the hook is called with the string to sign
+        that was used to generate the SAS.
+    :paramtype sts_hook: Optional[Callable[[str], None]]
+    :return: A Shared Access Signature (sas) token.
+    :rtype: str
+
+    .. admonition:: Example:
+
+        .. literalinclude:: ../samples/blob_samples_authentication.py
+            :start-after: [START create_sas_token]
+            :end-before: [END create_sas_token]
+            :language: python
+            :dedent: 8
+            :caption: Generating a shared access signature.
+    """
+    sas = SharedAccessSignature(account_name, account_key)
+    return sas.generate_account(
+        services=services,
+        resource_types=resource_types,
+        permission=permission,
+        expiry=expiry,
+        start=start,
+        ip=ip,
+        sts_hook=sts_hook,
+        **kwargs
+    )
+
+
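+# A minimal usage sketch (illustrative only, not part of the SDK); the account
+# name and key below are placeholders, not real credentials.
+def _example_generate_account_sas():
+    from datetime import datetime, timedelta, timezone
+
+    from azure.storage.blob import AccountSasPermissions, ResourceTypes
+
+    return generate_account_sas(
+        account_name="mystorageaccount",
+        account_key="A" * 86 + "==",  # dummy Base64 key, not a real credential
+        resource_types=ResourceTypes(container=True, object=True),
+        permission=AccountSasPermissions(read=True, list=True),
+        expiry=datetime.now(timezone.utc) + timedelta(hours=1),
+    )
+
+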
+def generate_container_sas(
+    account_name: str,
+    container_name: str,
+    account_key: Optional[str] = None,
+    user_delegation_key: Optional[UserDelegationKey] = None,
+    permission: Optional[Union["ContainerSasPermissions", str]] = None,
+    expiry: Optional[Union["datetime", str]] = None,
+    start: Optional[Union["datetime", str]] = None,
+    policy_id: Optional[str] = None,
+    ip: Optional[str] = None,
+    *,
+    sts_hook: Optional[Callable[[str], None]] = None,
+    **kwargs: Any
+) -> str:
+    """Generates a shared access signature for a container.
+
+    Use the returned signature with the credential parameter of any BlobServiceClient,
+    ContainerClient or BlobClient.
+
+    :param str account_name:
+        The storage account name used to generate the shared access signature.
+    :param str container_name:
+        The name of the container.
+    :param str account_key:
+        The account key, also called shared key or access key, to generate the shared access signature.
+        Either `account_key` or `user_delegation_key` must be specified.
+    :param ~azure.storage.blob.UserDelegationKey user_delegation_key:
+        Instead of an account shared key, the user could pass in a user delegation key.
+        A user delegation key can be obtained from the service by authenticating with an AAD identity;
+        this can be accomplished by calling :func:`~azure.storage.blob.BlobServiceClient.get_user_delegation_key`.
+        When present, the SAS is signed with the user delegation key instead.
+    :param permission:
+        The permissions associated with the shared access signature. The
+        user is restricted to operations allowed by the permissions.
+        Permissions must be ordered racwdxyltfmei.
+        Required unless an id is given referencing a stored access policy
+        which contains this field. This field must be omitted if it has been
+        specified in an associated stored access policy.
+    :type permission: str or ~azure.storage.blob.ContainerSasPermissions
+    :param expiry:
+        The time at which the shared access signature becomes invalid.
+        Required unless an id is given referencing a stored access policy
+        which contains this field. This field must be omitted if it has
+        been specified in an associated stored access policy. Azure will always
+        convert values to UTC. If a date is passed in without timezone info, it
+        is assumed to be UTC.
+    :type expiry: ~datetime.datetime or str
+    :param start:
+        The time at which the shared access signature becomes valid. If
+        omitted, start time for this call is assumed to be the time when the
+        storage service receives the request. The provided datetime will always
+        be interpreted as UTC.
+    :type start: ~datetime.datetime or str
+    :param str policy_id:
+        A unique value up to 64 characters in length that correlates to a
+        stored access policy. To create a stored access policy, use
+        :func:`~azure.storage.blob.ContainerClient.set_container_access_policy`.
+    :param str ip:
+        Specifies an IP address or a range of IP addresses from which to accept requests.
+        If the IP address from which the request originates does not match the IP address
+        or address range specified on the SAS token, the request is not authenticated.
+        For example, specifying ip=168.1.5.65 or ip=168.1.5.60-168.1.5.70 on the SAS
+        restricts the request to those IP addresses.
+    :keyword str protocol:
+        Specifies the protocol permitted for a request made with the SAS. The default value is https.
+    :keyword str cache_control:
+        Response header value for Cache-Control when resource is accessed
+        using this shared access signature.
+    :keyword str content_disposition:
+        Response header value for Content-Disposition when resource is accessed
+        using this shared access signature.
+    :keyword str content_encoding:
+        Response header value for Content-Encoding when resource is accessed
+        using this shared access signature.
+    :keyword str content_language:
+        Response header value for Content-Language when resource is accessed
+        using this shared access signature.
+    :keyword str content_type:
+        Response header value for Content-Type when resource is accessed
+        using this shared access signature.
+    :keyword str encryption_scope:
+        Specifies the encryption scope for a request made so that all write operations will be service encrypted.
+    :keyword str correlation_id:
+        The correlation id to correlate the storage audit logs with the audit logs used by the principal
+        generating and distributing the SAS. This can only be used when generating a SAS with delegation key.
+    :keyword sts_hook:
+        For debugging purposes only. If provided, the hook is called with the string to sign
+        that was used to generate the SAS.
+    :paramtype sts_hook: Optional[Callable[[str], None]]
+    :return: A Shared Access Signature (sas) token.
+    :rtype: str
+
+    .. admonition:: Example:
+
+        .. literalinclude:: ../samples/blob_samples_containers.py
+            :start-after: [START generate_sas_token]
+            :end-before: [END generate_sas_token]
+            :language: python
+            :dedent: 12
+            :caption: Generating a sas token.
+    """
+    if not policy_id:
+        if not expiry:
+            raise ValueError("'expiry' parameter must be provided when not using a stored access policy.")
+        if not permission:
+            raise ValueError("'permission' parameter must be provided when not using a stored access policy.")
+    if not user_delegation_key and not account_key:
+        raise ValueError("Either user_delegation_key or account_key must be provided.")
+    if isinstance(account_key, UserDelegationKey):
+        user_delegation_key = account_key
+    if user_delegation_key:
+        sas = BlobSharedAccessSignature(account_name, user_delegation_key=user_delegation_key)
+    else:
+        sas = BlobSharedAccessSignature(account_name, account_key=account_key)
+    return sas.generate_container(
+        container_name,
+        permission=permission,
+        expiry=expiry,
+        start=start,
+        policy_id=policy_id,
+        ip=ip,
+        sts_hook=sts_hook,
+        **kwargs
+    )
+
+
+def generate_blob_sas(
+    account_name: str,
+    container_name: str,
+    blob_name: str,
+    snapshot: Optional[str] = None,
+    account_key: Optional[str] = None,
+    user_delegation_key: Optional[UserDelegationKey] = None,
+    permission: Optional[Union["BlobSasPermissions", str]] = None,
+    expiry: Optional[Union["datetime", str]] = None,
+    start: Optional[Union["datetime", str]] = None,
+    policy_id: Optional[str] = None,
+    ip: Optional[str] = None,
+    *,
+    sts_hook: Optional[Callable[[str], None]] = None,
+    **kwargs: Any
+) -> str:
+    """Generates a shared access signature for a blob.
+
+    Use the returned signature with the credential parameter of any BlobServiceClient,
+    ContainerClient or BlobClient.
+
+    :param str account_name:
+        The storage account name used to generate the shared access signature.
+    :param str container_name:
+        The name of the container.
+    :param str blob_name:
+        The name of the blob.
+    :param str snapshot:
+        An optional blob snapshot ID.
+    :param str account_key:
+        The account key, also called shared key or access key, to generate the shared access signature.
+        Either `account_key` or `user_delegation_key` must be specified.
+    :param ~azure.storage.blob.UserDelegationKey user_delegation_key:
+        Instead of an account shared key, the user could pass in a user delegation key.
+        A user delegation key can be obtained from the service by authenticating with an AAD identity;
+        this can be accomplished by calling :func:`~azure.storage.blob.BlobServiceClient.get_user_delegation_key`.
+        When present, the SAS is signed with the user delegation key instead.
+    :param permission:
+        The permissions associated with the shared access signature. The
+        user is restricted to operations allowed by the permissions.
+        Permissions must be ordered racwdxytmei.
+        Required unless an id is given referencing a stored access policy
+        which contains this field. This field must be omitted if it has been
+        specified in an associated stored access policy.
+    :type permission: str or ~azure.storage.blob.BlobSasPermissions
+    :param expiry:
+        The time at which the shared access signature becomes invalid.
+        Required unless an id is given referencing a stored access policy
+        which contains this field. This field must be omitted if it has
+        been specified in an associated stored access policy. Azure will always
+        convert values to UTC. If a date is passed in without timezone info, it
+        is assumed to be UTC.
+    :type expiry: ~datetime.datetime or str
+    :param start:
+        The time at which the shared access signature becomes valid. If
+        omitted, start time for this call is assumed to be the time when the
+        storage service receives the request. The provided datetime will always
+        be interpreted as UTC.
+    :type start: ~datetime.datetime or str
+    :param str policy_id:
+        A unique value up to 64 characters in length that correlates to a
+        stored access policy. To create a stored access policy, use
+        :func:`~azure.storage.blob.ContainerClient.set_container_access_policy()`.
+    :param str ip:
+        Specifies an IP address or a range of IP addresses from which to accept requests.
+        If the IP address from which the request originates does not match the IP address
+        or address range specified on the SAS token, the request is not authenticated.
+        For example, specifying ip=168.1.5.65 or ip=168.1.5.60-168.1.5.70 on the SAS
+        restricts the request to those IP addresses.
+    :keyword str version_id:
+        An optional blob version ID. This parameter is only applicable for versioning-enabled
+        Storage accounts. Note that the 'versionid' query parameter is not included in the output
+        SAS. Therefore, please provide the 'version_id' parameter to any APIs when using the output
+        SAS to operate on a specific version.
+
+        .. versionadded:: 12.4.0
+            This keyword argument was introduced in API version '2019-12-12'.
+    :keyword str protocol:
+        Specifies the protocol permitted for a request made with the SAS. The default value is https.
+    :keyword str cache_control:
+        Response header value for Cache-Control when resource is accessed
+        using this shared access signature.
+    :keyword str content_disposition:
+        Response header value for Content-Disposition when resource is accessed
+        using this shared access signature.
+    :keyword str content_encoding:
+        Response header value for Content-Encoding when resource is accessed
+        using this shared access signature.
+    :keyword str content_language:
+        Response header value for Content-Language when resource is accessed
+        using this shared access signature.
+    :keyword str content_type:
+        Response header value for Content-Type when resource is accessed
+        using this shared access signature.
+    :keyword str encryption_scope:
+        Specifies the encryption scope for a request made so that all write operations will be service encrypted.
+    :keyword str correlation_id:
+        The correlation id to correlate the storage audit logs with the audit logs used by the principal
+        generating and distributing the SAS. This can only be used when generating a SAS with delegation key.
+    :keyword sts_hook:
+        For debugging purposes only. If provided, the hook is called with the string to sign
+        that was used to generate the SAS.
+    :paramtype sts_hook: Optional[Callable[[str], None]]
+    :return: A Shared Access Signature (sas) token.
+    :rtype: str
+    """
+    if not policy_id:
+        if not expiry:
+            raise ValueError("'expiry' parameter must be provided when not using a stored access policy.")
+        if not permission:
+            raise ValueError("'permission' parameter must be provided when not using a stored access policy.")
+    if not user_delegation_key and not account_key:
+        raise ValueError("Either user_delegation_key or account_key must be provided.")
+    if isinstance(account_key, UserDelegationKey):
+        user_delegation_key = account_key
+    version_id = kwargs.pop('version_id', None)
+    if version_id and snapshot:
+        raise ValueError("snapshot and version_id cannot be set at the same time.")
+    if user_delegation_key:
+        sas = BlobSharedAccessSignature(account_name, user_delegation_key=user_delegation_key)
+    else:
+        sas = BlobSharedAccessSignature(account_name, account_key=account_key)
+    return sas.generate_blob(
+        container_name,
+        blob_name,
+        snapshot=snapshot,
+        version_id=version_id,
+        permission=permission,
+        expiry=expiry,
+        start=start,
+        policy_id=policy_id,
+        ip=ip,
+        sts_hook=sts_hook,
+        **kwargs
+    )
+
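+
+# A minimal usage sketch for a blob-level SAS (illustrative only, not part of
+# the SDK); the credentials below are placeholders.
+def _example_generate_blob_sas():
+    from datetime import datetime, timedelta, timezone
+
+    from azure.storage.blob import BlobSasPermissions
+
+    return generate_blob_sas(
+        account_name="mystorageaccount",
+        container_name="mycontainer",
+        blob_name="myblob.txt",
+        account_key="A" * 86 + "==",  # dummy Base64 key, not a real credential
+        permission=BlobSasPermissions(read=True),
+        expiry=datetime.now(timezone.utc) + timedelta(hours=1),
+    )
+
+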
+def _is_credential_sastoken(credential: Any) -> bool:
+    if not credential or not isinstance(credential, str):
+        return False
+
+    sas_values = QueryStringConstants.to_list()
+    parsed_query = parse_qs(credential.lstrip("?"))
+    if parsed_query and all(k in sas_values for k in parsed_query):
+        return True
+    return False
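+
+
+# Illustrative check (not part of the SDK): the helper returns True only when
+# every query parameter is a recognized SAS parameter.
+def _example_is_credential_sastoken():
+    sas_like = "sv=2021-08-06&se=2030-01-01T00%3A00%3A00Z&sp=r&sig=example"
+    not_sas = "foo=bar&sig=example"  # 'foo' is not a SAS parameter
+    return _is_credential_sastoken(sas_like), _is_credential_sastoken(not_sas)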
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/blob/_upload_helpers.py b/.venv/lib/python3.12/site-packages/azure/storage/blob/_upload_helpers.py
new file mode 100644
index 00000000..2ce55f7a
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/blob/_upload_helpers.py
@@ -0,0 +1,354 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+
+from io import SEEK_SET, UnsupportedOperation
+from typing import Any, cast, Dict, IO, Optional, TypeVar, TYPE_CHECKING
+
+from azure.core.exceptions import ResourceExistsError, ResourceModifiedError, HttpResponseError
+
+from ._encryption import (
+    _ENCRYPTION_PROTOCOL_V1,
+    _ENCRYPTION_PROTOCOL_V2,
+    encrypt_blob,
+    GCMBlobEncryptionStream,
+    generate_blob_encryption_data,
+    get_adjusted_upload_size,
+    get_blob_encryptor_and_padder
+)
+from ._generated.models import (
+    AppendPositionAccessConditions,
+    BlockLookupList,
+    ModifiedAccessConditions
+)
+from ._shared.models import StorageErrorCode
+from ._shared.response_handlers import process_storage_error, return_response_headers
+from ._shared.uploads import (
+    AppendBlobChunkUploader,
+    BlockBlobChunkUploader,
+    PageBlobChunkUploader,
+    upload_data_chunks,
+    upload_substream_blocks
+)
+
+if TYPE_CHECKING:
+    from ._generated.operations import AppendBlobOperations, BlockBlobOperations, PageBlobOperations
+    from ._shared.models import StorageConfiguration
+    BlobLeaseClient = TypeVar("BlobLeaseClient")
+
+_LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE = 4 * 1024 * 1024
+_ERROR_VALUE_SHOULD_BE_SEEKABLE_STREAM = '{0} should be a seekable file-like/io.IOBase type stream object.'
+
+
+def _convert_mod_error(error):
+    message = error.message.replace(
+        "The condition specified using HTTP conditional header(s) is not met.",
+        "The specified blob already exists.")
+    message = message.replace("ConditionNotMet", "BlobAlreadyExists")
+    overwrite_error = ResourceExistsError(
+        message=message,
+        response=error.response,
+        error=error)
+    overwrite_error.error_code = StorageErrorCode.blob_already_exists
+    raise overwrite_error
+
+
+def _any_conditions(modified_access_conditions=None, **kwargs):  # pylint: disable=unused-argument
+    return any([
+        modified_access_conditions.if_modified_since,
+        modified_access_conditions.if_unmodified_since,
+        modified_access_conditions.if_none_match,
+        modified_access_conditions.if_match
+    ])
+
+
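+# A minimal sketch (illustrative only, not part of the SDK) of how the
+# no-overwrite path arms its precondition: with no user-supplied conditions,
+# If-None-Match: * makes the service reject the upload if the blob exists.
+def _example_no_overwrite_conditions():
+    conditions = ModifiedAccessConditions()
+    if not _any_conditions(modified_access_conditions=conditions):
+        conditions.if_none_match = '*'
+    return conditions
+
+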
+def upload_block_blob(  # pylint: disable=too-many-locals, too-many-statements
+    client: "BlockBlobOperations",
+    stream: IO,
+    overwrite: bool,
+    encryption_options: Dict[str, Any],
+    blob_settings: "StorageConfiguration",
+    headers: Dict[str, Any],
+    validate_content: bool,
+    max_concurrency: Optional[int],
+    length: Optional[int] = None,
+    **kwargs: Any
+) -> Dict[str, Any]:
+    try:
+        if not overwrite and not _any_conditions(**kwargs):
+            kwargs['modified_access_conditions'].if_none_match = '*'
+        adjusted_count = length
+        if (encryption_options.get('key') is not None) and (adjusted_count is not None):
+            adjusted_count = get_adjusted_upload_size(adjusted_count, encryption_options['version'])
+        blob_headers = kwargs.pop('blob_headers', None)
+        tier = kwargs.pop('standard_blob_tier', None)
+        blob_tags_string = kwargs.pop('blob_tags_string', None)
+
+        immutability_policy = kwargs.pop('immutability_policy', None)
+        immutability_policy_expiry = None if immutability_policy is None else immutability_policy.expiry_time
+        immutability_policy_mode = None if immutability_policy is None else immutability_policy.policy_mode
+        legal_hold = kwargs.pop('legal_hold', None)
+        progress_hook = kwargs.pop('progress_hook', None)
+
+        # Do single put if the size is smaller than or equal to config.max_single_put_size
+        if adjusted_count is not None and (adjusted_count <= blob_settings.max_single_put_size):
+            data = stream.read(length or -1)
+            if not isinstance(data, bytes):
+                raise TypeError('Blob data should be of type bytes.')
+
+            if encryption_options.get('key'):
+                encryption_data, data = encrypt_blob(data, encryption_options['key'], encryption_options['version'])
+                headers['x-ms-meta-encryptiondata'] = encryption_data
+
+            response = client.upload(
+                body=data,  # type: ignore [arg-type]
+                content_length=adjusted_count,
+                blob_http_headers=blob_headers,
+                headers=headers,
+                cls=return_response_headers,
+                validate_content=validate_content,
+                data_stream_total=adjusted_count,
+                upload_stream_current=0,
+                tier=tier.value if tier else None,
+                blob_tags_string=blob_tags_string,
+                immutability_policy_expiry=immutability_policy_expiry,
+                immutability_policy_mode=immutability_policy_mode,
+                legal_hold=legal_hold,
+                **kwargs)
+
+            if progress_hook:
+                progress_hook(adjusted_count, adjusted_count)
+
+            return cast(Dict[str, Any], response)
+
+        use_original_upload_path = (
+            blob_settings.use_byte_buffer or
+            validate_content or
+            encryption_options.get('required') or
+            blob_settings.max_block_size < blob_settings.min_large_block_upload_threshold or
+            (hasattr(stream, 'seekable') and not stream.seekable()) or
+            not hasattr(stream, 'seek') or
+            not hasattr(stream, 'tell')
+        )
+
+        if use_original_upload_path:
+            total_size = length
+            encryptor, padder = None, None
+            if encryption_options and encryption_options.get('key'):
+                cek, iv, encryption_metadata = generate_blob_encryption_data(
+                    encryption_options['key'],
+                    encryption_options['version'])
+                headers['x-ms-meta-encryptiondata'] = encryption_metadata
+
+                if encryption_options['version'] == _ENCRYPTION_PROTOCOL_V1:
+                    encryptor, padder = get_blob_encryptor_and_padder(cek, iv, True)
+
+                if encryption_options['version'] == _ENCRYPTION_PROTOCOL_V2:
+                    # Adjust total_size for encryption V2
+                    total_size = adjusted_count
+                    # V2 wraps the data stream with an encryption stream
+                    if cek is None:
+                        raise ValueError("Generate encryption metadata failed. 'cek' is None.")
+                    stream = GCMBlobEncryptionStream(cek, stream)  # type: ignore [assignment]
+
+            block_ids = upload_data_chunks(
+                service=client,
+                uploader_class=BlockBlobChunkUploader,
+                total_size=total_size,
+                chunk_size=blob_settings.max_block_size,
+                max_concurrency=max_concurrency,
+                stream=stream,
+                validate_content=validate_content,
+                progress_hook=progress_hook,
+                encryptor=encryptor,
+                padder=padder,
+                headers=headers,
+                **kwargs
+            )
+        else:
+            block_ids = upload_substream_blocks(
+                service=client,
+                uploader_class=BlockBlobChunkUploader,
+                total_size=length,
+                chunk_size=blob_settings.max_block_size,
+                max_concurrency=max_concurrency,
+                stream=stream,
+                validate_content=validate_content,
+                progress_hook=progress_hook,
+                headers=headers,
+                **kwargs
+            )
+
+        block_lookup = BlockLookupList(committed=[], uncommitted=[], latest=[])
+        block_lookup.latest = block_ids
+        return cast(Dict[str, Any], client.commit_block_list(
+            block_lookup,
+            blob_http_headers=blob_headers,
+            cls=return_response_headers,
+            validate_content=validate_content,
+            headers=headers,
+            tier=tier.value if tier else None,
+            blob_tags_string=blob_tags_string,
+            immutability_policy_expiry=immutability_policy_expiry,
+            immutability_policy_mode=immutability_policy_mode,
+            legal_hold=legal_hold,
+            **kwargs))
+    except HttpResponseError as error:
+        try:
+            process_storage_error(error)
+        except ResourceModifiedError as mod_error:
+            if not overwrite:
+                _convert_mod_error(mod_error)
+            raise
+
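Editor's note: for orientation, a minimal sketch (not part of the patch) of the size-based dispatch implemented above. Payloads at or below max_single_put_size go out as a single Put Blob call, while larger or unknown-size streams are staged as blocks and committed via commit_block_list. The threshold constant below is an assumed stand-in for blob_settings.max_single_put_size.

MAX_SINGLE_PUT = 64 * 1024 * 1024  # assumed default; see max_single_put_size later in this diff

def choose_block_blob_path(adjusted_count):
    # Mirrors the branch at the top of upload_block_blob: small known-size
    # payloads take the single-request path, everything else is chunked.
    if adjusted_count is not None and adjusted_count <= MAX_SINGLE_PUT:
        return "single_put"
    return "stage_blocks_then_commit"

assert choose_block_blob_path(1024) == "single_put"
assert choose_block_blob_path(None) == "stage_blocks_then_commit"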
+
+def upload_page_blob(
+    client: "PageBlobOperations",
+    overwrite: bool,
+    encryption_options: Dict[str, Any],
+    blob_settings: "StorageConfiguration",
+    headers: Dict[str, Any],
+    stream: IO,
+    length: Optional[int] = None,
+    validate_content: Optional[bool] = None,
+    max_concurrency: Optional[int] = None,
+    **kwargs: Any
+) -> Dict[str, Any]:
+    try:
+        if not overwrite and not _any_conditions(**kwargs):
+            kwargs['modified_access_conditions'].if_none_match = '*'
+        if length is None or length < 0:
+            raise ValueError("A content length must be specified for a Page Blob.")
+        if length % 512 != 0:
+            raise ValueError(f"Invalid page blob size: {length}. "
+                             "The size must be aligned to a 512-byte boundary.")
+        tier = None
+        if kwargs.get('premium_page_blob_tier'):
+            premium_page_blob_tier = kwargs.pop('premium_page_blob_tier')
+            try:
+                tier = premium_page_blob_tier.value
+            except AttributeError:
+                tier = premium_page_blob_tier
+
+        if encryption_options and encryption_options.get('key'):
+            cek, iv, encryption_data = generate_blob_encryption_data(
+                encryption_options['key'],
+                encryption_options['version'])
+            headers['x-ms-meta-encryptiondata'] = encryption_data
+
+        blob_tags_string = kwargs.pop('blob_tags_string', None)
+        progress_hook = kwargs.pop('progress_hook', None)
+
+        response = cast(Dict[str, Any], client.create(
+            content_length=0,
+            blob_content_length=length,
+            blob_sequence_number=None,  # type: ignore [arg-type]
+            blob_http_headers=kwargs.pop('blob_headers', None),
+            blob_tags_string=blob_tags_string,
+            tier=tier,
+            cls=return_response_headers,
+            headers=headers,
+            **kwargs))
+        if length == 0:
+            return cast(Dict[str, Any], response)
+
+        if encryption_options and encryption_options.get('key'):
+            if encryption_options['version'] == _ENCRYPTION_PROTOCOL_V1:
+                encryptor, padder = get_blob_encryptor_and_padder(cek, iv, False)
+                kwargs['encryptor'] = encryptor
+                kwargs['padder'] = padder
+
+        kwargs['modified_access_conditions'] = ModifiedAccessConditions(if_match=response['etag'])
+        return cast(Dict[str, Any], upload_data_chunks(
+            service=client,
+            uploader_class=PageBlobChunkUploader,
+            total_size=length,
+            chunk_size=blob_settings.max_page_size,
+            stream=stream,
+            max_concurrency=max_concurrency,
+            validate_content=validate_content,
+            progress_hook=progress_hook,
+            headers=headers,
+            **kwargs))
+
+    except HttpResponseError as error:
+        try:
+            process_storage_error(error)
+        except ResourceModifiedError as mod_error:
+            if not overwrite:
+                _convert_mod_error(mod_error)
+            raise
+
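Editor's note: the alignment check above enforces the service rule that page blob sizes must be multiples of 512 bytes. A small illustrative helper (hypothetical, not part of this module) for zero-padding a buffer to a page boundary before upload:

def pad_to_page_boundary(data: bytes) -> bytes:
    # Page blobs are sized in whole 512-byte pages; pad the tail with zeros.
    remainder = len(data) % 512
    if remainder:
        data += b'\x00' * (512 - remainder)
    return data

assert len(pad_to_page_boundary(b'x' * 1000)) == 1024  # rounded up to two pages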
+
+def upload_append_blob(  # pylint: disable=unused-argument
+    client: "AppendBlobOperations",
+    overwrite: bool,
+    encryption_options: Dict[str, Any],
+    blob_settings: "StorageConfiguration",
+    headers: Dict[str, Any],
+    stream: IO,
+    length: Optional[int] = None,
+    validate_content: Optional[bool] = None,
+    max_concurrency: Optional[int] = None,
+    **kwargs: Any
+) -> Dict[str, Any]:
+    try:
+        if length == 0:
+            return {}
+        blob_headers = kwargs.pop('blob_headers', None)
+        append_conditions = AppendPositionAccessConditions(
+            max_size=kwargs.pop('maxsize_condition', None),
+            append_position=None)
+        blob_tags_string = kwargs.pop('blob_tags_string', None)
+        progress_hook = kwargs.pop('progress_hook', None)
+
+        try:
+            if overwrite:
+                client.create(
+                    content_length=0,
+                    blob_http_headers=blob_headers,
+                    headers=headers,
+                    blob_tags_string=blob_tags_string,
+                    **kwargs)
+            return cast(Dict[str, Any], upload_data_chunks(
+                service=client,
+                uploader_class=AppendBlobChunkUploader,
+                total_size=length,
+                chunk_size=blob_settings.max_block_size,
+                stream=stream,
+                max_concurrency=max_concurrency,
+                validate_content=validate_content,
+                append_position_access_conditions=append_conditions,
+                progress_hook=progress_hook,
+                headers=headers,
+                **kwargs))
+        except HttpResponseError as error:
+            if error.response.status_code != 404:  # type: ignore [union-attr]
+                raise
+            # rewind the request body if it is a stream
+            if hasattr(stream, 'read'):
+                try:
+                    # attempt to rewind the body to the initial position
+                    stream.seek(0, SEEK_SET)
+                except UnsupportedOperation as exc:
+                    # if body is not seekable, then retry would not work
+                    raise error from exc
+            client.create(
+                content_length=0,
+                blob_http_headers=blob_headers,
+                headers=headers,
+                blob_tags_string=blob_tags_string,
+                **kwargs)
+            return cast(Dict[str, Any], upload_data_chunks(
+                service=client,
+                uploader_class=AppendBlobChunkUploader,
+                total_size=length,
+                chunk_size=blob_settings.max_block_size,
+                stream=stream,
+                max_concurrency=max_concurrency,
+                validate_content=validate_content,
+                append_position_access_conditions=append_conditions,
+                progress_hook=progress_hook,
+                headers=headers,
+                **kwargs))
+    except HttpResponseError as error:
+        process_storage_error(error)
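Editor's note: the 404 handling above is a create-if-missing retry: when the first chunked append fails because the blob does not exist, the stream is rewound, the blob is created, and the upload is reissued; a non-seekable body makes the retry impossible. A generic sketch of the same pattern, with illustrative names and FileNotFoundError standing in for the 404 HttpResponseError:

from io import SEEK_SET, UnsupportedOperation

def append_with_create_retry(create, append_chunks, stream):
    # Optimistically append; on a missing-blob error, rewind, create, retry once.
    try:
        return append_chunks(stream)
    except FileNotFoundError as error:
        try:
            stream.seek(0, SEEK_SET)  # rewind the request body for the retry
        except UnsupportedOperation as exc:
            raise error from exc  # retry is impossible without a seekable body
        create()
        return append_chunks(stream)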
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/blob/_version.py b/.venv/lib/python3.12/site-packages/azure/storage/blob/_version.py
new file mode 100644
index 00000000..bb6f0bf8
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/blob/_version.py
@@ -0,0 +1,7 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+
+VERSION = "12.25.0"
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/blob/aio/__init__.py b/.venv/lib/python3.12/site-packages/azure/storage/blob/aio/__init__.py
new file mode 100644
index 00000000..a755e6a2
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/blob/aio/__init__.py
@@ -0,0 +1,166 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+# pylint: disable=docstring-keyword-should-match-keyword-only
+
+import os
+
+from typing import Any, AnyStr, Dict, cast, IO, Iterable, Optional, Union, TYPE_CHECKING
+from ._list_blobs_helper import BlobPrefix
+from .._models import BlobType
+from .._shared.policies_async import ExponentialRetry, LinearRetry
+from ._blob_client_async import BlobClient
+from ._container_client_async import ContainerClient
+from ._blob_service_client_async import BlobServiceClient
+from ._lease_async import BlobLeaseClient
+from ._download_async import StorageStreamDownloader
+
+if TYPE_CHECKING:
+    from azure.core.credentials import AzureNamedKeyCredential, AzureSasCredential
+    from azure.core.credentials_async import AsyncTokenCredential
+
+
+async def upload_blob_to_url(
+    blob_url: str,
+    data: Union[Iterable[AnyStr], IO[AnyStr]],
+    credential: Optional[Union[str, Dict[str, str], "AzureNamedKeyCredential", "AzureSasCredential", "AsyncTokenCredential"]] = None,  # pylint: disable=line-too-long
+    **kwargs: Any
+) -> Dict[str, Any]:
+    """Upload data to a given URL
+
+    The data will be uploaded as a block blob.
+
+    :param str blob_url:
+        The full URI to the blob. This can also include a SAS token.
+    :param data:
+        The data to upload. This can be bytes, text, an iterable or a file-like object.
+    :type data: bytes or str or Iterable
+    :param credential:
+        The credentials with which to authenticate. This is optional if the
+        blob URL already has a SAS token. The value can be a SAS token string,
+        an instance of an AzureSasCredential or AzureNamedKeyCredential from azure.core.credentials,
+        an account shared access key, or an instance of a TokenCredentials class from azure.identity.
+        If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential
+        - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError.
+        If using an instance of AzureNamedKeyCredential, "name" should be the storage account name, and "key"
+        should be the storage account key.
+    :type credential:
+        ~azure.core.credentials.AzureNamedKeyCredential or
+        ~azure.core.credentials.AzureSasCredential or
+        ~azure.core.credentials.TokenCredential or
+        str or dict[str, str] or None
+    :keyword bool overwrite:
+        Whether the blob to be uploaded should overwrite the current data.
+        If True, upload_blob_to_url will overwrite any existing data. If set to False, the
+        operation will fail with a ResourceExistsError.
+    :keyword int max_concurrency:
+        The number of parallel connections with which to upload.
+    :keyword int length:
+        Number of bytes to read from the stream. This is optional, but
+        should be supplied for optimal performance.
+    :keyword dict(str,str) metadata:
+        Name-value pairs associated with the blob as metadata.
+    :keyword bool validate_content:
+        If true, calculates an MD5 hash for each chunk of the blob. The storage
+        service checks the hash of the content that has arrived with the hash
+        that was sent. This is primarily valuable for detecting bitflips on
+        the wire if using http instead of https, as https (the default) will
+        already validate. Note that this MD5 hash is not stored with the
+        blob. Also note that if enabled, the memory-efficient upload algorithm
+        will not be used, because computing the MD5 hash requires buffering
+        entire blocks, and doing so defeats the purpose of the memory-efficient algorithm.
+    :keyword str encoding:
+        Encoding to use if text is supplied as input. Defaults to UTF-8.
+    :returns: Blob-updated property dict (Etag and last modified)
+    :rtype: dict[str, Any]
+    """
+    async with BlobClient.from_blob_url(blob_url, credential=credential) as client:
+        return await cast(BlobClient, client).upload_blob(
+            data=data,
+            blob_type=BlobType.BLOCKBLOB,
+            **kwargs)
+
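Editor's note: a minimal usage sketch for the helper above; the account, container, blob name, and SAS token in the URL are placeholders.

import asyncio

from azure.storage.blob.aio import upload_blob_to_url

async def main():
    # Placeholder URL carrying a SAS token; data is uploaded as a block blob.
    url = "https://myaccount.blob.core.windows.net/mycontainer/myblob?<sas>"
    result = await upload_blob_to_url(url, b"hello, blob", overwrite=True)
    print(result.get("etag"))

asyncio.run(main())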
+
+# Download data to specified open file-handle.
+async def _download_to_stream(client, handle, **kwargs):
+    stream = await client.download_blob(**kwargs)
+    await stream.readinto(handle)
+
+
+async def download_blob_from_url(
+    blob_url: str,
+    output: str,
+    credential: Optional[Union[str, Dict[str, str], "AzureNamedKeyCredential", "AzureSasCredential", "AsyncTokenCredential"]] = None, # pylint: disable=line-too-long
+    **kwargs: Any
+) -> None:
+    """Download the contents of a blob to a local file or stream.
+
+    :param str blob_url:
+        The full URI to the blob. This can also include a SAS token.
+    :param output:
+        Where the data should be downloaded to. This could be either a file path to write to,
+        or an open IO handle to write to.
+    :type output: str or writable stream
+    :param credential:
+        The credentials with which to authenticate. This is optional if the
+        blob URL already has a SAS token or the blob is public. The value can be a SAS token string,
+        an instance of an AzureSasCredential or AzureNamedKeyCredential from azure.core.credentials,
+        an account shared access key, or an instance of a TokenCredentials class from azure.identity.
+        If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential
+        - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError.
+        If using an instance of AzureNamedKeyCredential, "name" should be the storage account name, and "key"
+        should be the storage account key.
+    :type credential:
+        ~azure.core.credentials.AzureNamedKeyCredential or
+        ~azure.core.credentials.AzureSasCredential or
+        ~azure.core.credentials.TokenCredential or
+        str or dict[str, str] or None
+    :keyword bool overwrite:
+        Whether the local file should be overwritten if it already exists. The default value is
+        `False` - in which case a ValueError will be raised if the file already exists. If set to
+        `True`, an attempt will be made to write to the existing file. If a stream handle is passed
+        in, this value is ignored.
+    :keyword int max_concurrency:
+        The number of parallel connections with which to download.
+    :keyword int offset:
+        Start of byte range to use for downloading a section of the blob.
+        Must be set if length is provided.
+    :keyword int length:
+        Number of bytes to read from the stream. This is optional, but
+        should be supplied for optimal performance.
+    :keyword bool validate_content:
+        If true, calculates an MD5 hash for each chunk of the blob. The storage
+        service checks the hash of the content that has arrived with the hash
+        that was sent. This is primarily valuable for detecting bitflips on
+        the wire if using http instead of https, as https (the default) will
+        already validate. Note that this MD5 hash is not stored with the
+        blob. Also note that if enabled, the memory-efficient upload algorithm
+        will not be used, because computing the MD5 hash requires buffering
+        entire blocks, and doing so defeats the purpose of the memory-efficient algorithm.
+    :rtype: None
+    """
+    overwrite = kwargs.pop('overwrite', False)
+    async with BlobClient.from_blob_url(blob_url, credential=credential) as client:
+        if hasattr(output, 'write'):
+            await _download_to_stream(client, output, **kwargs)
+        else:
+            if not overwrite and os.path.isfile(output):
+                raise ValueError(f"The file '{output}' already exists.")
+            with open(output, 'wb') as file_handle:
+                await _download_to_stream(client, file_handle, **kwargs)
+
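Editor's note: and the matching sketch for download_blob_from_url, again with a placeholder URL and output path.

import asyncio

from azure.storage.blob.aio import download_blob_from_url

async def main():
    # Placeholder URL; the blob is written to a local file, replacing it if present.
    url = "https://myaccount.blob.core.windows.net/mycontainer/myblob?<sas>"
    await download_blob_from_url(url, "myblob.out", overwrite=True)

asyncio.run(main())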
+
+__all__ = [
+    'upload_blob_to_url',
+    'download_blob_from_url',
+    'BlobServiceClient',
+    'BlobPrefix',
+    'ContainerClient',
+    'BlobClient',
+    'BlobLeaseClient',
+    'ExponentialRetry',
+    'LinearRetry',
+    'StorageStreamDownloader'
+]
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/blob/aio/_blob_client_async.py b/.venv/lib/python3.12/site-packages/azure/storage/blob/aio/_blob_client_async.py
new file mode 100644
index 00000000..7cb07448
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/blob/aio/_blob_client_async.py
@@ -0,0 +1,3215 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+# pylint: disable=too-many-lines, docstring-keyword-should-match-keyword-only
+
+import warnings
+from datetime import datetime
+from functools import partial
+from typing import (
+    Any, AnyStr, AsyncIterable, cast, Dict, IO, Iterable, List, Optional, overload, Tuple, Union,
+    TYPE_CHECKING
+)
+from typing_extensions import Self
+
+from azure.core.async_paging import AsyncItemPaged
+from azure.core.exceptions import ResourceNotFoundError, HttpResponseError, ResourceExistsError
+from azure.core.pipeline import AsyncPipeline
+from azure.core.tracing.decorator import distributed_trace
+from azure.core.tracing.decorator_async import distributed_trace_async
+
+from ._download_async import StorageStreamDownloader
+from ._lease_async import BlobLeaseClient
+from ._models import PageRangePaged
+from ._upload_helpers import (
+    upload_append_blob,
+    upload_block_blob,
+    upload_page_blob
+)
+from .._blob_client import StorageAccountHostsMixin
+from .._blob_client_helpers import (
+    _abort_copy_options,
+    _append_block_from_url_options,
+    _append_block_options,
+    _clear_page_options,
+    _commit_block_list_options,
+    _create_append_blob_options,
+    _create_page_blob_options,
+    _create_snapshot_options,
+    _delete_blob_options,
+    _download_blob_options,
+    _format_url,
+    _from_blob_url,
+    _get_blob_tags_options,
+    _get_block_list_result,
+    _get_page_ranges_options,
+    _parse_url,
+    _resize_blob_options,
+    _seal_append_blob_options,
+    _set_blob_metadata_options,
+    _set_blob_tags_options,
+    _set_http_headers_options,
+    _set_sequence_number_options,
+    _stage_block_from_url_options,
+    _stage_block_options,
+    _start_copy_from_url_options,
+    _upload_blob_from_url_options,
+    _upload_blob_options,
+    _upload_page_options,
+    _upload_pages_from_url_options
+)
+from .._deserialize import (
+    deserialize_blob_properties,
+    deserialize_pipeline_response_into_cls,
+    get_page_ranges_result,
+    parse_tags
+)
+from .._encryption import StorageEncryptionMixin, _ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION
+from .._generated.aio import AzureBlobStorage
+from .._generated.models import CpkInfo
+from .._models import BlobType, BlobBlock, BlobProperties, PageRange
+from .._serialize import get_access_conditions, get_api_version, get_modify_conditions, get_version_id
+from .._shared.base_client_async import AsyncStorageAccountHostsMixin, AsyncTransportWrapper, parse_connection_str
+from .._shared.policies_async import ExponentialRetry
+from .._shared.response_handlers import process_storage_error, return_response_headers
+
+if TYPE_CHECKING:
+    from azure.core.credentials import AzureNamedKeyCredential, AzureSasCredential
+    from azure.core.credentials_async import AsyncTokenCredential
+    from azure.core.pipeline.policies import AsyncHTTPPolicy
+    from azure.storage.blob.aio import ContainerClient
+    from .._models import (
+        ContentSettings,
+        ImmutabilityPolicy,
+        PremiumPageBlobTier,
+        SequenceNumberAction,
+        StandardBlobTier
+    )
+
+
+class BlobClient(AsyncStorageAccountHostsMixin, StorageAccountHostsMixin, StorageEncryptionMixin):  # type: ignore [misc] # pylint: disable=too-many-public-methods
+    """A client to interact with a specific blob, although that blob may not yet exist.
+
+    :param str account_url:
+        The URI to the storage account. In order to create a client given the full URI to the blob,
+        use the :func:`from_blob_url` classmethod.
+    :param container_name: The container name for the blob.
+    :type container_name: str
+    :param blob_name: The name of the blob with which to interact. If specified, this value will override
+        a blob value specified in the blob URL.
+    :type blob_name: str
+    :param str snapshot:
+        The optional blob snapshot on which to operate. This can be the snapshot ID string
+        or the response returned from :func:`create_snapshot`.
+    :param credential:
+        The credentials with which to authenticate. This is optional if the
+        account URL already has a SAS token. The value can be a SAS token string,
+        an instance of an AzureSasCredential or AzureNamedKeyCredential from azure.core.credentials,
+        an account shared access key, or an instance of a TokenCredentials class from azure.identity.
+        If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential
+        - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError.
+        If using an instance of AzureNamedKeyCredential, "name" should be the storage account name, and "key"
+        should be the storage account key.
+    :keyword str api_version:
+        The Storage API version to use for requests. Default value is the most recent service version that is
+        compatible with the current SDK. Setting to an older version may result in reduced feature compatibility.
+
+        .. versionadded:: 12.2.0
+
+    :keyword str secondary_hostname:
+        The hostname of the secondary endpoint.
+    :keyword int max_block_size: The maximum chunk size for uploading a block blob in chunks.
+        Defaults to 4*1024*1024, or 4MB.
+    :keyword int max_single_put_size: If the blob size is less than or equal to max_single_put_size, then the blob will be
+        uploaded with only one http PUT request. If the blob size is larger than max_single_put_size,
+        the blob will be uploaded in chunks. Defaults to 64*1024*1024, or 64MB.
+    :keyword int min_large_block_upload_threshold: The minimum chunk size required to use the memory efficient
+        algorithm when uploading a block blob. Defaults to 4*1024*1024+1.
+    :keyword bool use_byte_buffer: Use a byte buffer for block blob uploads. Defaults to False.
+    :keyword int max_page_size: The maximum chunk size for uploading a page blob. Defaults to 4*1024*1024, or 4MB.
+    :keyword int max_single_get_size: The maximum size for a blob to be downloaded in a single call,
+        the exceeded part will be downloaded in chunks (could be parallel). Defaults to 32*1024*1024, or 32MB.
+    :keyword int max_chunk_get_size: The maximum chunk size used for downloading a blob. Defaults to 4*1024*1024,
+        or 4MB.
+    :keyword str version_id: The version id parameter is an opaque DateTime value that, when present,
+        specifies the version of the blob to operate on.
+    :keyword str audience: The audience to use when requesting tokens for Azure Active Directory
+        authentication. Only has an effect when credential is of type TokenCredential. The value could be
+        https://storage.azure.com/ (default) or https://<account>.blob.core.windows.net.
+
+    .. admonition:: Example:
+
+        .. literalinclude:: ../samples/blob_samples_authentication_async.py
+            :start-after: [START create_blob_client]
+            :end-before: [END create_blob_client]
+            :language: python
+            :dedent: 8
+            :caption: Creating the BlobClient from a URL to a public blob (no auth needed).
+
+        .. literalinclude:: ../samples/blob_samples_authentication_async.py
+            :start-after: [START create_blob_client_sas_url]
+            :end-before: [END create_blob_client_sas_url]
+            :language: python
+            :dedent: 8
+            :caption: Creating the BlobClient from a SAS URL to a blob.
+    """
+    def __init__(
+            self, account_url: str,
+            container_name: str,
+            blob_name: str,
+            snapshot: Optional[Union[str, Dict[str, Any]]] = None,
+            credential: Optional[Union[str, Dict[str, str], "AzureNamedKeyCredential", "AzureSasCredential", "AsyncTokenCredential"]] = None,  # pylint: disable=line-too-long
+            **kwargs: Any
+    ) -> None:
+        kwargs['retry_policy'] = kwargs.get('retry_policy') or ExponentialRetry(**kwargs)
+        parsed_url, sas_token, path_snapshot = _parse_url(
+            account_url=account_url,
+            container_name=container_name,
+            blob_name=blob_name)
+        self.container_name = container_name
+        self.blob_name = blob_name
+
+        if snapshot is not None and hasattr(snapshot, 'snapshot'):
+            self.snapshot = snapshot.snapshot
+        elif isinstance(snapshot, dict):
+            self.snapshot = snapshot['snapshot']
+        else:
+            self.snapshot = snapshot or path_snapshot
+        self.version_id = kwargs.pop('version_id', None)
+
+        # This parameter is used for the hierarchy traversal. Give precedence to credential.
+        self._raw_credential = credential if credential else sas_token
+        self._query_str, credential = self._format_query_string(sas_token, credential, snapshot=self.snapshot)
+        super(BlobClient, self).__init__(parsed_url, service='blob', credential=credential, **kwargs)
+        self._client = AzureBlobStorage(self.url, base_url=self.url, pipeline=self._pipeline)
+        self._client._config.version = get_api_version(kwargs)  # type: ignore [assignment]
+        self._configure_encryption(kwargs)
+
+    def _format_url(self, hostname: str) -> str:
+        return _format_url(
+            container_name=self.container_name,
+            scheme=self.scheme,
+            blob_name=self.blob_name,
+            query_str=self._query_str,
+            hostname=hostname
+        )
+
+    @classmethod
+    def from_blob_url(
+        cls, blob_url: str,
+        credential: Optional[Union[str, Dict[str, str], "AzureNamedKeyCredential", "AzureSasCredential", "AsyncTokenCredential"]] = None,  # pylint: disable=line-too-long
+        snapshot: Optional[Union[str, Dict[str, Any]]] = None,
+        **kwargs: Any
+    ) -> Self:
+        """Create BlobClient from a blob url. This doesn't support customized blob url with '/' in blob name.
+
+        :param str blob_url:
+            The full endpoint URL to the Blob, including SAS token and snapshot if used. This could be
+            either the primary endpoint, or the secondary endpoint depending on the current `location_mode`.
+        :type blob_url: str
+        :param credential:
+            The credentials with which to authenticate. This is optional if the
+            account URL already has a SAS token, or the connection string already has shared
+            access key values. The value can be a SAS token string,
+            an instance of an AzureSasCredential or AzureNamedKeyCredential from azure.core.credentials,
+            an account shared access key, or an instance of a TokenCredentials class from azure.identity.
+            If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential
+            - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError.
+            If using an instance of AzureNamedKeyCredential, "name" should be the storage account name, and "key"
+            should be the storage account key.
+        :type credential: Optional[Union[str, Dict[str, str], "AzureNamedKeyCredential", "AzureSasCredential", "AsyncTokenCredential"]]  # pylint: disable=line-too-long
+        :param str snapshot:
+            The optional blob snapshot on which to operate. This can be the snapshot ID string
+            or the response returned from :func:`create_snapshot`. If specified, this will override
+            the snapshot in the url.
+        :keyword str version_id: The version id parameter is an opaque DateTime value that, when present,
+            specifies the version of the blob to operate on.
+        :keyword str audience: The audience to use when requesting tokens for Azure Active Directory
+            authentication. Only has an effect when credential is of type TokenCredential. The value could be
+            https://storage.azure.com/ (default) or https://<account>.blob.core.windows.net.
+        :returns: A Blob client.
+        :rtype: ~azure.storage.blob.BlobClient
+        """
+        account_url, container_name, blob_name, path_snapshot = _from_blob_url(blob_url=blob_url, snapshot=snapshot)
+        return cls(
+            account_url, container_name=container_name, blob_name=blob_name,
+            snapshot=path_snapshot, credential=credential, **kwargs
+        )
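Editor's note: a brief usage sketch, assuming a placeholder SAS URL; any snapshot query parameter embedded in the URL is parsed out unless overridden.

from azure.storage.blob.aio import BlobClient

# Placeholder SAS URL; container and blob names are derived from the path.
client = BlobClient.from_blob_url(
    "https://myaccount.blob.core.windows.net/mycontainer/myblob?<sas>")
print(client.container_name, client.blob_name)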
+
+    @classmethod
+    def from_connection_string(
+        cls, conn_str: str,
+        container_name: str,
+        blob_name: str,
+        snapshot: Optional[Union[str, Dict[str, Any]]] = None,
+        credential: Optional[Union[str, Dict[str, str], "AzureNamedKeyCredential", "AzureSasCredential", "AsyncTokenCredential"]] = None,  # pylint: disable=line-too-long
+        **kwargs: Any
+    ) -> Self:
+        """Create BlobClient from a Connection String.
+
+        :param str conn_str:
+            A connection string to an Azure Storage account.
+        :param container_name: The container name for the blob.
+        :type container_name: str
+        :param blob_name: The name of the blob with which to interact.
+        :type blob_name: str
+        :param str snapshot:
+            The optional blob snapshot on which to operate. This can be the snapshot ID string
+            or the response returned from :func:`create_snapshot`.
+        :param credential:
+            The credentials with which to authenticate. This is optional if the
+            account URL already has a SAS token, or the connection string already has shared
+            access key values. The value can be a SAS token string,
+            an instance of an AzureSasCredential or AzureNamedKeyCredential from azure.core.credentials,
+            an account shared access key, or an instance of a TokenCredentials class from azure.identity.
+            Credentials provided here will take precedence over those in the connection string.
+            If using an instance of AzureNamedKeyCredential, "name" should be the storage account name, and "key"
+            should be the storage account key.
+        :type credential: Optional[Union[str, Dict[str, str], "AzureNamedKeyCredential", "AzureSasCredential", "AsyncTokenCredential"]]  # pylint: disable=line-too-long
+        :keyword str version_id: The version id parameter is an opaque DateTime value that, when present,
+            specifies the version of the blob to operate on.
+        :keyword str audience: The audience to use when requesting tokens for Azure Active Directory
+            authentication. Only has an effect when credential is of type TokenCredential. The value could be
+            https://storage.azure.com/ (default) or https://<account>.blob.core.windows.net.
+        :returns: A Blob client.
+        :rtype: ~azure.storage.blob.BlobClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/blob_samples_authentication.py
+                :start-after: [START auth_from_connection_string_blob]
+                :end-before: [END auth_from_connection_string_blob]
+                :language: python
+                :dedent: 8
+                :caption: Creating the BlobClient from a connection string.
+        """
+        account_url, secondary, credential = parse_connection_str(conn_str, credential, 'blob')
+        if 'secondary_hostname' not in kwargs:
+            kwargs['secondary_hostname'] = secondary
+        return cls(
+            account_url, container_name=container_name, blob_name=blob_name,
+            snapshot=snapshot, credential=credential, **kwargs
+        )
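Editor's note: a usage sketch with a placeholder connection string; the account name and key shown are not real.

from azure.storage.blob.aio import BlobClient

conn_str = (
    "DefaultEndpointsProtocol=https;AccountName=myaccount;"
    "AccountKey=<key>;EndpointSuffix=core.windows.net"
)
client = BlobClient.from_connection_string(
    conn_str, container_name="mycontainer", blob_name="myblob")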
+
+    @distributed_trace_async
+    async def get_account_information(self, **kwargs: Any) -> Dict[str, str]:
+        """Gets information related to the storage account in which the blob resides.
+
+        The information can also be retrieved if the user has a SAS to a container or blob.
+        The keys in the returned dictionary include 'sku_name' and 'account_kind'.
+
+        :returns: A dict of account information (SKU and account type).
+        :rtype: dict(str, str)
+        """
+        try:
+            return cast(Dict[str, str],
+                        await self._client.blob.get_account_info(cls=return_response_headers, **kwargs))
+        except HttpResponseError as error:
+            process_storage_error(error)
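Editor's note: for example (placeholder URL), the returned dictionary exposes the SKU and account kind as documented above.

import asyncio

from azure.storage.blob.aio import BlobClient

async def main():
    async with BlobClient.from_blob_url(
            "https://myaccount.blob.core.windows.net/mycontainer/myblob?<sas>") as client:
        info = await client.get_account_information()
        print(info["sku_name"], info["account_kind"])

asyncio.run(main())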
+
+    @distributed_trace_async
+    async def upload_blob_from_url(
+        self, source_url: str,
+        *,
+        metadata: Optional[Dict[str, str]] = None,
+        **kwargs: Any
+    ) -> Dict[str, Any]:
+        """
+        Creates a new Block Blob where the content of the blob is read from a given URL.
+        The content of an existing blob is overwritten with the new blob.
+
+        :param str source_url:
+            A URL of up to 2 KB in length that specifies a file or blob.
+            The value should be URL-encoded as it would appear in a request URI.
+            The source must either be public or must be authenticated via a shared
+            access signature as part of the url or using the source_authorization keyword.
+            If the source is public, no authentication is required.
+            Examples:
+            https://myaccount.blob.core.windows.net/mycontainer/myblob
+
+            https://myaccount.blob.core.windows.net/mycontainer/myblob?snapshot=<DateTime>
+
+            https://otheraccount.blob.core.windows.net/mycontainer/myblob?sastoken
+        :keyword dict(str, str) metadata:
+            Name-value pairs associated with the blob as metadata.
+        :keyword bool overwrite: Whether the blob to be uploaded should overwrite the current data.
+            If True, upload_blob will overwrite the existing data. If set to False, the
+            operation will fail with ResourceExistsError.
+        :keyword bool include_source_blob_properties:
+            Indicates if properties from the source blob should be copied. Defaults to True.
+        :keyword tags:
+            Name-value pairs associated with the blob as tag. Tags are case-sensitive.
+            The tag set may contain at most 10 tags.  Tag keys must be between 1 and 128 characters,
+            and tag values must be between 0 and 256 characters.
+            Valid tag key and value characters include: lowercase and uppercase letters, digits (0-9),
+            space (' '), plus (+), minus (-), period (.), solidus (/), colon (:), equals (=), underscore (_)
+        :paramtype tags: dict(str, str)
+        :keyword bytearray source_content_md5:
+            Specify the md5 that is used to verify the integrity of the source bytes.
+        :keyword ~datetime.datetime source_if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the source resource has been modified since the specified time.
+        :keyword ~datetime.datetime source_if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the source resource has not been modified since the specified date/time.
+        :keyword str source_etag:
+            The source ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions source_match_condition:
+            The source match condition to use upon the etag.
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            The destination ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The destination match condition to use upon the etag.
+        :keyword destination_lease:
+            The lease ID specified for this header must match the lease ID of the
+            destination blob. If the request does not include the lease ID or it is not
+            valid, the operation fails with status code 412 (Precondition Failed).
+        :paramtype destination_lease: ~azure.storage.blob.BlobLeaseClient or str
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :keyword ~azure.storage.blob.ContentSettings content_settings:
+            ContentSettings object used to set blob properties. Used to set content type, encoding,
+            language, disposition, md5, and cache control.
+        :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk:
+            Encrypts the data on the service-side with the given key.
+            Use of customer-provided keys must be done over HTTPS.
+            As the encryption key itself is provided in the request,
+            a secure connection must be established to transfer the key.
+        :keyword str encryption_scope:
+            A predefined encryption scope used to encrypt the data on the service. An encryption
+            scope can be created using the Management API and referenced here by name. If a default
+            encryption scope has been defined at the container, this value will override it if the
+            container-level scope is configured to allow overrides. Otherwise an error will be raised.
+        :keyword ~azure.storage.blob.StandardBlobTier standard_blob_tier:
+            A standard blob tier value to set the blob to. For this version of the library,
+            this is only applicable to block blobs on standard storage accounts.
+        :keyword str source_authorization:
+            Authenticate as a service principal using a client secret to access a source blob. Ensure "bearer " is
+            the prefix of the source_authorization string.
+        :returns: Response from creating a new block blob for a given URL.
+        :rtype: Dict[str, Any]
+        """
+        if kwargs.get('cpk') and self.scheme.lower() != 'https':
+            raise ValueError("Customer provided encryption key must be used over HTTPS.")
+        options = _upload_blob_from_url_options(
+            source_url=source_url,
+            metadata=metadata,
+            **kwargs)
+        try:
+            return cast(Dict[str, Any], await self._client.block_blob.put_blob_from_url(**options))
+        except HttpResponseError as error:
+            process_storage_error(error)
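Editor's note: a minimal usage sketch; both URLs are placeholders, and the source must be public or authenticated via its SAS token or the source_authorization keyword.

import asyncio

from azure.storage.blob.aio import BlobClient

async def main():
    async with BlobClient.from_blob_url(
            "https://myaccount.blob.core.windows.net/mycontainer/dest?<sas>") as client:
        await client.upload_blob_from_url(
            "https://otheraccount.blob.core.windows.net/src/blob?<src-sas>",
            overwrite=True)

asyncio.run(main())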
+
+    @distributed_trace_async
+    async def upload_blob(
+        self, data: Union[bytes, str, Iterable[AnyStr], AsyncIterable[AnyStr], IO[bytes]],
+        blob_type: Union[str, BlobType] = BlobType.BLOCKBLOB,
+        length: Optional[int] = None,
+        metadata: Optional[Dict[str, str]] = None,
+        **kwargs: Any
+    ) -> Dict[str, Any]:
+        """Creates a new blob from a data source with automatic chunking.
+
+        :param data: The blob data to upload.
+        :type data: Union[bytes, str, Iterable[AnyStr], AsyncIterable[AnyStr], IO[AnyStr]]
+        :param ~azure.storage.blob.BlobType blob_type: The type of the blob. This can be
+            either BlockBlob, PageBlob or AppendBlob. The default value is BlockBlob.
+        :param int length:
+            Number of bytes to read from the stream. This is optional, but
+            should be supplied for optimal performance.
+        :param metadata:
+            Name-value pairs associated with the blob as metadata.
+        :type metadata: dict(str, str)
+        :keyword tags:
+            Name-value pairs associated with the blob as tag. Tags are case-sensitive.
+            The tag set may contain at most 10 tags.  Tag keys must be between 1 and 128 characters,
+            and tag values must be between 0 and 256 characters.
+            Valid tag key and value characters include: lowercase and uppercase letters, digits (0-9),
+            space (' '), plus (+), minus (-), period (.), solidus (/), colon (:), equals (=), underscore (_)
+
+            .. versionadded:: 12.4.0
+
+        :paramtype tags: dict(str, str)
+        :keyword bool overwrite: Whether the blob to be uploaded should overwrite the current data.
+            If True, upload_blob will overwrite the existing data. If set to False, the
+            operation will fail with ResourceExistsError. The exception to the above is with Append
+            blob types: if set to False and the data already exists, an error will not be raised
+            and the data will be appended to the existing blob. If overwrite is set to True, the existing
+            append blob will be deleted and a new one created. Defaults to False.
+        :keyword ~azure.storage.blob.ContentSettings content_settings:
+            ContentSettings object used to set blob properties. Used to set content type, encoding,
+            language, disposition, md5, and cache control.
+        :keyword bool validate_content:
+            If true, calculates an MD5 hash for each chunk of the blob. The storage
+            service checks the hash of the content that has arrived with the hash
+            that was sent. This is primarily valuable for detecting bitflips on
+            the wire if using http instead of https, as https (the default), will
+            already validate. Note that this MD5 hash is not stored with the
+            blob. Also note that if enabled, the memory-efficient upload algorithm
+            will not be used because computing the MD5 hash requires buffering
+            entire blocks, and doing so defeats the purpose of the memory-efficient algorithm.
+        :keyword lease:
+            If specified, upload_blob only succeeds if the
+            blob's lease is active and matches this ID.
+            Required if the blob has an active lease.
+        :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword str if_tags_match_condition:
+            Specify a SQL where clause on blob tags to operate only on blobs with a matching value.
+            e.g. ``\"\\\"tagname\\\"='my tag'\"``
+
+            .. versionadded:: 12.4.0
+
+        :keyword ~azure.storage.blob.PremiumPageBlobTier premium_page_blob_tier:
+            A page blob tier value to set the blob to. The tier correlates to the size of the
+            blob and number of allowed IOPS. This is only applicable to page blobs on
+            premium storage accounts.
+        :keyword ~azure.storage.blob.ImmutabilityPolicy immutability_policy:
+            Specifies the immutability policy of a blob, blob snapshot or blob version.
+            Currently this parameter of upload_blob() API is for BlockBlob only.
+
+            .. versionadded:: 12.10.0
+                This was introduced in API version '2020-10-02'.
+
+        :keyword bool legal_hold:
+            Specified if a legal hold should be set on the blob.
+            Currently this parameter of upload_blob() API is for BlockBlob only.
+
+            .. versionadded:: 12.10.0
+                This was introduced in API version '2020-10-02'.
+
+        :keyword ~azure.storage.blob.StandardBlobTier standard_blob_tier:
+            A standard blob tier value to set the blob to. For this version of the library,
+            this is only applicable to block blobs on standard storage accounts.
+        :keyword int maxsize_condition:
+            Optional conditional header. The max length in bytes permitted for
+            the append blob. If the Append Block operation would cause the blob
+            to exceed that limit or if the blob size is already greater than the
+            value specified in this header, the request will fail with
+            MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed).
+        :keyword int max_concurrency:
+            Maximum number of parallel connections to use when transferring the blob in chunks.
+            This option does not affect the underlying connection pool, and may
+            require a separate configuration of the connection pool.
+        :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk:
+            Encrypts the data on the service-side with the given key.
+            Use of customer-provided keys must be done over HTTPS.
+            As the encryption key itself is provided in the request,
+            a secure connection must be established to transfer the key.
+        :keyword str encryption_scope:
+            A predefined encryption scope used to encrypt the data on the service. An encryption
+            scope can be created using the Management API and referenced here by name. If a default
+            encryption scope has been defined at the container, this value will override it if the
+            container-level scope is configured to allow overrides. Otherwise an error will be raised.
+
+            .. versionadded:: 12.2.0
+
+        :keyword str encoding:
+            Encoding to use if text is supplied as input. Defaults to UTF-8.
+        :keyword progress_hook:
+            An async callback to track the progress of a long running upload. The signature is
+            function(current: int, total: Optional[int]) where current is the number of bytes transferred
+            so far, and total is the size of the blob or None if the size is unknown.
+        :paramtype progress_hook: Callable[[int, Optional[int]], Awaitable[None]]
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__. This method may make multiple calls to the service and
+            the timeout will apply to each call individually.
+        :returns: Blob-updated property dict (Etag and last modified)
+        :rtype: dict[str, Any]
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/blob_samples_hello_world_async.py
+                :start-after: [START upload_a_blob]
+                :end-before: [END upload_a_blob]
+                :language: python
+                :dedent: 16
+                :caption: Upload a blob to the container.
+        """
+        if self.require_encryption and not self.key_encryption_key:
+            raise ValueError("Encryption required but no key was provided.")
+        if kwargs.get('cpk') and self.scheme.lower() != 'https':
+            raise ValueError("Customer provided encryption key must be used over HTTPS.")
+        options = _upload_blob_options(
+            data=data,
+            blob_type=blob_type,
+            length=length,
+            metadata=metadata,
+            encryption_options={
+                'required': self.require_encryption,
+                'version': self.encryption_version,
+                'key': self.key_encryption_key,
+                'resolver': self.key_resolver_function
+            },
+            config=self._config,
+            sdk_moniker=self._sdk_moniker,
+            client=self._client,
+            **kwargs)
+        if blob_type == BlobType.BlockBlob:
+            return cast(Dict[str, Any], await upload_block_blob(**options))
+        if blob_type == BlobType.PageBlob:
+            return cast(Dict[str, Any], await upload_page_blob(**options))
+        return cast(Dict[str, Any], await upload_append_blob(**options))
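Editor's note: a usage sketch for chunked upload of a local file as a block blob; the URL and file name are placeholders.

import asyncio

from azure.storage.blob.aio import BlobClient

async def main():
    async with BlobClient.from_blob_url(
            "https://myaccount.blob.core.windows.net/mycontainer/myblob?<sas>") as client:
        with open("data.bin", "rb") as stream:
            await client.upload_blob(stream, overwrite=True, max_concurrency=4)

asyncio.run(main())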
+
+    @overload
+    async def download_blob(
+        self, offset: Optional[int] = None,
+        length: Optional[int] = None,
+        *,
+        encoding: str,
+        **kwargs: Any
+    ) -> StorageStreamDownloader[str]:
+        ...
+
+    @overload
+    async def download_blob(
+        self, offset: Optional[int] = None,
+        length: Optional[int] = None,
+        *,
+        encoding: None = None,
+        **kwargs: Any
+    ) -> StorageStreamDownloader[bytes]:
+        ...
+
+    @distributed_trace_async
+    async def download_blob(
+        self, offset: Optional[int] = None,
+        length: Optional[int] = None,
+        *,
+        encoding: Union[str, None] = None,
+        **kwargs: Any
+    ) -> Union[StorageStreamDownloader[str], StorageStreamDownloader[bytes]]:
+        """Downloads a blob to the StorageStreamDownloader. The readall() method must
+        be used to read all the content or readinto() must be used to download the blob into
+        a stream. Using chunks() returns an async iterator which allows the user to iterate over the content in chunks.
+
+        :param int offset:
+            Start of byte range to use for downloading a section of the blob.
+            Must be set if length is provided.
+        :param int length:
+            Number of bytes to read from the stream. This is optional, but
+            should be supplied for optimal performance.
+        :keyword str version_id:
+            The version id parameter is an opaque DateTime
+            value that, when present, specifies the version of the blob to download.
+
+            .. versionadded:: 12.4.0
+
+            This keyword argument was introduced in API version '2019-12-12'.
+
+        :keyword bool validate_content:
+            If true, calculates an MD5 hash for each chunk of the blob. The storage
+            service checks the hash of the content that has arrived with the hash
+            that was sent. This is primarily valuable for detecting bitflips on
+            the wire if using http instead of https, as https (the default), will
+            already validate. Note that this MD5 hash is not stored with the
+            blob. Also note that if enabled, the memory-efficient upload algorithm
+            will not be used because computing the MD5 hash requires buffering
+            entire blocks, and doing so defeats the purpose of the memory-efficient algorithm.
+        :keyword lease:
+            Required if the blob has an active lease. If specified, download_blob only
+            succeeds if the blob's lease is active and matches this ID. Value can be a
+            BlobLeaseClient object or the lease ID as a string.
+        :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword str if_tags_match_condition:
+            Specify a SQL where clause on blob tags to operate only on blobs with a matching value.
+            e.g. ``\"\\\"tagname\\\"='my tag'\"``
+
+            .. versionadded:: 12.4.0
+
+        :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk:
+            Encrypts the data on the service-side with the given key.
+            Use of customer-provided keys must be done over HTTPS.
+            As the encryption key itself is provided in the request,
+            a secure connection must be established to transfer the key.
+        :keyword int max_concurrency:
+            Maximum number of parallel connections to use when transferring the blob in chunks.
+            This option does not affect the underlying connection pool, and may
+            require a separate configuration of the connection pool.
+        :keyword str encoding:
+            Encoding to decode the downloaded bytes. Default is None, i.e. no decoding.
+        :keyword progress_hook:
+            An async callback to track the progress of a long running download. The signature is
+            function(current: int, total: int) where current is the number of bytes transferred
+            so far, and total is the total size of the download.
+        :paramtype progress_hook: Callable[[int, int], Awaitable[None]]
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__. This method may make multiple calls to the
+            service and the timeout will apply to each call individually.
+        :returns: A streaming object (StorageStreamDownloader)
+        :rtype: ~azure.storage.blob.aio.StorageStreamDownloader
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/blob_samples_hello_world_async.py
+                :start-after: [START download_a_blob]
+                :end-before: [END download_a_blob]
+                :language: python
+                :dedent: 16
+                :caption: Download a blob.
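+
+        A minimal usage sketch (illustrative only, not from the samples above;
+        assumes an authenticated ``BlobClient`` named ``blob_client``) that
+        streams the content chunk by chunk instead of buffering it all:
+
+        .. code-block:: python
+
+            # chunks() yields the content as an async iterator of bytes.
+            stream = await blob_client.download_blob(max_concurrency=2)
+            async for chunk in stream.chunks():
+                handle(chunk)  # 'handle' is a hypothetical per-chunk callback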
+        """
+        if self.require_encryption and not (self.key_encryption_key or self.key_resolver_function):
+            raise ValueError("Encryption required but no key was provided.")
+        if length is not None and offset is None:
+            raise ValueError("Offset value must not be None if length is set.")
+        if kwargs.get('cpk') and self.scheme.lower() != 'https':
+            raise ValueError("Customer provided encryption key must be used over HTTPS.")
+        options = _download_blob_options(
+            blob_name=self.blob_name,
+            container_name=self.container_name,
+            version_id=get_version_id(self.version_id, kwargs),
+            offset=offset,
+            length=length,
+            encoding=encoding,
+            encryption_options={
+                'required': self.require_encryption,
+                'version': self.encryption_version,
+                'key': self.key_encryption_key,
+                'resolver': self.key_resolver_function
+            },
+            config=self._config,
+            sdk_moniker=self._sdk_moniker,
+            client=self._client,
+            **kwargs)
+        downloader = StorageStreamDownloader(**options)
+        await downloader._setup()  # pylint: disable=protected-access
+        return downloader
+
+    @distributed_trace_async
+    async def delete_blob(self, delete_snapshots: Optional[str] = None, **kwargs: Any) -> None:
+        """Marks the specified blob for deletion.
+
+        The blob is later deleted during garbage collection.
+        Note that in order to delete a blob, you must delete all of its
+        snapshots. You can delete both at the same time with the delete_blob()
+        operation.
+
+        If a delete retention policy is enabled for the service, then this operation soft deletes the blob
+        and retains the blob for a specified number of days.
+        After the specified number of days, the blob's data is removed from the service during garbage collection.
+        A soft-deleted blob is accessible through :func:`~ContainerClient.list_blobs()` by specifying the
+        `include=['deleted']` option. A soft-deleted blob can be restored using the :func:`undelete_blob` operation.
+
+        :param str delete_snapshots:
+            Required if the blob has associated snapshots. Values include:
+             - "only": Deletes only the blobs snapshots.
+             - "include": Deletes the blob along with all snapshots.
+        :keyword str version_id:
+            The version id parameter is an opaque DateTime
+            value that, when present, specifies the version of the blob to delete.
+
+            .. versionadded:: 12.4.0
+
+            This keyword argument was introduced in API version '2019-12-12'.
+
+        :keyword lease:
+            Required if the blob has an active lease. If specified, delete_blob only
+            succeeds if the blob's lease is active and matches this ID. Value can be a
+            BlobLeaseClient object or the lease ID as a string.
+        :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword str if_tags_match_condition:
+            Specify a SQL where clause on blob tags to operate only on blob with a matching value.
+            eg. ``\"\\\"tagname\\\"='my tag'\"``
+
+            .. versionadded:: 12.4.0
+
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :rtype: None
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/blob_samples_hello_world_async.py
+                :start-after: [START delete_blob]
+                :end-before: [END delete_blob]
+                :language: python
+                :dedent: 16
+                :caption: Delete a blob.
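+
+        A short sketch (illustrative; assumes an authenticated ``BlobClient``
+        named ``blob_client``) that deletes the blob together with its snapshots:
+
+        .. code-block:: python
+
+            # Snapshots must be handled explicitly; "include" removes them too.
+            await blob_client.delete_blob(delete_snapshots="include")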
+        """
+        options = _delete_blob_options(
+            snapshot=self.snapshot,
+            version_id=get_version_id(self.version_id, kwargs),
+            delete_snapshots=delete_snapshots,
+            **kwargs)
+        try:
+            await self._client.blob.delete(**options)
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    @distributed_trace_async
+    async def undelete_blob(self, **kwargs: Any) -> None:
+        """Restores soft-deleted blobs or snapshots.
+
+        Operation will only be successful if used within the specified number of days
+        set in the delete retention policy.
+
+        If blob versioning is enabled, the base blob cannot be restored using this
+        method. Instead use :func:`start_copy_from_url` with the URL of the blob version
+        you wish to promote to the current version.
+
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :rtype: None
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/blob_samples_common_async.py
+                :start-after: [START undelete_blob]
+                :end-before: [END undelete_blob]
+                :language: python
+                :dedent: 12
+                :caption: Undeleting a blob.
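+
+        A short sketch (illustrative; assumes soft delete is enabled on the
+        account and ``blob_client`` points at the deleted blob):
+
+        .. code-block:: python
+
+            # Restores the blob if it is still within the retention window.
+            await blob_client.undelete_blob()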
+        """
+        try:
+            await self._client.blob.undelete(timeout=kwargs.pop('timeout', None), **kwargs)
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    @distributed_trace_async
+    async def exists(self, **kwargs: Any) -> bool:
+        """
+        Returns True if a blob exists with the defined parameters, and returns
+        False otherwise.
+
+        :keyword str version_id:
+            The version id parameter is an opaque DateTime
+            value that, when present, specifies the version of the blob to check if it exists.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :returns: True if the blob exists, False otherwise.
+        :rtype: bool
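+
+        A short sketch (illustrative; assumes an authenticated ``BlobClient``
+        named ``blob_client``):
+
+        .. code-block:: python
+
+            # exists() swallows the 404 and returns a bool instead of raising.
+            if await blob_client.exists():
+                props = await blob_client.get_blob_properties()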
+        """
+        version_id = get_version_id(self.version_id, kwargs)
+        try:
+            await self._client.blob.get_properties(
+                snapshot=self.snapshot,
+                version_id=version_id,
+                **kwargs)
+            return True
+        # Encrypted with CPK
+        except ResourceExistsError:
+            return True
+        except HttpResponseError as error:
+            try:
+                process_storage_error(error)
+            except ResourceNotFoundError:
+                return False
+
+    @distributed_trace_async
+    async def get_blob_properties(self, **kwargs: Any) -> BlobProperties:
+        """Returns all user-defined metadata, standard HTTP properties, and
+        system properties for the blob. It does not return the content of the blob.
+
+        :keyword lease:
+            Required if the blob has an active lease. Value can be a BlobLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
+        :keyword str version_id:
+            The version id parameter is an opaque DateTime
+            value that, when present, specifies the version of the blob whose properties are retrieved.
+
+            .. versionadded:: 12.4.0
+
+            This keyword argument was introduced in API version '2019-12-12'.
+
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword str if_tags_match_condition:
+            Specify a SQL where clause on blob tags to operate only on blob with a matching value.
+            eg. ``\"\\\"tagname\\\"='my tag'\"``
+
+            .. versionadded:: 12.4.0
+
+        :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk:
+            Encrypts the data on the service-side with the given key.
+            Use of customer-provided keys must be done over HTTPS.
+            As the encryption key itself is provided in the request,
+            a secure connection must be established to transfer the key.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :returns: BlobProperties
+        :rtype: ~azure.storage.blob.BlobProperties
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/blob_samples_common_async.py
+                :start-after: [START get_blob_properties]
+                :end-before: [END get_blob_properties]
+                :language: python
+                :dedent: 12
+                :caption: Getting the properties for a blob.
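+
+        A minimal sketch (illustrative; assumes an authenticated ``BlobClient``
+        named ``blob_client``):
+
+        .. code-block:: python
+
+            props = await blob_client.get_blob_properties()
+            # BlobProperties exposes size, etag and content settings, among others.
+            print(props.size, props.etag, props.content_settings.content_type)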
+        """
+        access_conditions = get_access_conditions(kwargs.pop('lease', None))
+        mod_conditions = get_modify_conditions(kwargs)
+        version_id = get_version_id(self.version_id, kwargs)
+        cpk = kwargs.pop('cpk', None)
+        cpk_info = None
+        if cpk:
+            if self.scheme.lower() != 'https':
+                raise ValueError("Customer provided encryption key must be used over HTTPS.")
+            cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash,
+                               encryption_algorithm=cpk.algorithm)
+        try:
+            cls_method = kwargs.pop('cls', None)
+            if cls_method:
+                kwargs['cls'] = partial(deserialize_pipeline_response_into_cls, cls_method)
+            blob_props = await self._client.blob.get_properties(
+                timeout=kwargs.pop('timeout', None),
+                version_id=version_id,
+                snapshot=self.snapshot,
+                lease_access_conditions=access_conditions,
+                modified_access_conditions=mod_conditions,
+                cls=kwargs.pop('cls', None) or deserialize_blob_properties,
+                cpk_info=cpk_info,
+                **kwargs)
+        except HttpResponseError as error:
+            process_storage_error(error)
+        blob_props.name = self.blob_name
+        if isinstance(blob_props, BlobProperties):
+            blob_props.container = self.container_name
+            blob_props.snapshot = self.snapshot
+        return cast(BlobProperties, blob_props)
+
+    @distributed_trace_async
+    async def set_http_headers(
+        self, content_settings: Optional["ContentSettings"] = None,
+        **kwargs: Any
+    ) -> Dict[str, Any]:
+        """Sets system properties on the blob.
+
+        If one property is set for the content_settings, all properties will be overridden.
+
+        :param ~azure.storage.blob.ContentSettings content_settings:
+            ContentSettings object used to set blob properties. Used to set content type, encoding,
+            language, disposition, md5, and cache control.
+        :keyword lease:
+            Required if the blob has an active lease. Value can be a BlobLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword str if_tags_match_condition:
+            Specify a SQL where clause on blob tags to operate only on blob with a matching value.
+            eg. ``\"\\\"tagname\\\"='my tag'\"``
+
+            .. versionadded:: 12.4.0
+
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :returns: Blob-updated property dict (Etag and last modified)
+        :rtype: Dict[str, Any]
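+
+        A short sketch (illustrative; assumes an authenticated ``BlobClient``
+        named ``blob_client``). Because a partial ``ContentSettings`` overrides
+        every property, the existing values are read back and re-sent:
+
+        .. code-block:: python
+
+            from azure.storage.blob import ContentSettings
+
+            props = await blob_client.get_blob_properties()
+            current = props.content_settings
+            # Re-send every field: a partial ContentSettings would blank the rest.
+            await blob_client.set_http_headers(content_settings=ContentSettings(
+                content_type="application/json",  # the one field actually changed
+                content_encoding=current.content_encoding,
+                content_language=current.content_language,
+                content_disposition=current.content_disposition,
+                cache_control=current.cache_control,
+                content_md5=current.content_md5))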
+        """
+        options = _set_http_headers_options(content_settings=content_settings, **kwargs)
+        try:
+            return cast(Dict[str, Any], await self._client.blob.set_http_headers(**options))
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    @distributed_trace_async
+    async def set_blob_metadata(
+        self, metadata: Optional[Dict[str, str]] = None,
+        **kwargs: Any
+    ) -> Dict[str, Union[str, datetime]]:
+        """Sets user-defined metadata for the blob as one or more name-value pairs.
+
+        :param metadata:
+            Dict containing name and value pairs. Each call to this operation
+            replaces all existing metadata attached to the blob. To remove all
+            metadata from the blob, call this operation with no metadata headers.
+        :type metadata: dict(str, str)
+        :keyword lease:
+            Required if the blob has an active lease. Value can be a BlobLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword str if_tags_match_condition:
+            Specify a SQL where clause on blob tags to operate only on blob with a matching value.
+            eg. ``\"\\\"tagname\\\"='my tag'\"``
+
+            .. versionadded:: 12.4.0
+
+        :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk:
+            Encrypts the data on the service-side with the given key.
+            Use of customer-provided keys must be done over HTTPS.
+            As the encryption key itself is provided in the request,
+            a secure connection must be established to transfer the key.
+        :keyword str encryption_scope:
+            A predefined encryption scope used to encrypt the data on the service. An encryption
+            scope can be created using the Management API and referenced here by name. If a default
+            encryption scope has been defined at the container, this value will override it if the
+            container-level scope is configured to allow overrides. Otherwise an error will be raised.
+
+            .. versionadded:: 12.2.0
+
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :returns: Blob-updated property dict (Etag and last modified)
+        :rtype: Dict[str, Union[str, datetime]]
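+
+        A short sketch (illustrative; assumes an authenticated ``BlobClient``
+        named ``blob_client``). Each call replaces all existing metadata:
+
+        .. code-block:: python
+
+            # Replaces any previous metadata on the blob with these two pairs.
+            await blob_client.set_blob_metadata({"project": "demo", "stage": "raw"})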
+        """
+        if kwargs.get('cpk') and self.scheme.lower() != 'https':
+            raise ValueError("Customer provided encryption key must be used over HTTPS.")
+        options = _set_blob_metadata_options(metadata=metadata, **kwargs)
+        try:
+            return cast(Dict[str, Union[str, datetime]], await self._client.blob.set_metadata(**options))
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    @distributed_trace_async
+    async def set_immutability_policy(
+        self, immutability_policy: "ImmutabilityPolicy",
+        **kwargs: Any
+    ) -> Dict[str, str]:
+        """The Set Immutability Policy operation sets the immutability policy on the blob.
+
+        .. versionadded:: 12.10.0
+            This operation was introduced in API version '2020-10-02'.
+
+        :param ~azure.storage.blob.ImmutabilityPolicy immutability_policy:
+            Specifies the immutability policy of a blob, blob snapshot or blob version.
+
+            .. versionadded:: 12.10.0
+                This was introduced in API version '2020-10-02'.
+
+        :keyword str version_id:
+            The version id parameter is an opaque DateTime
+            value that, when present, specifies the version of the blob on which to set the immutability policy.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :returns: Response headers for the operation as a key-value dict.
+        :rtype: Dict[str, str]
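+
+        A short sketch (illustrative; assumes version-level immutability support
+        is enabled for the account and ``blob_client`` is authenticated):
+
+        .. code-block:: python
+
+            from datetime import datetime, timedelta, timezone
+            from azure.storage.blob import ImmutabilityPolicy
+
+            policy = ImmutabilityPolicy(
+                expiry_time=datetime.now(timezone.utc) + timedelta(days=7),
+                policy_mode="Unlocked")  # "Unlocked" can still be shortened or deleted
+            await blob_client.set_immutability_policy(policy)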
+        """
+
+        version_id = get_version_id(self.version_id, kwargs)
+        kwargs['immutability_policy_expiry'] = immutability_policy.expiry_time
+        kwargs['immutability_policy_mode'] = immutability_policy.policy_mode
+        return cast(Dict[str, str], await self._client.blob.set_immutability_policy(
+            cls=return_response_headers, version_id=version_id, **kwargs))
+
+    @distributed_trace_async
+    async def delete_immutability_policy(self, **kwargs: Any) -> None:
+        """The Delete Immutability Policy operation deletes the immutability policy on the blob.
+
+        .. versionadded:: 12.10.0
+            This operation was introduced in API version '2020-10-02'.
+
+        :keyword str version_id:
+            The version id parameter is an opaque DateTime
+            value that, when present, specifies the version of the blob whose immutability policy is deleted.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :rtype: None
+        """
+
+        version_id = get_version_id(self.version_id, kwargs)
+        await self._client.blob.delete_immutability_policy(version_id=version_id, **kwargs)
+
+    @distributed_trace_async
+    async def set_legal_hold(self, legal_hold: bool, **kwargs: Any) -> Dict[str, Union[str, datetime, bool]]:
+        """The Set Legal Hold operation sets a legal hold on the blob.
+
+        .. versionadded:: 12.10.0
+            This operation was introduced in API version '2020-10-02'.
+
+        :param bool legal_hold:
+            Specified if a legal hold should be set on the blob.
+        :keyword str version_id:
+            The version id parameter is an opaque DateTime
+            value that, when present, specifies the version of the blob on which to set the legal hold.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :returns: Response headers for the operation as a key-value dict.
+        :rtype: Dict[str, Union[str, datetime, bool]]
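+
+        A short sketch (illustrative; assumes an authenticated ``BlobClient``
+        named ``blob_client``):
+
+        .. code-block:: python
+
+            # Place, then later clear, a legal hold on the blob.
+            await blob_client.set_legal_hold(True)
+            await blob_client.set_legal_hold(False)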
+        """
+
+        version_id = get_version_id(self.version_id, kwargs)
+        return cast(Dict[str, Union[str, datetime, bool]], await self._client.blob.set_legal_hold(
+            legal_hold, version_id=version_id, cls=return_response_headers, **kwargs))
+
+    @distributed_trace_async
+    async def create_page_blob(
+        self, size: int,
+        content_settings: Optional["ContentSettings"] = None,
+        metadata: Optional[Dict[str, str]] = None,
+        premium_page_blob_tier: Optional[Union[str, "PremiumPageBlobTier"]] = None,
+        **kwargs: Any
+    ) -> Dict[str, Union[str, datetime]]:
+        """Creates a new Page Blob of the specified size.
+
+        :param int size:
+            This specifies the maximum size for the page blob, up to 1 TB.
+            The page blob size must be aligned to a 512-byte boundary.
+        :param ~azure.storage.blob.ContentSettings content_settings:
+            ContentSettings object used to set blob properties. Used to set content type, encoding,
+            language, disposition, md5, and cache control.
+        :param metadata:
+            Name-value pairs associated with the blob as metadata.
+        :type metadata: dict(str, str)
+        :param ~azure.storage.blob.PremiumPageBlobTier premium_page_blob_tier:
+            A page blob tier value to set the blob to. The tier correlates to the size of the
+            blob and number of allowed IOPS. This is only applicable to page blobs on
+            premium storage accounts.
+        :keyword tags:
+            Name-value pairs associated with the blob as tag. Tags are case-sensitive.
+            The tag set may contain at most 10 tags.  Tag keys must be between 1 and 128 characters,
+            and tag values must be between 0 and 256 characters.
+            Valid tag key and value characters include: lowercase and uppercase letters, digits (0-9),
+            space (' '), plus (+), minus (-), period (.), solidus (/), colon (:), equals (=), underscore (_)
+
+            .. versionadded:: 12.4.0
+
+        :paramtype tags: dict(str, str)
+        :keyword int sequence_number:
+            Only for Page blobs. The sequence number is a user-controlled value that you can use to
+            track requests. The value of the sequence number must be between 0
+            and 2^63 - 1. The default value is 0.
+        :keyword lease:
+            Required if the blob has an active lease. Value can be a BlobLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
+        :keyword ~azure.storage.blob.ImmutabilityPolicy immutability_policy:
+            Specifies the immutability policy of a blob, blob snapshot or blob version.
+
+            .. versionadded:: 12.10.0
+                This was introduced in API version '2020-10-02'.
+
+        :keyword bool legal_hold:
+            Specified if a legal hold should be set on the blob.
+
+            .. versionadded:: 12.10.0
+                This was introduced in API version '2020-10-02'.
+
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk:
+            Encrypts the data on the service-side with the given key.
+            Use of customer-provided keys must be done over HTTPS.
+            As the encryption key itself is provided in the request,
+            a secure connection must be established to transfer the key.
+        :keyword str encryption_scope:
+            A predefined encryption scope used to encrypt the data on the service. An encryption
+            scope can be created using the Management API and referenced here by name. If a default
+            encryption scope has been defined at the container, this value will override it if the
+            container-level scope is configured to allow overrides. Otherwise an error will be raised.
+
+            .. versionadded:: 12.2.0
+
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :returns: Blob-updated property dict (Etag and last modified).
+        :rtype: dict[str, Any]
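+
+        A short sketch (illustrative; assumes an authenticated ``BlobClient``
+        named ``blob_client``). The size must be a multiple of 512 bytes:
+
+        .. code-block:: python
+
+            # 1 MiB page blob, zero-filled until pages are written.
+            await blob_client.create_page_blob(size=1024 * 1024)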
+        """
+        if self.require_encryption or (self.key_encryption_key is not None):
+            raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION)
+        if kwargs.get('cpk') and self.scheme.lower() != 'https':
+            raise ValueError("Customer provided encryption key must be used over HTTPS.")
+        options = _create_page_blob_options(
+            size=size,
+            content_settings=content_settings,
+            metadata=metadata,
+            premium_page_blob_tier=premium_page_blob_tier,
+            **kwargs)
+        try:
+            return cast(Dict[str, Any], await self._client.page_blob.create(**options))
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    @distributed_trace_async
+    async def create_append_blob(
+        self, content_settings: Optional["ContentSettings"] = None,
+        metadata: Optional[Dict[str, str]] = None,
+        **kwargs: Any
+    ) -> Dict[str, Union[str, datetime]]:
+        """Creates a new Append Blob. This operation creates a new 0-length append blob. The content
+        of any existing blob is overwritten with the newly initialized append blob. To add content to
+        the append blob, call the :func:`append_block` or :func:`append_block_from_url` method.
+
+        :param ~azure.storage.blob.ContentSettings content_settings:
+            ContentSettings object used to set blob properties. Used to set content type, encoding,
+            language, disposition, md5, and cache control.
+        :param metadata:
+            Name-value pairs associated with the blob as metadata.
+        :type metadata: dict(str, str)
+        :keyword tags:
+            Name-value pairs associated with the blob as tag. Tags are case-sensitive.
+            The tag set may contain at most 10 tags.  Tag keys must be between 1 and 128 characters,
+            and tag values must be between 0 and 256 characters.
+            Valid tag key and value characters include: lowercase and uppercase letters, digits (0-9),
+            space (' '), plus (+), minus (-), period (.), solidus (/), colon (:), equals (=), underscore (_)
+
+            .. versionadded:: 12.4.0
+
+        :paramtype tags: dict(str, str)
+        :keyword ~azure.storage.blob.ImmutabilityPolicy immutability_policy:
+            Specifies the immutability policy of a blob, blob snapshot or blob version.
+
+            .. versionadded:: 12.10.0
+                This was introduced in API version '2020-10-02'.
+
+        :keyword bool legal_hold:
+            Specified if a legal hold should be set on the blob.
+
+            .. versionadded:: 12.10.0
+                This was introduced in API version '2020-10-02'.
+
+        :keyword lease:
+            Required if the blob has an active lease. Value can be a BlobLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk:
+            Encrypts the data on the service-side with the given key.
+            Use of customer-provided keys must be done over HTTPS.
+            As the encryption key itself is provided in the request,
+            a secure connection must be established to transfer the key.
+        :keyword str encryption_scope:
+            A predefined encryption scope used to encrypt the data on the service. An encryption
+            scope can be created using the Management API and referenced here by name. If a default
+            encryption scope has been defined at the container, this value will override it if the
+            container-level scope is configured to allow overrides. Otherwise an error will be raised.
+
+            .. versionadded:: 12.2.0
+
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :returns: Blob-updated property dict (Etag and last modified).
+        :rtype: dict[str, Any]
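+
+        A short sketch (illustrative; assumes an authenticated ``BlobClient``
+        named ``blob_client``) that creates the blob and then appends to it:
+
+        .. code-block:: python
+
+            # create_append_blob() overwrites any existing blob with a 0-length one.
+            await blob_client.create_append_blob()
+            await blob_client.append_block(b"first line\n")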
+        """
+        if self.require_encryption or (self.key_encryption_key is not None):
+            raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION)
+        if kwargs.get('cpk') and self.scheme.lower() != 'https':
+            raise ValueError("Customer provided encryption key must be used over HTTPS.")
+        options = _create_append_blob_options(
+            content_settings=content_settings,
+            metadata=metadata,
+            **kwargs)
+        try:
+            return cast(Dict[str, Union[str, datetime]], await self._client.append_blob.create(**options))
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    @distributed_trace_async
+    async def create_snapshot(
+        self, metadata: Optional[Dict[str, str]] = None,
+        **kwargs: Any
+    ) -> Dict[str, Union[str, datetime]]:
+        """Creates a snapshot of the blob.
+
+        A snapshot is a read-only version of a blob that's taken at a point in time.
+        It can be read, copied, or deleted, but not modified. Snapshots provide a way
+        to back up a blob as it appears at a moment in time.
+
+        A snapshot of a blob has the same name as the base blob from which the snapshot
+        is taken, with a DateTime value appended to indicate the time at which the
+        snapshot was taken.
+
+        :param metadata:
+            Name-value pairs associated with the blob as metadata.
+        :type metadata: dict(str, str)
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword str if_tags_match_condition:
+            Specify a SQL where clause on blob tags to operate only on blob with a matching value.
+            eg. ``\"\\\"tagname\\\"='my tag'\"``
+
+            .. versionadded:: 12.4.0
+
+        :keyword lease:
+            Required if the blob has an active lease. Value can be a BlobLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
+        :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk:
+            Encrypts the data on the service-side with the given key.
+            Use of customer-provided keys must be done over HTTPS.
+            As the encryption key itself is provided in the request,
+            a secure connection must be established to transfer the key.
+        :keyword str encryption_scope:
+            A predefined encryption scope used to encrypt the data on the service. An encryption
+            scope can be created using the Management API and referenced here by name. If a default
+            encryption scope has been defined at the container, this value will override it if the
+            container-level scope is configured to allow overrides. Otherwise an error will be raised.
+
+            .. versionadded:: 12.2.0
+
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :returns: Blob-updated property dict (Snapshot ID, Etag, and last modified).
+        :rtype: dict[str, Any]
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/blob_samples_common_async.py
+                :start-after: [START create_blob_snapshot]
+                :end-before: [END create_blob_snapshot]
+                :language: python
+                :dedent: 12
+                :caption: Create a snapshot of the blob.
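+
+        A minimal sketch (illustrative; assumes an authenticated ``BlobClient``
+        named ``blob_client``):
+
+        .. code-block:: python
+
+            snapshot_props = await blob_client.create_snapshot()
+            # The returned dict carries the snapshot's DateTime identifier.
+            print(snapshot_props["snapshot"])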
+        """
+        if kwargs.get('cpk') and self.scheme.lower() != 'https':
+            raise ValueError("Customer provided encryption key must be used over HTTPS.")
+        options = _create_snapshot_options(metadata=metadata, **kwargs)
+        try:
+            return cast(Dict[str, Any], await self._client.blob.create_snapshot(**options))
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    @distributed_trace_async
+    async def start_copy_from_url(
+        self, source_url: str,
+        metadata: Optional[Dict[str, str]] = None,
+        incremental_copy: bool = False,
+        **kwargs: Any
+    ) -> Dict[str, Union[str, datetime]]:
+        """Copies a blob from the given URL.
+
+        This operation returns a dictionary containing `copy_status` and `copy_id`,
+        which can be used to check the status of or abort the copy operation.
+        `copy_status` will be 'success' if the copy completed synchronously or
+        'pending' if the copy has been started asynchronously. For asynchronous copies,
+        the status can be checked by polling the :func:`get_blob_properties` method and
+        checking the copy status. Set `requires_sync` to True to force the copy to be synchronous.
+        The Blob service copies blobs on a best-effort basis.
+
+        The source blob for a copy operation may be a block blob, an append blob,
+        or a page blob. If the destination blob already exists, it must be of the
+        same blob type as the source blob. Any existing destination blob will be
+        overwritten. The destination blob cannot be modified while a copy operation
+        is in progress.
+
+        When copying from a page blob, the Blob service creates a destination page
+        blob of the source blob's length, initially containing all zeroes. Then
+        the source page ranges are enumerated, and non-empty ranges are copied.
+
+        For a block blob or an append blob, the Blob service creates a committed
+        blob of zero length before returning from this operation. When copying
+        from a block blob, all committed blocks and their block IDs are copied.
+        Uncommitted blocks are not copied. At the end of the copy operation, the
+        destination blob will have the same committed block count as the source.
+
+        When copying from an append blob, all committed blocks are copied. At the
+        end of the copy operation, the destination blob will have the same committed
+        block count as the source.
+
+        :param str source_url:
+            A URL of up to 2 KB in length that specifies a file or blob.
+            The value should be URL-encoded as it would appear in a request URI.
+            If the source is in another account, the source must either be public
+            or must be authenticated via a shared access signature. If the source
+            is public, no authentication is required.
+            Examples:
+            https://myaccount.blob.core.windows.net/mycontainer/myblob
+
+            https://myaccount.blob.core.windows.net/mycontainer/myblob?snapshot=<DateTime>
+
+            https://otheraccount.blob.core.windows.net/mycontainer/myblob?sastoken
+        :param metadata:
+            Name-value pairs associated with the blob as metadata. If no name-value
+            pairs are specified, the operation will copy the metadata from the
+            source blob or file to the destination blob. If one or more name-value
+            pairs are specified, the destination blob is created with the specified
+            metadata, and metadata is not copied from the source blob or file.
+        :type metadata: dict(str, str)
+        :param bool incremental_copy:
+            Copies the snapshot of the source page blob to a destination page blob.
+            The snapshot is copied such that only the differential changes between
+            the previously copied snapshot and the source snapshot are transferred to the destination.
+            The copied snapshots are complete copies of the original snapshot and
+            can be read or copied from as usual. Defaults to False.
+        :keyword tags:
+            Name-value pairs associated with the blob as tag. Tags are case-sensitive.
+            The tag set may contain at most 10 tags.  Tag keys must be between 1 and 128 characters,
+            and tag values must be between 0 and 256 characters.
+            Valid tag key and value characters include: lowercase and uppercase letters, digits (0-9),
+            space (' '), plus (+), minus (-), period (.), solidus (/), colon (:), equals (=), underscore (_).
+
+            The (case-sensitive) literal "COPY" can instead be passed to copy tags from the source blob.
+            This option is only available when `incremental_copy=False` and `requires_sync=True`.
+
+            .. versionadded:: 12.4.0
+
+        :paramtype tags: dict(str, str) or Literal["COPY"]
+        :keyword ~azure.storage.blob.ImmutabilityPolicy immutability_policy:
+            Specifies the immutability policy of a blob, blob snapshot or blob version.
+
+            .. versionadded:: 12.10.0
+                This was introduced in API version '2020-10-02'.
+
+        :keyword bool legal_hold:
+            Specified if a legal hold should be set on the blob.
+
+            .. versionadded:: 12.10.0
+                This was introduced in API version '2020-10-02'.
+
+        :keyword ~datetime.datetime source_if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this conditional header to copy the blob only if the source
+            blob has been modified since the specified date/time.
+        :keyword ~datetime.datetime source_if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this conditional header to copy the blob only if the source blob
+            has not been modified since the specified date/time.
+        :keyword str source_etag:
+            The source ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions source_match_condition:
+            The source match condition to use upon the etag.
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this conditional header to copy the blob only
+            if the destination blob has been modified since the specified date/time.
+            If the destination blob has not been modified, the Blob service returns
+            status code 412 (Precondition Failed).
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this conditional header to copy the blob only
+            if the destination blob has not been modified since the specified
+            date/time. If the destination blob has been modified, the Blob service
+            returns status code 412 (Precondition Failed).
+        :keyword str etag:
+            The destination ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The destination match condition to use upon the etag.
+        :keyword str if_tags_match_condition:
+            Specify a SQL where clause on blob tags to operate only on blob with a matching value.
+            eg. ``\"\\\"tagname\\\"='my tag'\"``
+
+            .. versionadded:: 12.4.0
+
+        :keyword destination_lease:
+            The lease ID specified for this header must match the lease ID of the
+            destination blob. If the request does not include the lease ID or it is not
+            valid, the operation fails with status code 412 (Precondition Failed).
+        :paramtype destination_lease: ~azure.storage.blob.aio.BlobLeaseClient or str
+        :keyword source_lease:
+            Specify this to perform the Copy Blob operation only if
+            the lease ID given matches the active lease ID of the source blob.
+        :paramtype source_lease: ~azure.storage.blob.aio.BlobLeaseClient or str
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :keyword ~azure.storage.blob.PremiumPageBlobTier premium_page_blob_tier:
+            A page blob tier value to set the blob to. The tier correlates to the size of the
+            blob and number of allowed IOPS. This is only applicable to page blobs on
+            premium storage accounts.
+        :keyword ~azure.storage.blob.StandardBlobTier standard_blob_tier:
+            A standard blob tier value to set the blob to. For this version of the library,
+            this is only applicable to block blobs on standard storage accounts.
+        :keyword ~azure.storage.blob.RehydratePriority rehydrate_priority:
+            Indicates the priority with which to rehydrate an archived blob
+        :keyword bool seal_destination_blob:
+            Seal the destination append blob. This operation is only for append blob.
+
+            .. versionadded:: 12.4.0
+
+        :keyword bool requires_sync:
+            Enforces that the service will not return a response until the copy is complete.
+        :keyword str source_authorization:
+            Authenticate as a service principal using a client secret to access a source blob. Ensure "bearer " is
+            the prefix of the source_authorization string. This option is only available when `incremental_copy` is
+            set to False and `requires_sync` is set to True.
+
+            .. versionadded:: 12.9.0
+
+        :keyword str encryption_scope:
+            A predefined encryption scope used to encrypt the data on the sync copied blob. An encryption
+            scope can be created using the Management API and referenced here by name. If a default
+            encryption scope has been defined at the container, this value will override it if the
+            container-level scope is configured to allow overrides. Otherwise an error will be raised.
+
+            .. versionadded:: 12.10.0
+
+        :returns: A dictionary of copy properties (etag, last_modified, copy_id, copy_status).
+        :rtype: dict[str, Union[str, ~datetime.datetime]]
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/blob_samples_common_async.py
+                :start-after: [START copy_blob_from_url]
+                :end-before: [END copy_blob_from_url]
+                :language: python
+                :dedent: 16
+                :caption: Copy a blob from a URL.
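+
+        A polling sketch (illustrative; assumes ``blob_client`` is the
+        destination and ``source_url`` is readable by the service):
+
+        .. code-block:: python
+
+            import asyncio
+
+            copy = await blob_client.start_copy_from_url(source_url)
+            # Asynchronous copies report 'pending' until the service finishes.
+            while copy["copy_status"] == "pending":
+                await asyncio.sleep(1)
+                props = await blob_client.get_blob_properties()
+                copy = {"copy_status": props.copy.status, "copy_id": props.copy.id}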
+        """
+        options = _start_copy_from_url_options(
+            source_url=source_url,
+            metadata=metadata,
+            incremental_copy=incremental_copy,
+            **kwargs)
+        try:
+            if incremental_copy:
+                return cast(Dict[str, Union[str, datetime]], await self._client.page_blob.copy_incremental(**options))
+            return cast(Dict[str, Union[str, datetime]], await self._client.blob.start_copy_from_url(**options))
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    @distributed_trace_async
+    async def abort_copy(
+        self, copy_id: Union[str, Dict[str, Any], BlobProperties],
+        **kwargs: Any
+    ) -> None:
+        """Abort an ongoing copy operation.
+
+        This will leave a destination blob with zero length and full metadata.
+        This will raise an error if the copy operation has already ended.
+
+        :param copy_id:
+            The copy operation to abort. This can be either an ID, or an
+            instance of BlobProperties.
+        :type copy_id: str or ~azure.storage.blob.BlobProperties
+        :rtype: None
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/blob_samples_common_async.py
+                :start-after: [START abort_copy_blob_from_url]
+                :end-before: [END abort_copy_blob_from_url]
+                :language: python
+                :dedent: 16
+                :caption: Abort copying a blob from URL.
+        """
+        options = _abort_copy_options(copy_id, **kwargs)
+        try:
+            await self._client.blob.abort_copy_from_url(**options)
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    @distributed_trace_async
+    async def acquire_lease(
+        self, lease_duration: int = -1,
+        lease_id: Optional[str] = None,
+        **kwargs: Any
+    ) -> BlobLeaseClient:
+        """Requests a new lease.
+
+        If the blob does not have an active lease, the Blob
+        Service creates a lease on the blob and returns a new lease.
+
+        :param int lease_duration:
+            Specifies the duration of the lease, in seconds, or negative one
+            (-1) for a lease that never expires. A non-infinite lease can be
+            between 15 and 60 seconds. A lease duration cannot be changed
+            using renew or change. Default is -1 (infinite lease).
+        :param str lease_id:
+            Proposed lease ID, in a GUID string format. The Blob Service
+            returns 400 (Invalid request) if the proposed lease ID is not
+            in the correct format.
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword str if_tags_match_condition:
+            Specify a SQL where clause on blob tags to operate only on blobs with a matching value.
+            e.g. ``\"\\\"tagname\\\"='my tag'\"``
+
+            .. versionadded:: 12.4.0
+
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :returns: A BlobLeaseClient object.
+        :rtype: ~azure.storage.blob.aio.BlobLeaseClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/blob_samples_common_async.py
+                :start-after: [START acquire_lease_on_blob]
+                :end-before: [END acquire_lease_on_blob]
+                :language: python
+                :dedent: 12
+                :caption: Acquiring a lease on a blob.
+        """
+        lease = BlobLeaseClient(self, lease_id=lease_id)
+        await lease.acquire(lease_duration=lease_duration, **kwargs)
+        return lease
+
+    @distributed_trace_async
+    async def set_standard_blob_tier(self, standard_blob_tier: Union[str, "StandardBlobTier"], **kwargs: Any) -> None:
+        """This operation sets the tier on a block blob.
+
+        A block blob's tier determines Hot/Cool/Archive storage type.
+        This operation does not update the blob's ETag.
+
+        :param standard_blob_tier:
+            Indicates the tier to be set on the blob. Options include 'Hot', 'Cool',
+            'Archive'. The hot tier is optimized for storing data that is accessed
+            frequently. The cool storage tier is optimized for storing data that
+            is infrequently accessed and stored for at least a month. The archive
+            tier is optimized for storing data that is rarely accessed and stored
+            for at least six months with flexible latency requirements.
+        :type standard_blob_tier: str or ~azure.storage.blob.StandardBlobTier
+        :keyword ~azure.storage.blob.RehydratePriority rehydrate_priority:
+            Indicates the priority with which to rehydrate an archived blob.
+        :keyword str if_tags_match_condition:
+            Specify a SQL where clause on blob tags to operate only on blobs with a matching value.
+            e.g. ``\"\\\"tagname\\\"='my tag'\"``
+
+            .. versionadded:: 12.4.0
+
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :keyword lease:
+            Required if the blob has an active lease. Value can be a BlobLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
+        :rtype: None
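+
+        .. admonition:: Example:
+
+            A hypothetical usage sketch (not taken from the shipped sample
+            files), assuming an authenticated async ``BlobClient`` named
+            ``blob_client`` that points at a block blob::
+
+                from azure.storage.blob import StandardBlobTier
+
+                # Move the blob to the Cool tier; the ETag is left unchanged.
+                await blob_client.set_standard_blob_tier(StandardBlobTier.COOL)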
+        """
+        access_conditions = get_access_conditions(kwargs.pop('lease', None))
+        mod_conditions = get_modify_conditions(kwargs)
+        version_id = get_version_id(self.version_id, kwargs)
+        if standard_blob_tier is None:
+            raise ValueError("A StandardBlobTier must be specified")
+        try:
+            await self._client.blob.set_tier(
+                tier=standard_blob_tier,
+                timeout=kwargs.pop('timeout', None),
+                modified_access_conditions=mod_conditions,
+                lease_access_conditions=access_conditions,
+                version_id=version_id,
+                **kwargs)
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    @distributed_trace_async
+    async def stage_block(
+        self, block_id: str,
+        data: Union[bytes, str, Iterable[AnyStr], IO[AnyStr]],
+        length: Optional[int] = None,
+        **kwargs: Any
+    ) -> Dict[str, Any]:
+        """Creates a new block to be committed as part of a blob.
+
+        :param str block_id: A string value that identifies the block.
+             The string should be less than or equal to 64 bytes in size.
+             For a given blob, the block_id must be the same size for each block.
+        :param data: The blob data.
+        :type data: Union[bytes, str, Iterable[AnyStr], IO[AnyStr]]
+        :param int length: Size of the block.
+        :keyword bool validate_content:
+            If true, calculates an MD5 hash for each chunk of the blob. The storage
+            service checks the hash of the content that has arrived with the hash
+            that was sent. This is primarily valuable for detecting bitflips on
+            the wire if using http instead of https, as https (the default), will
+            already validate. Note that this MD5 hash is not stored with the
+            blob. Also note that if enabled, the memory-efficient upload algorithm
+            will not be used because computing the MD5 hash requires buffering
+            entire blocks, and doing so defeats the purpose of the memory-efficient algorithm.
+        :keyword lease:
+            Required if the blob has an active lease. Value can be a BlobLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
+        :keyword str encoding:
+            Defaults to UTF-8.
+        :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk:
+            Encrypts the data on the service-side with the given key.
+            Use of customer-provided keys must be done over HTTPS.
+            As the encryption key itself is provided in the request,
+            a secure connection must be established to transfer the key.
+        :keyword str encryption_scope:
+            A predefined encryption scope used to encrypt the data on the service. An encryption
+            scope can be created using the Management API and referenced here by name. If a default
+            encryption scope has been defined at the container, this value will override it if the
+            container-level scope is configured to allow overrides. Otherwise an error will be raised.
+
+            .. versionadded:: 12.2.0
+
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :returns: Blob property dict.
+        :rtype: Dict[str, Any]
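+
+        .. admonition:: Example:
+
+            A hypothetical usage sketch (not taken from the shipped sample
+            files), assuming an authenticated async ``BlobClient`` named
+            ``blob_client``; block IDs must be the same length for a given
+            blob::
+
+                # Stage a block that can later be committed with commit_block_list.
+                await blob_client.stage_block(block_id="block-000001", data=b"hello world")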
+        """
+        if self.require_encryption or (self.key_encryption_key is not None):
+            raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION)
+        if kwargs.get('cpk') and self.scheme.lower() != 'https':
+            raise ValueError("Customer provided encryption key must be used over HTTPS.")
+        options = _stage_block_options(
+            block_id=block_id,
+            data=data,
+            length=length,
+            **kwargs)
+        try:
+            return cast(Dict[str, Any], await self._client.block_blob.stage_block(**options))
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    @distributed_trace_async
+    async def stage_block_from_url(
+        self, block_id: str,
+        source_url: str,
+        source_offset: Optional[int] = None,
+        source_length: Optional[int] = None,
+        source_content_md5: Optional[Union[bytes, bytearray]] = None,
+        **kwargs: Any
+    ) -> Dict[str, Any]:
+        """Creates a new block to be committed as part of a blob where
+        the contents are read from a URL.
+
+        :param str block_id: A string value that identifies the block.
+             The string should be less than or equal to 64 bytes in size.
+             For a given blob, the block_id must be the same size for each block.
+        :param str source_url: The URL.
+        :param int source_offset:
+            Start of byte range to use for the block.
+            Must be set if source length is provided.
+        :param int source_length: The size of the block in bytes.
+        :param bytearray source_content_md5:
+            Specify the md5 calculated for the range of
+            bytes that must be read from the copy source.
+        :keyword lease:
+            Required if the blob has an active lease. Value can be a BlobLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
+        :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk:
+            Encrypts the data on the service-side with the given key.
+            Use of customer-provided keys must be done over HTTPS.
+            As the encryption key itself is provided in the request,
+            a secure connection must be established to transfer the key.
+        :keyword str encryption_scope:
+            A predefined encryption scope used to encrypt the data on the service. An encryption
+            scope can be created using the Management API and referenced here by name. If a default
+            encryption scope has been defined at the container, this value will override it if the
+            container-level scope is configured to allow overrides. Otherwise an error will be raised.
+
+            .. versionadded:: 12.2.0
+
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :keyword str source_authorization:
+            Authenticate as a service principal using a client secret to access a source blob. Ensure "bearer " is
+            the prefix of the source_authorization string.
+        :returns: Blob property dict.
+        :rtype: Dict[str, Any]
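+
+        .. admonition:: Example:
+
+            A hypothetical usage sketch (not taken from the shipped sample
+            files), assuming an authenticated async ``BlobClient`` named
+            ``blob_client`` and a readable source blob (the placeholder URL
+            below is illustrative only)::
+
+                # Stage the first 512 bytes of the source blob as a block.
+                await blob_client.stage_block_from_url(
+                    block_id="block-000001",
+                    source_url="https://<account>.blob.core.windows.net/<container>/<blob>?<sas>",
+                    source_offset=0,
+                    source_length=512)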
+        """
+        if kwargs.get('cpk') and self.scheme.lower() != 'https':
+            raise ValueError("Customer provided encryption key must be used over HTTPS.")
+        options = _stage_block_from_url_options(
+            block_id=block_id,
+            source_url=source_url,
+            source_offset=source_offset,
+            source_length=source_length,
+            source_content_md5=source_content_md5,
+            **kwargs)
+        try:
+            return cast(Dict[str, Any], await self._client.block_blob.stage_block_from_url(**options))
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    @distributed_trace_async
+    async def get_block_list(
+        self, block_list_type: str = "committed",
+        **kwargs: Any
+    ) -> Tuple[List[BlobBlock], List[BlobBlock]]:
+        """The Get Block List operation retrieves the list of blocks that have
+        been uploaded as part of a block blob.
+
+        :param str block_list_type:
+            Specifies whether to return the list of committed
+            blocks, the list of uncommitted blocks, or both lists together.
+            Possible values include: 'committed', 'uncommitted', 'all'
+        :keyword lease:
+            Required if the blob has an active lease. Value can be a BlobLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
+        :keyword str if_tags_match_condition:
+            Specify a SQL where clause on blob tags to operate only on blobs with a matching value.
+            e.g. ``\"\\\"tagname\\\"='my tag'\"``
+
+            .. versionadded:: 12.4.0
+
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :returns: A tuple of two lists - committed and uncommitted blocks
+        :rtype: Tuple[List[BlobBlock], List[BlobBlock]]
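+
+        .. admonition:: Example:
+
+            A hypothetical usage sketch (not taken from the shipped sample
+            files), assuming an authenticated async ``BlobClient`` named
+            ``blob_client``::
+
+                # Fetch both committed and uncommitted blocks.
+                committed, uncommitted = await blob_client.get_block_list("all")
+                for block in committed:
+                    print(block.id, block.size)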
+        """
+        access_conditions = get_access_conditions(kwargs.pop('lease', None))
+        mod_conditions = get_modify_conditions(kwargs)
+        try:
+            blocks = await self._client.block_blob.get_block_list(
+                list_type=block_list_type,
+                snapshot=self.snapshot,
+                timeout=kwargs.pop('timeout', None),
+                lease_access_conditions=access_conditions,
+                modified_access_conditions=mod_conditions,
+                **kwargs)
+        except HttpResponseError as error:
+            process_storage_error(error)
+        return _get_block_list_result(blocks)
+
+    @distributed_trace_async
+    async def commit_block_list(
+        self, block_list: List[BlobBlock],
+        content_settings: Optional["ContentSettings"] = None,
+        metadata: Optional[Dict[str, str]] = None,
+        **kwargs: Any
+    ) -> Dict[str, Union[str, datetime]]:
+        """The Commit Block List operation writes a blob by specifying the list of
+        block IDs that make up the blob.
+
+        :param list block_list:
+            List of BlobBlock objects identifying the blocks to commit.
+        :param ~azure.storage.blob.ContentSettings content_settings:
+            ContentSettings object used to set blob properties. Used to set content type, encoding,
+            language, disposition, md5, and cache control.
+        :param metadata:
+            Name-value pairs associated with the blob as metadata.
+        :type metadata: dict[str, str]
+        :keyword tags:
+            Name-value pairs associated with the blob as tag. Tags are case-sensitive.
+            The tag set may contain at most 10 tags.  Tag keys must be between 1 and 128 characters,
+            and tag values must be between 0 and 256 characters.
+            Valid tag key and value characters include: lowercase and uppercase letters, digits (0-9),
+            space (' '), plus (+), minus (-), period (.), solidus (/), colon (:), equals (=), underscore (_)
+
+            .. versionadded:: 12.4.0
+
+        :paramtype tags: dict(str, str)
+        :keyword lease:
+            Required if the blob has an active lease. Value can be a BlobLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
+        :keyword ~azure.storage.blob.ImmutabilityPolicy immutability_policy:
+            Specifies the immutability policy of a blob, blob snapshot or blob version.
+
+            .. versionadded:: 12.10.0
+                This was introduced in API version '2020-10-02'.
+
+        :keyword bool legal_hold:
+            Specified if a legal hold should be set on the blob.
+
+            .. versionadded:: 12.10.0
+                This was introduced in API version '2020-10-02'.
+
+        :keyword bool validate_content:
+            If true, calculates an MD5 hash of the page content. The storage
+            service checks the hash of the content that has arrived
+            with the hash that was sent. This is primarily valuable for detecting
+            bitflips on the wire if using http instead of https, as https (the default),
+            will already validate. Note that this MD5 hash is not stored with the
+            blob.
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword str if_tags_match_condition:
+            Specify a SQL where clause on blob tags to operate only on blobs with a matching value.
+            e.g. ``\"\\\"tagname\\\"='my tag'\"``
+
+            .. versionadded:: 12.4.0
+
+        :keyword ~azure.storage.blob.StandardBlobTier standard_blob_tier:
+            A standard blob tier value to set the blob to. For this version of the library,
+            this is only applicable to block blobs on standard storage accounts.
+        :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk:
+            Encrypts the data on the service-side with the given key.
+            Use of customer-provided keys must be done over HTTPS.
+            As the encryption key itself is provided in the request,
+            a secure connection must be established to transfer the key.
+        :keyword str encryption_scope:
+            A predefined encryption scope used to encrypt the data on the service. An encryption
+            scope can be created using the Management API and referenced here by name. If a default
+            encryption scope has been defined at the container, this value will override it if the
+            container-level scope is configured to allow overrides. Otherwise an error will be raised.
+
+            .. versionadded:: 12.2.0
+
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :returns: Blob-updated property dict (Etag and last modified).
+        :rtype: dict(str, Any)
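+
+        .. admonition:: Example:
+
+            A hypothetical usage sketch (not taken from the shipped sample
+            files), assuming an authenticated async ``BlobClient`` named
+            ``blob_client`` whose blocks were staged with the IDs below::
+
+                from azure.storage.blob import BlobBlock
+
+                # Commit the staged blocks, in order, as the blob's content.
+                block_list = [BlobBlock(block_id="block-000001"), BlobBlock(block_id="block-000002")]
+                await blob_client.commit_block_list(block_list)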
+        """
+        if self.require_encryption or (self.key_encryption_key is not None):
+            raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION)
+        if kwargs.get('cpk') and self.scheme.lower() != 'https':
+            raise ValueError("Customer provided encryption key must be used over HTTPS.")
+        options = _commit_block_list_options(
+            block_list=block_list,
+            content_settings=content_settings,
+            metadata=metadata,
+            **kwargs)
+        try:
+            return cast(Dict[str, Any], await self._client.block_blob.commit_block_list(**options))
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    @distributed_trace_async
+    async def set_premium_page_blob_tier(self, premium_page_blob_tier: "PremiumPageBlobTier", **kwargs: Any) -> None:
+        """Sets the page blob tiers on the blob. This API is only supported for page blobs on premium accounts.
+
+        :param premium_page_blob_tier:
+            A page blob tier value to set the blob to. The tier correlates to the size of the
+            blob and number of allowed IOPS. This is only applicable to page blobs on
+            premium storage accounts.
+        :type premium_page_blob_tier: ~azure.storage.blob.PremiumPageBlobTier
+        :keyword str if_tags_match_condition:
+            Specify a SQL where clause on blob tags to operate only on blobs with a matching value.
+            e.g. ``\"\\\"tagname\\\"='my tag'\"``
+
+            .. versionadded:: 12.4.0
+
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :keyword lease:
+            Required if the blob has an active lease. Value can be a BlobLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
+        :rtype: None
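+
+        .. admonition:: Example:
+
+            A hypothetical usage sketch (not taken from the shipped sample
+            files), assuming an authenticated async ``BlobClient`` named
+            ``blob_client`` that points at a page blob on a premium account::
+
+                from azure.storage.blob import PremiumPageBlobTier
+
+                await blob_client.set_premium_page_blob_tier(PremiumPageBlobTier.P10)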
+        """
+        access_conditions = get_access_conditions(kwargs.pop('lease', None))
+        mod_conditions = get_modify_conditions(kwargs)
+        if premium_page_blob_tier is None:
+            raise ValueError("A PremiumPageBlobTiermust be specified")
+        try:
+            await self._client.blob.set_tier(
+                tier=premium_page_blob_tier,
+                timeout=kwargs.pop('timeout', None),
+                lease_access_conditions=access_conditions,
+                modified_access_conditions=mod_conditions,
+                **kwargs)
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    @distributed_trace_async
+    async def set_blob_tags(self, tags: Optional[Dict[str, str]] = None, **kwargs: Any) -> Dict[str, Any]:
+        """The Set Tags operation enables users to set tags on a blob or specific blob version, but not snapshot.
+            Each call to this operation replaces all existing tags attached to the blob. To remove all
+            tags from the blob, call this operation with no tags set.
+
+        .. versionadded:: 12.4.0
+            This operation was introduced in API version '2019-12-12'.
+
+        :param tags:
+            Name-value pairs associated with the blob as tag. Tags are case-sensitive.
+            The tag set may contain at most 10 tags.  Tag keys must be between 1 and 128 characters,
+            and tag values must be between 0 and 256 characters.
+            Valid tag key and value characters include: lowercase and uppercase letters, digits (0-9),
+            space (' '), plus (+), minus (-), period (.), solidus (/), colon (:), equals (=), underscore (_)
+        :type tags: dict(str, str)
+        :keyword str version_id:
+            The version id parameter is an opaque DateTime
+            value that, when present, specifies the version of the blob to set tags on.
+        :keyword bool validate_content:
+            If true, calculates an MD5 hash of the tags content. The storage
+            service checks the hash of the content that has arrived
+            with the hash that was sent. This is primarily valuable for detecting
+            bitflips on the wire if using http instead of https, as https (the default),
+            will already validate. Note that this MD5 hash is not stored with the
+            blob.
+        :keyword str if_tags_match_condition:
+            Specify a SQL where clause on blob tags to operate only on blobs with a matching value.
+            e.g. ``\"\\\"tagname\\\"='my tag'\"``
+        :keyword lease:
+            Required if the blob has an active lease. Value can be a BlobLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :returns: Blob-updated property dict (Etag and last modified)
+        :rtype: Dict[str, Any]
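+
+        .. admonition:: Example:
+
+            A hypothetical usage sketch (not taken from the shipped sample
+            files), assuming an authenticated async ``BlobClient`` named
+            ``blob_client``; note that the call replaces any existing tag set::
+
+                await blob_client.set_blob_tags({"project": "demo", "status": "draft"})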
+        """
+        version_id = get_version_id(self.version_id, kwargs)
+        options = _set_blob_tags_options(version_id=version_id, tags=tags, **kwargs)
+        try:
+            return cast(Dict[str, Any], await self._client.blob.set_tags(**options))
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    @distributed_trace_async
+    async def get_blob_tags(self, **kwargs: Any) -> Dict[str, str]:
+        """The Get Tags operation enables users to get tags on a blob or specific blob version, but not snapshot.
+
+        .. versionadded:: 12.4.0
+            This operation was introduced in API version '2019-12-12'.
+
+        :keyword str version_id:
+            The version id parameter is an opaque DateTime
+            value that, when present, specifies the version of the blob to get tags from.
+        :keyword str if_tags_match_condition:
+            Specify a SQL where clause on blob tags to operate only on blobs with a matching value.
+            e.g. ``\"\\\"tagname\\\"='my tag'\"``
+        :keyword lease:
+            Required if the blob has an active lease. Value can be a BlobLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :returns: Key value pairs of blob tags.
+        :rtype: Dict[str, str]
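+
+        .. admonition:: Example:
+
+            A hypothetical usage sketch (not taken from the shipped sample
+            files), assuming an authenticated async ``BlobClient`` named
+            ``blob_client``::
+
+                tags = await blob_client.get_blob_tags()
+                for key, value in tags.items():
+                    print(key, value)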
+        """
+        version_id = get_version_id(self.version_id, kwargs)
+        options = _get_blob_tags_options(version_id=version_id, snapshot=self.snapshot, **kwargs)
+        try:
+            _, tags = await self._client.blob.get_tags(**options)
+            return cast(Dict[str, str], parse_tags(tags))
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    @distributed_trace_async
+    async def get_page_ranges(
+        self, offset: Optional[int] = None,
+        length: Optional[int] = None,
+        previous_snapshot_diff: Optional[Union[str, Dict[str, Any]]] = None,
+        **kwargs: Any
+    ) -> Tuple[List[Dict[str, int]], List[Dict[str, int]]]:
+        """DEPRECATED: Returns the list of valid page ranges for a Page Blob or snapshot
+        of a page blob.
+
+        :param int offset:
+            Start of byte range to use for getting valid page ranges.
+            If no length is given, all bytes after the offset will be searched.
+            Pages must be aligned with 512-byte boundaries, the start offset
+            must be a multiple of 512 and the length must be a multiple of
+            512.
+        :param int length:
+            Number of bytes to use for getting valid page ranges.
+            If length is given, offset must be provided.
+            This range will return valid page ranges from the offset start up to
+            the specified length.
+            Pages must be aligned with 512-byte boundaries, the start offset
+            must be a multiple of 512 and the length must be a multiple of
+            512.
+        :param str previous_snapshot_diff:
+            The snapshot diff parameter that contains an opaque DateTime value that
+            specifies a previous blob snapshot to be compared
+            against a more recent snapshot or the current blob.
+        :keyword lease:
+            Required if the blob has an active lease. Value can be a BlobLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword str if_tags_match_condition:
+            Specify a SQL where clause on blob tags to operate only on blobs with a matching value.
+            e.g. ``\"\\\"tagname\\\"='my tag'\"``
+
+            .. versionadded:: 12.4.0
+
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :returns:
+            A tuple of two lists of page ranges as dictionaries with 'start' and 'end' keys.
+            The first element is the filled page ranges, the second element is the cleared page ranges.
+        :rtype: tuple(list(dict(str, int)), list(dict(str, int)))
+        """
+        warnings.warn(
+            "get_page_ranges is deprecated, use list_page_ranges instead",
+            DeprecationWarning
+        )
+
+        options = _get_page_ranges_options(
+            snapshot=self.snapshot,
+            offset=offset,
+            length=length,
+            previous_snapshot_diff=previous_snapshot_diff,
+            **kwargs)
+        try:
+            if previous_snapshot_diff:
+                ranges = await self._client.page_blob.get_page_ranges_diff(**options)
+            else:
+                ranges = await self._client.page_blob.get_page_ranges(**options)
+        except HttpResponseError as error:
+            process_storage_error(error)
+        return get_page_ranges_result(ranges)
+
+    @distributed_trace
+    def list_page_ranges(
+        self,
+        *,
+        offset: Optional[int] = None,
+        length: Optional[int] = None,
+        previous_snapshot: Optional[Union[str, Dict[str, Any]]] = None,
+        **kwargs: Any
+    ) -> AsyncItemPaged[PageRange]:
+        """Returns the list of valid page ranges for a Page Blob or snapshot
+        of a page blob. If `previous_snapshot` is specified, the result will be
+        a diff of changes between the target blob and the previous snapshot.
+
+        :keyword int offset:
+            Start of byte range to use for getting valid page ranges.
+            If no length is given, all bytes after the offset will be searched.
+            Pages must be aligned with 512-byte boundaries, the start offset
+            must be a multiple of 512 and the length must be a multiple of
+            512.
+        :keyword int length:
+            Number of bytes to use for getting valid page ranges.
+            If length is given, offset must be provided.
+            This range will return valid page ranges from the offset start up to
+            the specified length.
+            Pages must be aligned with 512-byte boundaries, the start offset
+            must be a multiple of 512 and the length must be a multiple of
+            512.
+        :keyword previous_snapshot:
+            A snapshot value that specifies that the response will contain only pages that were changed
+            between target blob and previous snapshot. Changed pages include both updated and cleared
+            pages. The target blob may be a snapshot, as long as the snapshot specified by `previous_snapshot`
+            is the older of the two.
+        :paramtype previous_snapshot: str or Dict[str, Any]
+        :keyword lease:
+            Required if the blob has an active lease. Value can be a BlobLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword str if_tags_match_condition:
+            Specify a SQL where clause on blob tags to operate only on blobs with a matching value.
+            e.g. ``\"\\\"tagname\\\"='my tag'\"``
+
+            .. versionadded:: 12.4.0
+
+        :keyword int results_per_page:
+            The maximum number of page ranges to retrieve per API call.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :returns: An async iterable (auto-paging) of PageRange.
+        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.storage.blob.PageRange]
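+
+        .. admonition:: Example:
+
+            A hypothetical usage sketch (not taken from the shipped sample
+            files), assuming an authenticated async ``BlobClient`` named
+            ``blob_client`` that points at a page blob::
+
+                # Iterate the valid (filled) page ranges of the blob.
+                async for page_range in blob_client.list_page_ranges():
+                    print(page_range.start, page_range.end)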
+        """
+        results_per_page = kwargs.pop('results_per_page', None)
+        options = _get_page_ranges_options(
+            snapshot=self.snapshot,
+            offset=offset,
+            length=length,
+            previous_snapshot_diff=previous_snapshot,
+            **kwargs)
+
+        if previous_snapshot:
+            command = partial(
+                self._client.page_blob.get_page_ranges_diff,
+                **options)
+        else:
+            command = partial(
+                self._client.page_blob.get_page_ranges,
+                **options)
+        return AsyncItemPaged(
+            command, results_per_page=results_per_page,
+            page_iterator_class=PageRangePaged)
+
+    @distributed_trace_async
+    async def get_page_range_diff_for_managed_disk(
+        self, previous_snapshot_url: str,
+        offset: Optional[int] = None,
+        length: Optional[int] = None,
+        **kwargs: Any
+    ) -> Tuple[List[Dict[str, int]], List[Dict[str, int]]]:
+        """Returns the list of valid page ranges for a managed disk or snapshot.
+
+        .. note::
+            This operation is only available for managed disk accounts.
+
+        .. versionadded:: 12.2.0
+            This operation was introduced in API version '2019-07-07'.
+
+        :param str previous_snapshot_url:
+            Specifies the URL of a previous snapshot of the managed disk.
+            The response will only contain pages that were changed between the target blob and
+            its previous snapshot.
+        :param int offset:
+            Start of byte range to use for getting valid page ranges.
+            If no length is given, all bytes after the offset will be searched.
+            Pages must be aligned with 512-byte boundaries, the start offset
+            must be a multiple of 512 and the length must be a multiple of
+            512.
+        :param int length:
+            Number of bytes to use for getting valid page ranges.
+            If length is given, offset must be provided.
+            This range will return valid page ranges from the offset start up to
+            the specified length.
+            Pages must be aligned with 512-byte boundaries, the start offset
+            must be a multiple of 512 and the length must be a multiple of
+            512.
+        :keyword lease:
+            Required if the blob has an active lease. Value can be a BlobLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :returns:
+            A tuple of two lists of page ranges as dictionaries with 'start' and 'end' keys.
+            The first element is the filled page ranges, the second element is the cleared page ranges.
+        :rtype: tuple(list(dict(str, int)), list(dict(str, int)))
+        """
+        options = _get_page_ranges_options(
+            snapshot=self.snapshot,
+            offset=offset,
+            length=length,
+            prev_snapshot_url=previous_snapshot_url,
+            **kwargs)
+        try:
+            ranges = await self._client.page_blob.get_page_ranges_diff(**options)
+        except HttpResponseError as error:
+            process_storage_error(error)
+        return get_page_ranges_result(ranges)
+
+    @distributed_trace_async
+    async def set_sequence_number(
+        self, sequence_number_action: Union[str, "SequenceNumberAction"],
+        sequence_number: Optional[str] = None,
+        **kwargs: Any
+    ) -> Dict[str, Union[str, datetime]]:
+        """Sets the blob sequence number.
+
+        :param str sequence_number_action:
+            This property indicates how the service should modify the blob's sequence
+            number. See :class:`~azure.storage.blob.SequenceNumberAction` for more information.
+        :param str sequence_number:
+            This property sets the blob's sequence number. The sequence number is a
+            user-controlled property that you can use to track requests and manage
+            concurrency issues.
+        :keyword lease:
+            Required if the blob has an active lease. Value can be a BlobLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword str if_tags_match_condition:
+            Specify a SQL where clause on blob tags to operate only on blobs with a matching value.
+            e.g. ``\"\\\"tagname\\\"='my tag'\"``
+
+            .. versionadded:: 12.4.0
+
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :returns: Blob-updated property dict (Etag and last modified).
+        :rtype: dict(str, Any)
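+
+        .. admonition:: Example:
+
+            A hypothetical usage sketch (not taken from the shipped sample
+            files), assuming an authenticated async ``BlobClient`` named
+            ``blob_client`` that points at a page blob::
+
+                # Set the blob's sequence number to an explicit value.
+                await blob_client.set_sequence_number("update", sequence_number="7")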
+        """
+        options = _set_sequence_number_options(sequence_number_action, sequence_number=sequence_number, **kwargs)
+        try:
+            return cast(Dict[str, Any], await self._client.page_blob.update_sequence_number(**options))
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    @distributed_trace_async
+    async def resize_blob(self, size: int, **kwargs: Any) -> Dict[str, Union[str, datetime]]:
+        """Resizes a page blob to the specified size.
+
+        If the specified value is less than the current size of the blob,
+        then all pages above the specified value are cleared.
+
+        :param int size:
+            Size used to resize the blob. The maximum size for a page blob is 1 TB.
+            The page blob size must be aligned to a 512-byte boundary.
+        :keyword lease:
+            Required if the blob has an active lease. Value can be a BlobLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword str if_tags_match_condition:
+            Specify a SQL where clause on blob tags to operate only on blobs with a matching value.
+            e.g. ``\"\\\"tagname\\\"='my tag'\"``
+
+            .. versionadded:: 12.4.0
+
+        :keyword ~azure.storage.blob.PremiumPageBlobTier premium_page_blob_tier:
+            A page blob tier value to set the blob to. The tier correlates to the size of the
+            blob and number of allowed IOPS. This is only applicable to page blobs on
+            premium storage accounts.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :returns: Blob-updated property dict (Etag and last modified).
+        :rtype: dict(str, Any)
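+
+        .. admonition:: Example:
+
+            A hypothetical usage sketch (not taken from the shipped sample
+            files), assuming an authenticated async ``BlobClient`` named
+            ``blob_client`` that points at a page blob::
+
+                # Grow (or shrink) the page blob to 1 MiB; the new size must
+                # be aligned to a 512-byte boundary.
+                await blob_client.resize_blob(1024 * 1024)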
+        """
+        if kwargs.get('cpk') and self.scheme.lower() != 'https':
+            raise ValueError("Customer provided encryption key must be used over HTTPS.")
+        options = _resize_blob_options(size=size, **kwargs)
+        try:
+            return cast(Dict[str, Any], await self._client.page_blob.resize(**options))
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    @distributed_trace_async
+    async def upload_page(
+        self, page: bytes,
+        offset: int,
+        length: int,
+        **kwargs: Any
+    ) -> Dict[str, Union[str, datetime]]:
+        """The Upload Pages operation writes a range of pages to a page blob.
+
+        :param bytes page:
+            Content of the page.
+        :param int offset:
+            Start of byte range to use for writing to a section of the blob.
+            Pages must be aligned with 512-byte boundaries, the start offset
+            must be a multiple of 512 and the length must be a multiple of
+            512.
+        :param int length:
+            Number of bytes to use for writing to a section of the blob.
+            Pages must be aligned with 512-byte boundaries, the start offset
+            must be a multiple of 512 and the length must be a multiple of
+            512.
+        :keyword lease:
+            Required if the blob has an active lease. Value can be a BlobLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
+        :keyword bool validate_content:
+            If true, calculates an MD5 hash of the page content. The storage
+            service checks the hash of the content that has arrived
+            with the hash that was sent. This is primarily valuable for detecting
+            bitflips on the wire if using http instead of https, as https (the default),
+            will already validate. Note that this MD5 hash is not stored with the
+            blob.
+        :keyword int if_sequence_number_lte:
+            If the blob's sequence number is less than or equal to
+            the specified value, the request proceeds; otherwise it fails.
+        :keyword int if_sequence_number_lt:
+            If the blob's sequence number is less than the specified
+            value, the request proceeds; otherwise it fails.
+        :keyword int if_sequence_number_eq:
+            If the blob's sequence number is equal to the specified
+            value, the request proceeds; otherwise it fails.
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword str if_tags_match_condition:
+            Specify a SQL where clause on blob tags to operate only on blobs with a matching value.
+            e.g. ``\"\\\"tagname\\\"='my tag'\"``
+
+            .. versionadded:: 12.4.0
+
+        :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk:
+            Encrypts the data on the service-side with the given key.
+            Use of customer-provided keys must be done over HTTPS.
+            As the encryption key itself is provided in the request,
+            a secure connection must be established to transfer the key.
+        :keyword str encryption_scope:
+            A predefined encryption scope used to encrypt the data on the service. An encryption
+            scope can be created using the Management API and referenced here by name. If a default
+            encryption scope has been defined at the container, this value will override it if the
+            container-level scope is configured to allow overrides. Otherwise an error will be raised.
+
+            .. versionadded:: 12.2.0
+
+        :keyword str encoding:
+            Defaults to UTF-8.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :returns: Blob-updated property dict (Etag and last modified).
+        :rtype: dict(str, Any)
+        """
+        if self.require_encryption or (self.key_encryption_key is not None):
+            raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION)
+        if kwargs.get('cpk') and self.scheme.lower() != 'https':
+            raise ValueError("Customer provided encryption key must be used over HTTPS.")
+        options = _upload_page_options(
+            page=page,
+            offset=offset,
+            length=length,
+            **kwargs)
+        try:
+            return cast(Dict[str, Any], await self._client.page_blob.upload_pages(**options))
+        except HttpResponseError as error:
+            process_storage_error(error)
+
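+    # Usage sketch (annotation, not part of the SDK source): assuming
+    # `blob_client` is an existing aio BlobClient for a page blob opened over
+    # HTTPS, a single 512-byte-aligned range can be written like so:
+    #
+    #     page = b"\x00" * 512
+    #     resp = await blob_client.upload_page(page, offset=0, length=512)
+    #     print(resp["etag"], resp["last_modified"])
+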
+    @distributed_trace_async
+    async def upload_pages_from_url(
+        self, source_url: str,
+        offset: int,
+        length: int,
+        source_offset: int,
+        **kwargs: Any
+    ) -> Dict[str, Any]:
+        """
+        The Upload Pages operation writes a range of pages to a page blob where
+        the contents are read from a URL.
+
+        :param str source_url:
+            The URL of the source data. It can point to any Azure Blob or File, that is either public or has a
+            shared access signature attached.
+        :param int offset:
+            Start of byte range to use for writing to a section of the blob.
+            Pages must be aligned with 512-byte boundaries: the start offset
+            must be a multiple of 512 and the length must be a multiple of
+            512.
+        :param int length:
+            Number of bytes to use for writing to a section of the blob.
+            Pages must be aligned with 512-byte boundaries: the start offset
+            must be a multiple of 512 and the length must be a multiple of
+            512.
+        :param int source_offset:
+            This indicates the start of the range of bytes (inclusive) that has to be taken from the copy source.
+            The service will read the same number of bytes as the destination range (length-offset).
+        :keyword bytes source_content_md5:
+            If given, the service will calculate the MD5 hash of the block content and compare against this value.
+        :keyword ~datetime.datetime source_if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the source resource has been modified since the specified time.
+        :keyword ~datetime.datetime source_if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the source resource has not been modified since the specified date/time.
+        :keyword str source_etag:
+            The source ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions source_match_condition:
+            The source match condition to use upon the etag.
+        :keyword lease:
+            Required if the blob has an active lease. Value can be a BlobLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
+        :keyword int if_sequence_number_lte:
+            If the blob's sequence number is less than or equal to
+            the specified value, the request proceeds; otherwise it fails.
+        :keyword int if_sequence_number_lt:
+            If the blob's sequence number is less than the specified
+            value, the request proceeds; otherwise it fails.
+        :keyword int if_sequence_number_eq:
+            If the blob's sequence number is equal to the specified
+            value, the request proceeds; otherwise it fails.
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            The destination ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The destination match condition to use upon the etag.
+        :keyword str if_tags_match_condition:
+            Specify a SQL where clause on blob tags to operate only on blob with a matching value.
+            eg. ``\"\\\"tagname\\\"='my tag'\"``
+
+            .. versionadded:: 12.4.0
+
+        :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk:
+            Encrypts the data on the service-side with the given key.
+            Use of customer-provided keys must be done over HTTPS.
+            As the encryption key itself is provided in the request,
+            a secure connection must be established to transfer the key.
+        :keyword str encryption_scope:
+            A predefined encryption scope used to encrypt the data on the service. An encryption
+            scope can be created using the Management API and referenced here by name. If a default
+            encryption scope has been defined at the container, this value will override it if the
+            container-level scope is configured to allow overrides. Otherwise an error will be raised.
+
+            .. versionadded:: 12.2.0
+
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :keyword str source_authorization:
+            Authenticate as a service principal using a client secret to access a source blob. Ensure "bearer " is
+            the prefix of the source_authorization string.
+        :returns: Response after uploading pages from specified URL.
+        :rtype: Dict[str, Any]
+        """
+
+        if self.require_encryption or (self.key_encryption_key is not None):
+            raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION)
+        if kwargs.get('cpk') and self.scheme.lower() != 'https':
+            raise ValueError("Customer provided encryption key must be used over HTTPS.")
+        options = _upload_pages_from_url_options(
+            source_url=source_url,
+            offset=offset,
+            length=length,
+            source_offset=source_offset,
+            **kwargs
+        )
+        try:
+            return cast(Dict[str, Any], await self._client.page_blob.upload_pages_from_url(**options))
+        except HttpResponseError as error:
+            process_storage_error(error)
+
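+    # Usage sketch (annotation, not part of the SDK source): assuming
+    # `blob_client` is an aio BlobClient for a page blob and `src_url` points
+    # at a readable source blob (public or carrying a SAS), the first 4096
+    # bytes of the source could be copied into the same range here:
+    #
+    #     await blob_client.upload_pages_from_url(
+    #         src_url, offset=0, length=4096, source_offset=0)
+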
+    @distributed_trace_async
+    async def clear_page(self, offset: int, length: int, **kwargs: Any) -> Dict[str, Union[str, datetime]]:
+        """Clears a range of pages.
+
+        :param int offset:
+            Start of byte range to use for writing to a section of the blob.
+            Pages must be aligned with 512-byte boundaries: the start offset
+            must be a multiple of 512 and the length must be a multiple of
+            512.
+        :param int length:
+            Number of bytes to use for writing to a section of the blob.
+            Pages must be aligned with 512-byte boundaries: the start offset
+            must be a multiple of 512 and the length must be a multiple of
+            512.
+        :keyword lease:
+            Required if the blob has an active lease. Value can be a BlobLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
+        :keyword int if_sequence_number_lte:
+            If the blob's sequence number is less than or equal to
+            the specified value, the request proceeds; otherwise it fails.
+        :keyword int if_sequence_number_lt:
+            If the blob's sequence number is less than the specified
+            value, the request proceeds; otherwise it fails.
+        :keyword int if_sequence_number_eq:
+            If the blob's sequence number is equal to the specified
+            value, the request proceeds; otherwise it fails.
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword str if_tags_match_condition:
+            Specify a SQL where clause on blob tags to operate only on blob with a matching value.
+            eg. ``\"\\\"tagname\\\"='my tag'\"``
+
+            .. versionadded:: 12.4.0
+
+        :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk:
+            Encrypts the data on the service-side with the given key.
+            Use of customer-provided keys must be done over HTTPS.
+            As the encryption key itself is provided in the request,
+            a secure connection must be established to transfer the key.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :returns: Blob-updated property dict (Etag and last modified).
+        :rtype: dict(str, Any)
+        """
+        if self.require_encryption or (self.key_encryption_key is not None):
+            raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION)
+        if kwargs.get('cpk') and self.scheme.lower() != 'https':
+            raise ValueError("Customer provided encryption key must be used over HTTPS.")
+        options = _clear_page_options(
+            offset=offset,
+            length=length,
+            **kwargs
+        )
+        try:
+            return cast(Dict[str, Any], await self._client.page_blob.clear_pages(**options))
+        except HttpResponseError as error:
+            process_storage_error(error)
+
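+    # Usage sketch (annotation, not part of the SDK source): clearing the
+    # first page of an assumed existing page blob; offsets and lengths must
+    # stay 512-byte aligned, as documented above:
+    #
+    #     await blob_client.clear_page(offset=0, length=512)
+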
+    @distributed_trace_async
+    async def append_block(
+        self, data: Union[bytes, str, Iterable[AnyStr], IO[AnyStr]],
+        length: Optional[int] = None,
+        **kwargs: Any
+    ) -> Dict[str, Union[str, datetime, int]]:
+        """Commits a new block of data to the end of the existing append blob.
+
+        :param data:
+            Content of the block.
+        :type data: Union[bytes, str, Iterable[AnyStr], IO[AnyStr]]
+        :param int length:
+            Size of the block in bytes.
+        :keyword bool validate_content:
+            If true, calculates an MD5 hash of the block content. The storage
+            service compares the hash of the content that has arrived
+            against the hash that was sent. This is primarily valuable for detecting
+            bitflips on the wire if using http instead of https, as https (the default)
+            will already validate. Note that this MD5 hash is not stored with the
+            blob.
+        :keyword int maxsize_condition:
+            Optional conditional header. The max length in bytes permitted for
+            the append blob. If the Append Block operation would cause the blob
+            to exceed that limit or if the blob size is already greater than the
+            value specified in this header, the request will fail with
+            MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed).
+        :keyword int appendpos_condition:
+            Optional conditional header, used only for the Append Block operation.
+            A number indicating the byte offset to compare. Append Block will
+            succeed only if the append position is equal to this number. If it
+            is not, the request will fail with the AppendPositionConditionNotMet error
+            (HTTP status code 412 - Precondition Failed).
+        :keyword lease:
+            Required if the blob has an active lease. Value can be a BlobLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword str if_tags_match_condition:
+            Specify a SQL where clause on blob tags to operate only on blob with a matching value.
+            eg. ``\"\\\"tagname\\\"='my tag'\"``
+
+            .. versionadded:: 12.4.0
+
+        :keyword str encoding:
+            Defaults to UTF-8.
+        :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk:
+            Encrypts the data on the service-side with the given key.
+            Use of customer-provided keys must be done over HTTPS.
+            As the encryption key itself is provided in the request,
+            a secure connection must be established to transfer the key.
+        :keyword str encryption_scope:
+            A predefined encryption scope used to encrypt the data on the service. An encryption
+            scope can be created using the Management API and referenced here by name. If a default
+            encryption scope has been defined at the container, this value will override it if the
+            container-level scope is configured to allow overrides. Otherwise an error will be raised.
+
+            .. versionadded:: 12.2.0
+
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :returns: Blob-updated property dict (Etag, last modified, append offset, committed block count).
+        :rtype: dict(str, Any)
+        """
+        if self.require_encryption or (self.key_encryption_key is not None):
+            raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION)
+        if kwargs.get('cpk') and self.scheme.lower() != 'https':
+            raise ValueError("Customer provided encryption key must be used over HTTPS.")
+        options = _append_block_options(
+            data=data,
+            length=length,
+            **kwargs
+        )
+        try:
+            return cast(Dict[str, Any], await self._client.append_blob.append_block(**options))
+        except HttpResponseError as error:
+            process_storage_error(error)
+
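+    # Usage sketch (annotation, not part of the SDK source): assuming
+    # `blob_client` targets an existing append blob, blocks are committed to
+    # its tail in order; the returned dict reports the new append offset:
+    #
+    #     resp = await blob_client.append_block(b"log line\n")
+    #     print(resp["blob_append_offset"], resp["blob_committed_block_count"])
+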
+    @distributed_trace_async
+    async def append_block_from_url(
+        self, copy_source_url: str,
+        source_offset: Optional[int] = None,
+        source_length: Optional[int] = None,
+        **kwargs: Any
+    ) -> Dict[str, Union[str, datetime, int]]:
+        """
+        Creates a new block to be committed as part of a blob, where the contents are read from a source url.
+
+        :param str copy_source_url:
+            The URL of the source data. It can point to any Azure Blob or File, that is either public or has a
+            shared access signature attached.
+        :param int source_offset:
+            This indicates the start of the range of bytes (inclusive) that has to be taken from the copy source.
+        :param int source_length:
+            This indicates the end of the range of bytes that has to be taken from the copy source.
+        :keyword bytearray source_content_md5:
+            If given, the service will calculate the MD5 hash of the block content and compare against this value.
+        :keyword int maxsize_condition:
+            Optional conditional header. The max length in bytes permitted for
+            the append blob. If the Append Block operation would cause the blob
+            to exceed that limit or if the blob size is already greater than the
+            value specified in this header, the request will fail with
+            MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed).
+        :keyword int appendpos_condition:
+            Optional conditional header, used only for the Append Block operation.
+            A number indicating the byte offset to compare. Append Block will
+            succeed only if the append position is equal to this number. If it
+            is not, the request will fail with the
+            AppendPositionConditionNotMet error
+            (HTTP status code 412 - Precondition Failed).
+        :keyword lease:
+            Required if the blob has an active lease. Value can be a BlobLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            The destination ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The destination match condition to use upon the etag.
+        :keyword str if_tags_match_condition:
+            Specify a SQL where clause on blob tags to operate only on blob with a matching value.
+            eg. ``\"\\\"tagname\\\"='my tag'\"``
+
+            .. versionadded:: 12.4.0
+
+        :keyword ~datetime.datetime source_if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the source resource has been modified since the specified time.
+        :keyword ~datetime.datetime source_if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the source resource has not been modified since the specified date/time.
+        :keyword str source_etag:
+            The source ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions source_match_condition:
+            The source match condition to use upon the etag.
+        :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk:
+            Encrypts the data on the service-side with the given key.
+            Use of customer-provided keys must be done over HTTPS.
+            As the encryption key itself is provided in the request,
+            a secure connection must be established to transfer the key.
+        :keyword str encryption_scope:
+            A predefined encryption scope used to encrypt the data on the service. An encryption
+            scope can be created using the Management API and referenced here by name. If a default
+            encryption scope has been defined at the container, this value will override it if the
+            container-level scope is configured to allow overrides. Otherwise an error will be raised.
+
+            .. versionadded:: 12.2.0
+
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :keyword str source_authorization:
+            Authenticate as a service principal using a client secret to access a source blob. Ensure "bearer " is
+            the prefix of the source_authorization string.
+        :returns: Result after appending a new block.
+        :rtype: Dict[str, Union[str, datetime, int]]
+        """
+        if self.require_encryption or (self.key_encryption_key is not None):
+            raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION)
+        if kwargs.get('cpk') and self.scheme.lower() != 'https':
+            raise ValueError("Customer provided encryption key must be used over HTTPS.")
+        options = _append_block_from_url_options(
+            copy_source_url=copy_source_url,
+            source_offset=source_offset,
+            source_length=source_length,
+            **kwargs
+        )
+        try:
+            return cast(Dict[str, Union[str, datetime, int]],
+                        await self._client.append_blob.append_block_from_url(**options))
+        except HttpResponseError as error:
+            process_storage_error(error)
+
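+    # Usage sketch (annotation, not part of the SDK source): appending the
+    # first kilobyte of an assumed source blob URL (public or SAS-authorized)
+    # to this append blob:
+    #
+    #     await blob_client.append_block_from_url(
+    #         src_url, source_offset=0, source_length=1024)
+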
+    @distributed_trace_async
+    async def seal_append_blob(self, **kwargs: Any) -> Dict[str, Union[str, datetime, int]]:
+        """The Seal operation seals the Append Blob to make it read-only.
+
+            .. versionadded:: 12.4.0
+
+        :keyword int appendpos_condition:
+            Optional conditional header, used only for the Append Block operation.
+            A number indicating the byte offset to compare. Append Block will
+            succeed only if the append position is equal to this number. If it
+            is not, the request will fail with the AppendPositionConditionNotMet error
+            (HTTP status code 412 - Precondition Failed).
+        :keyword lease:
+            Required if the blob has an active lease. Value can be a BlobLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :returns: Blob-updated property dict (Etag, last modified, append offset, committed block count).
+        :rtype: dict(str, Any)
+        """
+        if self.require_encryption or (self.key_encryption_key is not None):
+            raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION)
+        options = _seal_append_blob_options(**kwargs)
+        try:
+            return cast(Dict[str, Any], await self._client.append_blob.seal(**options))
+        except HttpResponseError as error:
+            process_storage_error(error)
+
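+    # Usage sketch (annotation, not part of the SDK source): once sealed, an
+    # append blob becomes read-only and rejects further append_block calls:
+    #
+    #     await blob_client.seal_append_blob()
+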
+    def _get_container_client(self) -> "ContainerClient":
+        """Get a client to interact with the blob's parent container.
+
+        The container need not already exist. Defaults to the current blob's credentials.
+
+        :returns: A ContainerClient.
+        :rtype: ~azure.storage.blob.ContainerClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/blob_samples_containers_async.py
+                :start-after: [START get_container_client_from_blob_client]
+                :end-before: [END get_container_client_from_blob_client]
+                :language: python
+                :dedent: 12
+                :caption: Get container client from blob object.
+        """
+        from ._container_client_async import ContainerClient
+        if not isinstance(self._pipeline._transport, AsyncTransportWrapper): # pylint: disable = protected-access
+            _pipeline = AsyncPipeline(
+                transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access
+                policies=cast(Iterable["AsyncHTTPPolicy"],
+                              self._pipeline._impl_policies) # pylint: disable = protected-access
+            )
+        else:
+            _pipeline = self._pipeline
+        return ContainerClient(
+            f"{self.scheme}://{self.primary_hostname}", container_name=self.container_name,
+            credential=self._raw_credential, api_version=self.api_version, _configuration=self._config,
+            _pipeline=_pipeline, _location_mode=self._location_mode, _hosts=self._hosts,
+            require_encryption=self.require_encryption, encryption_version=self.encryption_version,
+            key_encryption_key=self.key_encryption_key, key_resolver_function=self.key_resolver_function)
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/blob/aio/_blob_service_client_async.py b/.venv/lib/python3.12/site-packages/azure/storage/blob/aio/_blob_service_client_async.py
new file mode 100644
index 00000000..8f76aa98
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/blob/aio/_blob_service_client_async.py
@@ -0,0 +1,799 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+# pylint: disable=docstring-keyword-should-match-keyword-only
+
+import functools
+import warnings
+from typing import (
+    Any, cast, Dict, Iterable, List, Optional, Union,
+    TYPE_CHECKING
+)
+from typing_extensions import Self
+
+from azure.core.async_paging import AsyncItemPaged
+from azure.core.exceptions import HttpResponseError
+from azure.core.pipeline import AsyncPipeline
+from azure.core.tracing.decorator import distributed_trace
+from azure.core.tracing.decorator_async import distributed_trace_async
+
+from ._blob_client_async import BlobClient
+from ._container_client_async import ContainerClient
+from ._models import ContainerPropertiesPaged, FilteredBlobPaged
+from .._blob_service_client_helpers import _parse_url
+from .._deserialize import service_properties_deserialize, service_stats_deserialize
+from .._encryption import StorageEncryptionMixin
+from .._generated.aio import AzureBlobStorage
+from .._generated.models import StorageServiceProperties, KeyInfo
+from .._models import BlobProperties, ContainerProperties, CorsRule
+from .._serialize import get_api_version
+from .._shared.base_client import parse_query, StorageAccountHostsMixin
+from .._shared.base_client_async import parse_connection_str
+from .._shared.base_client_async import AsyncStorageAccountHostsMixin, AsyncTransportWrapper
+from .._shared.response_handlers import (
+    parse_to_internal_user_delegation_key,
+    process_storage_error,
+    return_response_headers,
+)
+from .._shared.models import LocationMode
+from .._shared.parser import _to_utc_datetime
+from .._shared.policies_async import ExponentialRetry
+
+if TYPE_CHECKING:
+    from azure.core.credentials import AzureNamedKeyCredential, AzureSasCredential
+    from azure.core.credentials_async import AsyncTokenCredential
+    from azure.core.pipeline.policies import AsyncHTTPPolicy
+    from datetime import datetime
+    from ._lease_async import BlobLeaseClient
+    from .._models import (
+        BlobAnalyticsLogging,
+        FilteredBlob,
+        Metrics,
+        PublicAccess,
+        RetentionPolicy,
+        StaticWebsite
+    )
+    from .._shared.models import UserDelegationKey
+
+
+class BlobServiceClient(  # type: ignore [misc]
+    AsyncStorageAccountHostsMixin,
+    StorageAccountHostsMixin,
+    StorageEncryptionMixin
+):
+    """A client to interact with the Blob Service at the account level.
+
+    This client provides operations to retrieve and configure the account properties
+    as well as list, create and delete containers within the account.
+    For operations relating to a specific container or blob, clients for those entities
+    can also be retrieved using the `get_client` functions.
+
+    :param str account_url:
+        The URL to the blob storage account. Any other entities included
+        in the URL path (e.g. container or blob) will be discarded. This URL can be optionally
+        authenticated with a SAS token.
+    :param credential:
+        The credentials with which to authenticate. This is optional if the
+        account URL already has a SAS token. The value can be a SAS token string,
+        an instance of a AzureSasCredential or AzureNamedKeyCredential from azure.core.credentials,
+        an account shared access key, or an instance of a TokenCredentials class from azure.identity.
+        If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential
+        - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError.
+        If using an instance of AzureNamedKeyCredential, "name" should be the storage account name, and "key"
+        should be the storage account key.
+    :keyword str api_version:
+        The Storage API version to use for requests. Default value is the most recent service version that is
+        compatible with the current SDK. Setting to an older version may result in reduced feature compatibility.
+
+        .. versionadded:: 12.2.0
+
+    :keyword str secondary_hostname:
+        The hostname of the secondary endpoint.
+    :keyword int max_block_size: The maximum chunk size for uploading a block blob in chunks.
+        Defaults to 4*1024*1024, or 4MB.
+    :keyword int max_single_put_size: If the blob size is less than or equal to max_single_put_size, then the blob
+        will be uploaded with only one http PUT request. If the blob size is larger than max_single_put_size,
+        the blob will be uploaded in chunks. Defaults to 64*1024*1024, or 64MB.
+    :keyword int min_large_block_upload_threshold: The minimum chunk size required to use the memory efficient
+        algorithm when uploading a block blob. Defaults to 4*1024*1024+1.
+    :keyword bool use_byte_buffer: Use a byte buffer for block blob uploads. Defaults to False.
+    :keyword int max_page_size: The maximum chunk size for uploading a page blob. Defaults to 4*1024*1024, or 4MB.
+    :keyword int max_single_get_size: The maximum size for a blob to be downloaded in a single call;
+        any excess will be downloaded in chunks (potentially in parallel). Defaults to 32*1024*1024, or 32MB.
+    :keyword int max_chunk_get_size: The maximum chunk size used for downloading a blob. Defaults to 4*1024*1024,
+        or 4MB.
+    :keyword str audience: The audience to use when requesting tokens for Azure Active Directory
+        authentication. Only has an effect when credential is of type TokenCredential. The value could be
+        https://storage.azure.com/ (default) or https://<account>.blob.core.windows.net.
+
+    .. admonition:: Example:
+
+        .. literalinclude:: ../samples/blob_samples_authentication_async.py
+            :start-after: [START create_blob_service_client]
+            :end-before: [END create_blob_service_client]
+            :language: python
+            :dedent: 8
+            :caption: Creating the BlobServiceClient with account url and credential.
+
+        .. literalinclude:: ../samples/blob_samples_authentication_async.py
+            :start-after: [START create_blob_service_client_oauth]
+            :end-before: [END create_blob_service_client_oauth]
+            :language: python
+            :dedent: 8
+            :caption: Creating the BlobServiceClient with Azure Identity credentials.
+    """
+
+    def __init__(
+        self, account_url: str,
+        credential: Optional[Union[str, Dict[str, str], "AzureNamedKeyCredential", "AzureSasCredential", "AsyncTokenCredential"]] = None,  # pylint: disable=line-too-long
+        **kwargs: Any
+    ) -> None:
+        kwargs['retry_policy'] = kwargs.get('retry_policy') or ExponentialRetry(**kwargs)
+        parsed_url, sas_token = _parse_url(account_url=account_url)
+        _, sas_token = parse_query(parsed_url.query)
+        self._query_str, credential = self._format_query_string(sas_token, credential)
+        super(BlobServiceClient, self).__init__(parsed_url, service='blob', credential=credential, **kwargs)
+        self._client = AzureBlobStorage(self.url, base_url=self.url, pipeline=self._pipeline)
+        self._client._config.version = get_api_version(kwargs)  # type: ignore [assignment]
+        self._configure_encryption(kwargs)
+
+    def _format_url(self, hostname):
+        """Format the endpoint URL according to the current location
+        mode hostname.
+
+        :param str hostname:
+            The hostname of the current location mode.
+        :returns: A formatted endpoint URL including current location mode hostname.
+        :rtype: str
+        """
+        return f"{self.scheme}://{hostname}/{self._query_str}"
+
+    @classmethod
+    def from_connection_string(
+        cls, conn_str: str,
+        credential: Optional[Union[str, Dict[str, str], "AzureNamedKeyCredential", "AzureSasCredential", "AsyncTokenCredential"]] = None,  # pylint: disable=line-too-long
+        **kwargs: Any
+    ) -> Self:
+        """Create BlobServiceClient from a Connection String.
+
+        :param str conn_str:
+            A connection string to an Azure Storage account.
+        :param credential:
+            The credentials with which to authenticate. This is optional if the
+            account URL already has a SAS token, or the connection string already has shared
+            access key values. The value can be a SAS token string,
+            an instance of a AzureSasCredential or AzureNamedKeyCredential from azure.core.credentials,
+            an account shared access key, or an instance of a TokenCredentials class from azure.identity.
+            Credentials provided here will take precedence over those in the connection string.
+            If using an instance of AzureNamedKeyCredential, "name" should be the storage account name, and "key"
+            should be the storage account key.
+        :type credential:
+            ~azure.core.credentials.AzureNamedKeyCredential or
+            ~azure.core.credentials.AzureSasCredential or
+            ~azure.core.credentials_async.AsyncTokenCredential or
+            str or dict[str, str] or None
+        :keyword str audience: The audience to use when requesting tokens for Azure Active Directory
+            authentication. Only has an effect when credential is of type TokenCredential. The value could be
+            https://storage.azure.com/ (default) or https://<account>.blob.core.windows.net.
+        :returns: A Blob service client.
+        :rtype: ~azure.storage.blob.BlobServiceClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/blob_samples_authentication.py
+                :start-after: [START auth_from_connection_string]
+                :end-before: [END auth_from_connection_string]
+                :language: python
+                :dedent: 8
+                :caption: Creating the BlobServiceClient from a connection string.
+        """
+        account_url, secondary, credential = parse_connection_str(conn_str, credential, 'blob')
+        if 'secondary_hostname' not in kwargs:
+            kwargs['secondary_hostname'] = secondary
+        return cls(account_url, credential=credential, **kwargs)
+
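+    # Usage sketch (annotation, not part of the SDK source): the connection
+    # string below is a placeholder, not a working credential:
+    #
+    #     conn_str = "DefaultEndpointsProtocol=https;AccountName=...;AccountKey=...;EndpointSuffix=core.windows.net"
+    #     service = BlobServiceClient.from_connection_string(conn_str)
+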
+    @distributed_trace_async
+    async def get_user_delegation_key(
+        self, key_start_time: "datetime",
+        key_expiry_time: "datetime",
+        **kwargs: Any
+    ) -> "UserDelegationKey":
+        """
+        Obtain a user delegation key for the purpose of signing SAS tokens.
+        A token credential must be present on the service object for this request to succeed.
+
+        :param ~datetime.datetime key_start_time:
+            A DateTime value. Indicates when the key becomes valid.
+        :param ~datetime.datetime key_expiry_time:
+            A DateTime value. Indicates when the key stops being valid.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :return: The user delegation key.
+        :rtype: ~azure.storage.blob.UserDelegationKey
+        """
+        key_info = KeyInfo(start=_to_utc_datetime(key_start_time), expiry=_to_utc_datetime(key_expiry_time))
+        timeout = kwargs.pop('timeout', None)
+        try:
+            user_delegation_key = await self._client.service.get_user_delegation_key(key_info=key_info,
+                                                                                     timeout=timeout,
+                                                                                     **kwargs)  # type: ignore
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+        return parse_to_internal_user_delegation_key(user_delegation_key)  # type: ignore
+
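+    # Usage sketch (annotation, not part of the SDK source): assuming
+    # `service` was built with an async token credential (e.g. an
+    # azure.identity.aio DefaultAzureCredential), a key valid for one hour
+    # could be requested like so:
+    #
+    #     from datetime import datetime, timedelta, timezone
+    #     start = datetime.now(timezone.utc)
+    #     key = await service.get_user_delegation_key(start, start + timedelta(hours=1))
+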
+    @distributed_trace_async
+    async def get_account_information(self, **kwargs: Any) -> Dict[str, str]:
+        """Gets information related to the storage account.
+
+        The information can also be retrieved if the user has a SAS to a container or blob.
+        The keys in the returned dictionary include 'sku_name' and 'account_kind'.
+
+        :returns: A dict of account information (SKU and account type).
+        :rtype: dict(str, str)
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/blob_samples_service_async.py
+                :start-after: [START get_blob_service_account_info]
+                :end-before: [END get_blob_service_account_info]
+                :language: python
+                :dedent: 12
+                :caption: Getting account information for the blob service.
+        """
+        try:
+            return await self._client.service.get_account_info(cls=return_response_headers, **kwargs) # type: ignore
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    @distributed_trace_async
+    async def get_service_stats(self, **kwargs: Any) -> Dict[str, Any]:
+        """Retrieves statistics related to replication for the Blob service.
+
+        It is only available when read-access geo-redundant replication is enabled for
+        the storage account.
+
+        With geo-redundant replication, Azure Storage maintains your data durably
+        in two locations. In both locations, Azure Storage constantly maintains
+        multiple healthy replicas of your data. The location where you read,
+        create, update, or delete data is the primary storage account location.
+        The primary location exists in the region you choose at the time you
+        create an account via the Azure portal, for
+        example, North Central US. The location to which your data is replicated
+        is the secondary location. The secondary location is automatically
+        determined based on the location of the primary; it is in a second data
+        center that resides in the same region as the primary location. Read-only
+        access is available from the secondary location, if read-access geo-redundant
+        replication is enabled for your storage account.
+
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :return: The blob service stats.
+        :rtype: Dict[str, Any]
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/blob_samples_service_async.py
+                :start-after: [START get_blob_service_stats]
+                :end-before: [END get_blob_service_stats]
+                :language: python
+                :dedent: 12
+                :caption: Getting service stats for the blob service.
+        """
+        timeout = kwargs.pop('timeout', None)
+        try:
+            stats = await self._client.service.get_statistics( # type: ignore
+                timeout=timeout, use_location=LocationMode.SECONDARY, **kwargs)
+            return service_stats_deserialize(stats)
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    @distributed_trace_async
+    async def get_service_properties(self, **kwargs: Any) -> Dict[str, Any]:
+        """Gets the properties of a storage account's Blob service, including
+        Azure Storage Analytics.
+
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :returns: An object containing blob service properties such as
+            analytics logging, hour/minute metrics, cors rules, etc.
+        :rtype: Dict[str, Any]
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/blob_samples_service_async.py
+                :start-after: [START get_blob_service_properties]
+                :end-before: [END get_blob_service_properties]
+                :language: python
+                :dedent: 12
+                :caption: Getting service properties for the blob service.
+        """
+        timeout = kwargs.pop('timeout', None)
+        try:
+            service_props = await self._client.service.get_properties(timeout=timeout, **kwargs)
+            return service_properties_deserialize(service_props)
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    @distributed_trace_async
+    async def set_service_properties(
+        self, analytics_logging: Optional["BlobAnalyticsLogging"] = None,
+        hour_metrics: Optional["Metrics"] = None,
+        minute_metrics: Optional["Metrics"] = None,
+        cors: Optional[List[CorsRule]] = None,
+        target_version: Optional[str] = None,
+        delete_retention_policy: Optional["RetentionPolicy"] = None,
+        static_website: Optional["StaticWebsite"] = None,
+        **kwargs: Any
+    ) -> None:
+        """Sets the properties of a storage account's Blob service, including
+        Azure Storage Analytics.
+
+        If an element (e.g. analytics_logging) is left as None, the
+        existing settings on the service for that functionality are preserved.
+
+        :param analytics_logging:
+            Groups the Azure Analytics Logging settings.
+        :type analytics_logging: ~azure.storage.blob.BlobAnalyticsLogging
+        :param hour_metrics:
+            The hour metrics settings provide a summary of request
+            statistics grouped by API in hourly aggregates for blobs.
+        :type hour_metrics: ~azure.storage.blob.Metrics
+        :param minute_metrics:
+            The minute metrics settings provide request statistics
+            for each minute for blobs.
+        :type minute_metrics: ~azure.storage.blob.Metrics
+        :param cors:
+            You can include up to five CorsRule elements in the
+            list. If an empty list is specified, all CORS rules will be deleted,
+            and CORS will be disabled for the service.
+        :type cors: list[~azure.storage.blob.CorsRule]
+        :param str target_version:
+            Indicates the default version to use for requests if an incoming
+            request's version is not specified.
+        :param delete_retention_policy:
+            The delete retention policy specifies whether to retain deleted blobs.
+            It also specifies the number of days and versions of blob to keep.
+        :type delete_retention_policy: ~azure.storage.blob.RetentionPolicy
+        :param static_website:
+            Specifies whether the static website feature is enabled,
+            and if yes, indicates the index document and 404 error document to use.
+        :type static_website: ~azure.storage.blob.StaticWebsite
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :rtype: None
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/blob_samples_service_async.py
+                :start-after: [START set_blob_service_properties]
+                :end-before: [END set_blob_service_properties]
+                :language: python
+                :dedent: 12
+                :caption: Setting service properties for the blob service.
+        """
+        if all(parameter is None for parameter in [
+                    analytics_logging, hour_metrics, minute_metrics, cors,
+                    target_version, delete_retention_policy, static_website]):
+            raise ValueError("set_service_properties should be called with at least one parameter")
+
+        props = StorageServiceProperties(
+            logging=analytics_logging,
+            hour_metrics=hour_metrics,
+            minute_metrics=minute_metrics,
+            cors=CorsRule._to_generated(cors), # pylint: disable=protected-access
+            default_service_version=target_version,
+            delete_retention_policy=delete_retention_policy,
+            static_website=static_website
+        )
+        timeout = kwargs.pop('timeout', None)
+        try:
+            await self._client.service.set_properties(props, timeout=timeout, **kwargs)
+        except HttpResponseError as error:
+            process_storage_error(error)
+
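+    # Usage sketch (annotation, not part of the SDK source): enabling a
+    # 7-day delete retention policy while leaving every other setting as-is
+    # (unspecified elements are preserved, per the docstring above):
+    #
+    #     from azure.storage.blob import RetentionPolicy
+    #     policy = RetentionPolicy(enabled=True, days=7)
+    #     await service.set_service_properties(delete_retention_policy=policy)
+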
+    @distributed_trace
+    def list_containers(
+        self, name_starts_with: Optional[str] = None,
+        include_metadata: bool = False,
+        **kwargs: Any
+    ) -> AsyncItemPaged[ContainerProperties]:
+        """Returns a generator to list the containers under the specified account.
+
+        The generator will lazily follow the continuation tokens returned by
+        the service and stop when all containers have been returned.
+
+        :param str name_starts_with:
+            Filters the results to return only containers whose names
+            begin with the specified prefix.
+        :param bool include_metadata:
+            Specifies that container metadata be returned in the response.
+            The default value is `False`.
+        :keyword bool include_deleted:
+            Specifies that deleted containers be returned in the response. This is only for accounts with
+            container restore enabled. The default value is `False`.
+
+            .. versionadded:: 12.4.0
+
+        :keyword bool include_system:
+            Flag specifying that system containers should be included.
+
+            .. versionadded:: 12.10.0
+
+        :keyword int results_per_page:
+            The maximum number of container names to retrieve per API
+            call. If the request does not specify, the server will return up to 5,000 items.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :returns: An iterable (auto-paging) of ContainerProperties.
+        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.storage.blob.ContainerProperties]
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/blob_samples_service_async.py
+                :start-after: [START bsc_list_containers]
+                :end-before: [END bsc_list_containers]
+                :language: python
+                :dedent: 16
+                :caption: Listing the containers in the blob service.
+        """
+        include = ['metadata'] if include_metadata else []
+        include_deleted = kwargs.pop('include_deleted', None)
+        if include_deleted:
+            include.append("deleted")
+        include_system = kwargs.pop('include_system', None)
+        if include_system:
+            include.append("system")
+        timeout = kwargs.pop('timeout', None)
+        results_per_page = kwargs.pop('results_per_page', None)
+        command = functools.partial(
+            self._client.service.list_containers_segment,
+            prefix=name_starts_with,
+            include=include,
+            timeout=timeout,
+            **kwargs)
+        return AsyncItemPaged(
+            command,
+            prefix=name_starts_with,
+            results_per_page=results_per_page,
+            page_iterator_class=ContainerPropertiesPaged
+        )
+
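+    # Usage sketch (illustrative comment, not part of the SDK source): consuming the
+    # pager returned above. The account URL and `credential` are placeholders.
+    #
+    #   async with BlobServiceClient("https://<account>.blob.core.windows.net",
+    #                                credential=credential) as service:
+    #       async for container in service.list_containers(name_starts_with="logs",
+    #                                                      include_metadata=True):
+    #           print(container.name, container.metadata)
+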
+    @distributed_trace
+    def find_blobs_by_tags(self, filter_expression: str, **kwargs: Any) -> AsyncItemPaged["FilteredBlob"]:
+        """The Filter Blobs operation enables callers to list blobs across all
+        containers whose tags match a given search expression. Filter Blobs
+        searches across all containers within a storage account but can be
+        scoped within the expression to a single container.
+
+        :param str filter_expression:
+            The expression to find blobs whose tags match the specified condition.
+            e.g. "\"yourtagname\"='firsttag' and \"yourtagname2\"='secondtag'"
+            To specify a container, e.g. "@container='containerName' and \"Name\"='C'"
+        :keyword int results_per_page:
+            The max result per page when paginating.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :returns: An iterable (auto-paging) response of FilteredBlob.
+        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.storage.blob.FilteredBlob]
+        """
+
+        results_per_page = kwargs.pop('results_per_page', None)
+        timeout = kwargs.pop('timeout', None)
+        command = functools.partial(
+            self._client.service.filter_blobs,
+            where=filter_expression,
+            timeout=timeout,
+            **kwargs)
+        return AsyncItemPaged(
+            command, results_per_page=results_per_page,
+            page_iterator_class=FilteredBlobPaged)
+
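+    # Usage sketch (illustrative comment): each FilteredBlob result exposes `name`,
+    # `container_name` and `tags`. The tag name/value below are placeholders and
+    # `service` is an authenticated BlobServiceClient.
+    #
+    #   async for blob in service.find_blobs_by_tags("\"project\"='demo'"):
+    #       print(blob.container_name, blob.name, blob.tags)
+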
+    @distributed_trace_async
+    async def create_container(
+        self, name: str,
+        metadata: Optional[Dict[str, str]] = None,
+        public_access: Optional[Union["PublicAccess", str]] = None,
+        **kwargs: Any
+    ) -> ContainerClient:
+        """Creates a new container under the specified account.
+
+        If the container with the same name already exists, a ResourceExistsError will
+        be raised. This method returns a client with which to interact with the newly
+        created container.
+
+        :param str name: The name of the container to create.
+        :param metadata:
+            A dict with name-value pairs to associate with the
+            container as metadata. Example: `{'Category':'test'}`
+        :type metadata: dict(str, str)
+        :param public_access:
+            Possible values include: 'container', 'blob'.
+        :type public_access: str or ~azure.storage.blob.PublicAccess
+        :keyword container_encryption_scope:
+            Specifies the default encryption scope to set on the container and use for
+            all future writes.
+
+            .. versionadded:: 12.2.0
+
+        :paramtype container_encryption_scope: dict or ~azure.storage.blob.ContainerEncryptionScope
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :returns: A container client to interact with the newly created container.
+        :rtype: ~azure.storage.blob.aio.ContainerClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/blob_samples_service_async.py
+                :start-after: [START bsc_create_container]
+                :end-before: [END bsc_create_container]
+                :language: python
+                :dedent: 16
+                :caption: Creating a container in the blob service.
+        """
+        container = self.get_container_client(name)
+        timeout = kwargs.pop('timeout', None)
+        kwargs.setdefault('merge_span', True)
+        await container.create_container(
+            metadata=metadata, public_access=public_access, timeout=timeout, **kwargs)
+        return container
+
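+    # Usage sketch (illustrative comment): create-or-reuse pattern. The container
+    # name is a placeholder; ResourceExistsError is raised when the name is taken.
+    #
+    #   from azure.core.exceptions import ResourceExistsError
+    #   try:
+    #       container_client = await service.create_container("mycontainer")
+    #   except ResourceExistsError:
+    #       container_client = service.get_container_client("mycontainer")
+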
+    @distributed_trace_async
+    async def delete_container(
+        self, container: Union[ContainerProperties, str],
+        lease: Optional[Union["BlobLeaseClient", str]] = None,
+        **kwargs: Any
+    ) -> None:
+        """Marks the specified container for deletion.
+
+        The container and any blobs contained within it are later deleted during garbage collection.
+        If the container is not found, a ResourceNotFoundError will be raised.
+
+        :param container:
+            The container to delete. This can either be the name of the container,
+            or an instance of ContainerProperties.
+        :type container: str or ~azure.storage.blob.ContainerProperties
+        :param lease:
+            If specified, delete_container only succeeds if the
+            container's lease is active and matches this ID.
+            Required if the container has an active lease.
+        :type lease: ~azure.storage.blob.aio.BlobLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :rtype: None
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/blob_samples_service_async.py
+                :start-after: [START bsc_delete_container]
+                :end-before: [END bsc_delete_container]
+                :language: python
+                :dedent: 16
+                :caption: Deleting a container in the blob service.
+        """
+        container_client = self.get_container_client(container)
+        kwargs.setdefault('merge_span', True)
+        timeout = kwargs.pop('timeout', None)
+        await container_client.delete_container(
+            lease=lease,
+            timeout=timeout,
+            **kwargs)
+
+    @distributed_trace_async
+    async def _rename_container(self, name: str, new_name: str, **kwargs: Any) -> ContainerClient:
+        """Renames a container.
+
+        Operation is successful only if the source container exists.
+
+        :param str name:
+            The name of the container to rename.
+        :param str new_name:
+            The new name for the renamed container.
+        :keyword lease:
+            Specify this to perform the operation only if the lease ID given
+            matches the active lease ID of the source container.
+        :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :returns: A container client for the renamed container.
+        :rtype: ~azure.storage.blob.ContainerClient
+        """
+        renamed_container = self.get_container_client(new_name)
+        lease = kwargs.pop('lease', None)
+        try:
+            kwargs['source_lease_id'] = lease.id
+        except AttributeError:
+            kwargs['source_lease_id'] = lease
+        try:
+            await renamed_container._client.container.rename(name, **kwargs)   # pylint: disable = protected-access
+            return renamed_container
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    @distributed_trace_async
+    async def undelete_container(
+        self, deleted_container_name: str,
+        deleted_container_version: str,
+        **kwargs: Any
+    ) -> ContainerClient:
+        """Restores soft-deleted container.
+
+        Operation will only be successful if used within the specified number of days
+        set in the delete retention policy.
+
+        .. versionadded:: 12.4.0
+            This operation was introduced in API version '2019-12-12'.
+
+        :param str deleted_container_name:
+            Specifies the name of the deleted container to restore.
+        :param str deleted_container_version:
+            Specifies the version of the deleted container to restore.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :returns: The recovered soft-deleted ContainerClient.
+        :rtype: ~azure.storage.blob.aio.ContainerClient
+        """
+        new_name = kwargs.pop('new_name', None)
+        if new_name:
+            warnings.warn("`new_name` is no longer supported.", DeprecationWarning)
+        container = self.get_container_client(new_name or deleted_container_name)
+        try:
+            await container._client.container.restore(deleted_container_name=deleted_container_name, # pylint: disable = protected-access
+                                                      deleted_container_version=deleted_container_version,
+                                                      timeout=kwargs.pop('timeout', None), **kwargs)
+            return container
+        except HttpResponseError as error:
+            process_storage_error(error)
+
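+    # Usage sketch (illustrative comment): pairing list_containers(include_deleted=True)
+    # with undelete_container to restore a soft-deleted container. Assumes an account
+    # with container restore enabled; the container name is a placeholder.
+    #
+    #   async for container in service.list_containers(include_deleted=True):
+    #       if container.deleted and container.name == "mycontainer":
+    #           restored = await service.undelete_container(container.name, container.version)
+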
+    def get_container_client(self, container: Union[ContainerProperties, str]) -> ContainerClient:
+        """Get a client to interact with the specified container.
+
+        The container need not already exist.
+
+        :param container:
+            The container. This can either be the name of the container,
+            or an instance of ContainerProperties.
+        :type container: str or ~azure.storage.blob.ContainerProperties
+        :returns: A ContainerClient.
+        :rtype: ~azure.storage.blob.aio.ContainerClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/blob_samples_service_async.py
+                :start-after: [START bsc_get_container_client]
+                :end-before: [END bsc_get_container_client]
+                :language: python
+                :dedent: 12
+                :caption: Getting the container client to interact with a specific container.
+        """
+        if isinstance(container, ContainerProperties):
+            container_name = container.name
+        else:
+            container_name = container
+        _pipeline = AsyncPipeline(
+            transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access
+            policies=self._pipeline._impl_policies #type: ignore [arg-type] # pylint: disable = protected-access
+        )
+        return ContainerClient(
+            self.url, container_name=container_name,
+            credential=self.credential, api_version=self.api_version, _configuration=self._config,
+            _pipeline=_pipeline, _location_mode=self._location_mode, _hosts=self._hosts,
+            require_encryption=self.require_encryption, encryption_version=self.encryption_version,
+            key_encryption_key=self.key_encryption_key, key_resolver_function=self.key_resolver_function)
+
+    def get_blob_client(
+        self, container: Union[ContainerProperties, str],
+        blob: str,
+        snapshot: Optional[Union[Dict[str, Any], str]] = None,
+        *,
+        version_id: Optional[str] = None
+    ) -> BlobClient:
+        """Get a client to interact with the specified blob.
+
+        The blob need not already exist.
+
+        :param container:
+            The container that the blob is in. This can either be the name of the container,
+            or an instance of ContainerProperties.
+        :type container: str or ~azure.storage.blob.ContainerProperties
+        :param str blob:
+            The blob with which to interact.
+        :param snapshot:
+            The optional blob snapshot on which to operate. This can either be the ID of the snapshot,
+            or a dictionary output returned by
+            :func:`~azure.storage.blob.aio.BlobClient.create_snapshot()`.
+        :type snapshot: str or dict(str, Any)
+        :keyword str version_id: The version id parameter is an opaque DateTime value that, when present,
+            specifies the version of the blob to operate on.
+        :returns: A BlobClient.
+        :rtype: ~azure.storage.blob.aio.BlobClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/blob_samples_service_async.py
+                :start-after: [START bsc_get_blob_client]
+                :end-before: [END bsc_get_blob_client]
+                :language: python
+                :dedent: 16
+                :caption: Getting the blob client to interact with a specific blob.
+        """
+        if isinstance(blob, BlobProperties):
+            warnings.warn(
+                "The use of a 'BlobProperties' instance for param blob is deprecated. " +
+                "Please use 'BlobProperties.name' or any other str input type instead.",
+                DeprecationWarning
+            )
+            blob_name = blob.name
+        else:
+            blob_name = blob
+        if isinstance(container, ContainerProperties):
+            container_name = container.name
+        else:
+            container_name = container
+        _pipeline = AsyncPipeline(
+            transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access
+            policies=cast(Iterable["AsyncHTTPPolicy"],
+                          self._pipeline._impl_policies) # pylint: disable = protected-access
+        )
+        return BlobClient(
+            self.url, container_name=container_name, blob_name=blob_name, snapshot=snapshot,
+            credential=self.credential, api_version=self.api_version, _configuration=self._config,
+            _pipeline=_pipeline, _location_mode=self._location_mode, _hosts=self._hosts,
+            require_encryption=self.require_encryption, encryption_version=self.encryption_version,
+            key_encryption_key=self.key_encryption_key, key_resolver_function=self.key_resolver_function,
+            version_id=version_id)
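+
+
+# Usage sketch (illustrative comment): get_blob_client builds a client without any
+# network call, so the blob need not exist yet. Container and blob names below are
+# placeholders.
+#
+#   blob_client = service.get_blob_client("mycontainer", "folder/data.csv")
+#   downloader = await blob_client.download_blob()
+#   data = await downloader.readall()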
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/blob/aio/_container_client_async.py b/.venv/lib/python3.12/site-packages/azure/storage/blob/aio/_container_client_async.py
new file mode 100644
index 00000000..306e3acf
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/blob/aio/_container_client_async.py
@@ -0,0 +1,1611 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+# pylint: disable=too-many-lines, docstring-keyword-should-match-keyword-only
+
+import functools
+import warnings
+from datetime import datetime
+from typing import (
+    Any, AnyStr, AsyncIterable, AsyncIterator, cast, Dict, List, IO, Iterable, Optional, overload, Union,
+    TYPE_CHECKING
+)
+from urllib.parse import unquote, urlparse
+from typing_extensions import Self
+
+from azure.core.async_paging import AsyncItemPaged, AsyncList
+from azure.core.exceptions import HttpResponseError, ResourceNotFoundError
+from azure.core.pipeline import AsyncPipeline
+from azure.core.pipeline.transport import AsyncHttpResponse  # pylint: disable=C4756
+from azure.core.tracing.decorator import distributed_trace
+from azure.core.tracing.decorator_async import distributed_trace_async
+
+from ._blob_client_async import BlobClient
+from ._download_async import StorageStreamDownloader
+from ._lease_async import BlobLeaseClient
+from ._list_blobs_helper import BlobNamesPaged, BlobPropertiesPaged, BlobPrefix
+from ._models import FilteredBlobPaged
+from .._container_client_helpers import (
+    _format_url,
+    _generate_delete_blobs_options,
+    _generate_set_tiers_options,
+    _parse_url
+)
+from .._deserialize import deserialize_container_properties
+from .._encryption import StorageEncryptionMixin
+from .._generated.aio import AzureBlobStorage
+from .._generated.models import SignedIdentifier
+from .._list_blobs_helper import IgnoreListBlobsDeserializer
+from .._models import ContainerProperties, BlobType, BlobProperties, FilteredBlob
+from .._serialize import get_modify_conditions, get_container_cpk_scope_info, get_api_version, get_access_conditions
+from .._shared.base_client import StorageAccountHostsMixin
+from .._shared.base_client_async import AsyncStorageAccountHostsMixin, AsyncTransportWrapper, parse_connection_str
+from .._shared.policies_async import ExponentialRetry
+from .._shared.request_handlers import add_metadata_headers, serialize_iso
+from .._shared.response_handlers import (
+    process_storage_error,
+    return_headers_and_deserialized,
+    return_response_headers
+)
+
+if TYPE_CHECKING:
+    from azure.core.credentials import AzureNamedKeyCredential, AzureSasCredential
+    from azure.core.credentials_async import AsyncTokenCredential
+    from ._blob_service_client_async import BlobServiceClient
+    from .._models import (
+        AccessPolicy,
+        StandardBlobTier,
+        PremiumPageBlobTier,
+        PublicAccess
+    )
+
+
+class ContainerClient(AsyncStorageAccountHostsMixin, StorageAccountHostsMixin, StorageEncryptionMixin):  # type: ignore [misc]  # pylint: disable=too-many-public-methods
+    """A client to interact with a specific container, although that container
+    may not yet exist.
+
+    For operations relating to a specific blob within this container, a blob client can be
+    retrieved using the :func:`~get_blob_client` function.
+
+    :param str account_url:
+        The URI to the storage account. In order to create a client given the full URI to the container,
+        use the :func:`from_container_url` classmethod.
+    :param container_name:
+        The name of the container for the blob.
+    :type container_name: str
+    :param credential:
+        The credentials with which to authenticate. This is optional if the
+        account URL already has a SAS token. The value can be a SAS token string,
+        an instance of an AzureSasCredential or AzureNamedKeyCredential from azure.core.credentials,
+        an account shared access key, or an instance of a TokenCredentials class from azure.identity.
+        If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential
+        - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError.
+        If using an instance of AzureNamedKeyCredential, "name" should be the storage account name, and "key"
+        should be the storage account key.
+    :keyword str api_version:
+        The Storage API version to use for requests. Default value is the most recent service version that is
+        compatible with the current SDK. Setting to an older version may result in reduced feature compatibility.
+
+        .. versionadded:: 12.2.0
+
+    :keyword str secondary_hostname:
+        The hostname of the secondary endpoint.
+    :keyword int max_block_size: The maximum chunk size for uploading a block blob in chunks.
+        Defaults to 4*1024*1024, or 4MB.
+    :keyword int max_single_put_size: If the blob size is less than or equal to max_single_put_size, then the blob will be
+        uploaded with only one http PUT request. If the blob size is larger than max_single_put_size,
+        the blob will be uploaded in chunks. Defaults to 64*1024*1024, or 64MB.
+    :keyword int min_large_block_upload_threshold: The minimum chunk size required to use the memory efficient
+        algorithm when uploading a block blob. Defaults to 4*1024*1024+1.
+    :keyword bool use_byte_buffer: Use a byte buffer for block blob uploads. Defaults to False.
+    :keyword int max_page_size: The maximum chunk size for uploading a page blob. Defaults to 4*1024*1024, or 4MB.
+    :keyword int max_single_get_size: The maximum size for a blob to be downloaded in a single call;
+        any remaining part is downloaded in chunks (potentially in parallel). Defaults to 32*1024*1024, or 32MB.
+    :keyword int max_chunk_get_size: The maximum chunk size used for downloading a blob. Defaults to 4*1024*1024,
+        or 4MB.
+    :keyword str audience: The audience to use when requesting tokens for Azure Active Directory
+        authentication. Only has an effect when credential is of type TokenCredential. The value could be
+        https://storage.azure.com/ (default) or https://<account>.blob.core.windows.net.
+
+    .. admonition:: Example:
+
+        .. literalinclude:: ../samples/blob_samples_containers_async.py
+            :start-after: [START create_container_client_from_service]
+            :end-before: [END create_container_client_from_service]
+            :language: python
+            :dedent: 8
+            :caption: Get a ContainerClient from an existing BlobServiceClient.
+
+        .. literalinclude:: ../samples/blob_samples_containers_async.py
+            :start-after: [START create_container_client_sasurl]
+            :end-before: [END create_container_client_sasurl]
+            :language: python
+            :dedent: 12
+            :caption: Creating the container client directly.
+    """
+    def __init__(
+        self, account_url: str,
+        container_name: str,
+        credential: Optional[Union[str, Dict[str, str], "AzureNamedKeyCredential", "AzureSasCredential", "AsyncTokenCredential"]] = None,  # pylint: disable=line-too-long
+        **kwargs: Any
+    ) -> None:
+        kwargs['retry_policy'] = kwargs.get('retry_policy') or ExponentialRetry(**kwargs)
+        parsed_url, sas_token = _parse_url(account_url=account_url, container_name=container_name)
+
+        self.container_name = container_name
+        # This parameter is used for the hierarchy traversal. Give precedence to credential.
+        self._raw_credential = credential if credential else sas_token
+        self._query_str, credential = self._format_query_string(sas_token, credential)
+        super(ContainerClient, self).__init__(parsed_url, service='blob', credential=credential, **kwargs)
+        self._api_version = get_api_version(kwargs)
+        self._client = self._build_generated_client()
+        self._configure_encryption(kwargs)
+
+    def _build_generated_client(self) -> AzureBlobStorage:
+        client = AzureBlobStorage(self.url, base_url=self.url, pipeline=self._pipeline)
+        client._config.version = self._api_version  # type: ignore [assignment] # pylint: disable=protected-access
+        return client
+
+    def _format_url(self, hostname):
+        return _format_url(
+            container_name=self.container_name,
+            hostname=hostname,
+            scheme=self.scheme,
+            query_str=self._query_str
+        )
+
+    @classmethod
+    def from_container_url(
+        cls, container_url: str,
+        credential: Optional[Union[str, Dict[str, str], "AzureNamedKeyCredential", "AzureSasCredential", "AsyncTokenCredential"]] = None,  # pylint: disable=line-too-long
+        **kwargs: Any
+    ) -> Self:
+        """Create ContainerClient from a container url.
+
+        :param str container_url:
+            The full endpoint URL to the Container, including SAS token if used. This could be
+            either the primary endpoint, or the secondary endpoint depending on the current `location_mode`.
+        :type container_url: str
+        :param credential:
+            The credentials with which to authenticate. This is optional if the
+            account URL already has a SAS token, or the connection string already has shared
+            access key values. The value can be a SAS token string,
+            an instance of an AzureSasCredential or AzureNamedKeyCredential from azure.core.credentials,
+            an account shared access key, or an instance of a TokenCredentials class from azure.identity.
+            If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential
+            - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError.
+            If using an instance of AzureNamedKeyCredential, "name" should be the storage account name, and "key"
+            should be the storage account key.
+        :type credential:
+            ~azure.core.credentials.AzureNamedKeyCredential or
+            ~azure.core.credentials.AzureSasCredential or
+            ~azure.core.credentials_async.AsyncTokenCredential or
+            str or dict[str, str] or None
+        :keyword str audience: The audience to use when requesting tokens for Azure Active Directory
+            authentication. Only has an effect when credential is of type TokenCredential. The value could be
+            https://storage.azure.com/ (default) or https://<account>.blob.core.windows.net.
+        :returns: A container client.
+        :rtype: ~azure.storage.blob.ContainerClient
+        """
+        try:
+            if not container_url.lower().startswith('http'):
+                container_url = "https://" + container_url
+        except AttributeError as exc:
+            raise ValueError("Container URL must be a string.") from exc
+        parsed_url = urlparse(container_url)
+        if not parsed_url.netloc:
+            raise ValueError(f"Invalid URL: {container_url}")
+
+        container_path = parsed_url.path.strip('/').split('/')
+        account_path = ""
+        if len(container_path) > 1:
+            account_path = "/" + "/".join(container_path[:-1])
+        account_url = f"{parsed_url.scheme}://{parsed_url.netloc.rstrip('/')}{account_path}?{parsed_url.query}"
+        container_name = unquote(container_path[-1])
+        if not container_name:
+            raise ValueError("Invalid URL. Please provide a URL with a valid container name")
+        return cls(account_url, container_name=container_name, credential=credential, **kwargs)
+
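+    # Usage sketch (illustrative comment): the container URL, including its SAS
+    # query string, is a placeholder.
+    #
+    #   container_client = ContainerClient.from_container_url(
+    #       "https://<account>.blob.core.windows.net/mycontainer?<sas-token>")
+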
+    @classmethod
+    def from_connection_string(
+        cls, conn_str: str,
+        container_name: str,
+        credential: Optional[Union[str, Dict[str, str], "AzureNamedKeyCredential", "AzureSasCredential", "AsyncTokenCredential"]] = None,  # pylint: disable=line-too-long
+        **kwargs: Any
+    ) -> Self:
+        """Create ContainerClient from a Connection String.
+
+        :param str conn_str:
+            A connection string to an Azure Storage account.
+        :param container_name:
+            The container name for the blob.
+        :type container_name: str
+        :param credential:
+            The credentials with which to authenticate. This is optional if the
+            account URL already has a SAS token, or the connection string already has shared
+            access key values. The value can be a SAS token string,
+            an instance of an AzureSasCredential or AzureNamedKeyCredential from azure.core.credentials,
+            an account shared access key, or an instance of a TokenCredentials class from azure.identity.
+            Credentials provided here will take precedence over those in the connection string.
+            If using an instance of AzureNamedKeyCredential, "name" should be the storage account name, and "key"
+            should be the storage account key.
+        :type credential:
+            ~azure.core.credentials.AzureNamedKeyCredential or
+            ~azure.core.credentials.AzureSasCredential or
+            ~azure.core.credentials_async.AsyncTokenCredential or
+            str or dict[str, str] or None
+        :keyword str audience: The audience to use when requesting tokens for Azure Active Directory
+            authentication. Only has an effect when credential is of type TokenCredential. The value could be
+            https://storage.azure.com/ (default) or https://<account>.blob.core.windows.net.
+        :returns: A container client.
+        :rtype: ~azure.storage.blob.ContainerClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/blob_samples_authentication.py
+                :start-after: [START auth_from_connection_string_container]
+                :end-before: [END auth_from_connection_string_container]
+                :language: python
+                :dedent: 8
+                :caption: Creating the ContainerClient from a connection string.
+        """
+        account_url, secondary, credential = parse_connection_str(conn_str, credential, 'blob')
+        if 'secondary_hostname' not in kwargs:
+            kwargs['secondary_hostname'] = secondary
+        return cls(
+            account_url, container_name=container_name, credential=credential, **kwargs)
+
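+    # Usage sketch (illustrative comment): the environment variable name is an
+    # assumption, not an SDK convention.
+    #
+    #   import os
+    #   container_client = ContainerClient.from_connection_string(
+    #       os.environ["AZURE_STORAGE_CONNECTION_STRING"], container_name="mycontainer")
+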
+    @distributed_trace_async
+    async def create_container(
+        self, metadata: Optional[Dict[str, str]] = None,
+        public_access: Optional[Union["PublicAccess", str]] = None,
+        **kwargs: Any
+    ) -> Dict[str, Union[str, datetime]]:
+        """
+        Creates a new container under the specified account. If the container
+        with the same name already exists, the operation fails.
+
+        :param metadata:
+            A dict with name-value pairs to associate with the
+            container as metadata. Example: {'Category':'test'}
+        :type metadata: dict[str, str]
+        :param ~azure.storage.blob.PublicAccess public_access:
+            Possible values include: 'container', 'blob'.
+        :keyword container_encryption_scope:
+            Specifies the default encryption scope to set on the container and use for
+            all future writes.
+
+            .. versionadded:: 12.2.0
+
+        :paramtype container_encryption_scope: dict or ~azure.storage.blob.ContainerEncryptionScope
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :returns: A dictionary of response headers.
+        :rtype: Dict[str, Union[str, datetime]]
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/blob_samples_containers_async.py
+                :start-after: [START create_container]
+                :end-before: [END create_container]
+                :language: python
+                :dedent: 16
+                :caption: Creating a container to store blobs.
+        """
+        headers = kwargs.pop('headers', {})
+        headers.update(add_metadata_headers(metadata)) # type: ignore
+        timeout = kwargs.pop('timeout', None)
+        container_cpk_scope_info = get_container_cpk_scope_info(kwargs)
+        try:
+            return await self._client.container.create( # type: ignore
+                timeout=timeout,
+                access=public_access,
+                container_cpk_scope_info=container_cpk_scope_info,
+                cls=return_response_headers,
+                headers=headers,
+                **kwargs)
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    @distributed_trace_async
+    async def _rename_container(self, new_name: str, **kwargs: Any) -> "ContainerClient":
+        """Renames a container.
+
+        Operation is successful only if the source container exists.
+
+        :param str new_name:
+            The new name for the renamed container.
+        :keyword lease:
+            Specify this to perform the operation only if the lease ID given
+            matches the active lease ID of the source container.
+        :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :returns: The renamed container.
+        :rtype: ~azure.storage.blob.ContainerClient
+        """
+        lease = kwargs.pop('lease', None)
+        try:
+            kwargs['source_lease_id'] = lease.id
+        except AttributeError:
+            kwargs['source_lease_id'] = lease
+        try:
+            renamed_container = ContainerClient(
+                f"{self.scheme}://{self.primary_hostname}", container_name=new_name,
+                credential=self.credential, api_version=self.api_version, _configuration=self._config,
+                _pipeline=self._pipeline, _location_mode=self._location_mode, _hosts=self._hosts,
+                require_encryption=self.require_encryption, encryption_version=self.encryption_version,
+                key_encryption_key=self.key_encryption_key, key_resolver_function=self.key_resolver_function)
+            await renamed_container._client.container.rename(self.container_name, **kwargs)   # pylint: disable = protected-access
+            return renamed_container
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    @distributed_trace_async
+    async def delete_container(self, **kwargs: Any) -> None:
+        """
+        Marks the specified container for deletion. The container and any blobs
+        contained within it are later deleted during garbage collection.
+
+        :keyword lease:
+            If specified, delete_container only succeeds if the
+            container's lease is active and matches this ID.
+            Required if the container has an active lease.
+        :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :rtype: None
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/blob_samples_containers_async.py
+                :start-after: [START delete_container]
+                :end-before: [END delete_container]
+                :language: python
+                :dedent: 16
+                :caption: Delete a container.
+        """
+        lease = kwargs.pop('lease', None)
+        access_conditions = get_access_conditions(lease)
+        mod_conditions = get_modify_conditions(kwargs)
+        timeout = kwargs.pop('timeout', None)
+        try:
+            await self._client.container.delete(
+                timeout=timeout,
+                lease_access_conditions=access_conditions,
+                modified_access_conditions=mod_conditions,
+                **kwargs)
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    @distributed_trace_async
+    async def acquire_lease(
+        self, lease_duration: int = -1,
+        lease_id: Optional[str] = None,
+        **kwargs: Any
+    ) -> BlobLeaseClient:
+        """
+        Requests a new lease. If the container does not have an active lease,
+        the Blob service creates a lease on the container and returns a new
+        lease ID.
+
+        :param int lease_duration:
+            Specifies the duration of the lease, in seconds, or negative one
+            (-1) for a lease that never expires. A non-infinite lease can be
+            between 15 and 60 seconds. A lease duration cannot be changed
+            using renew or change. Default is -1 (infinite lease).
+        :param str lease_id:
+            Proposed lease ID, in a GUID string format. The Blob service returns
+            400 (Invalid request) if the proposed lease ID is not in the correct format.
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :returns: A BlobLeaseClient object, that can be run in a context manager.
+        :rtype: ~azure.storage.blob.aio.BlobLeaseClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/blob_samples_containers_async.py
+                :start-after: [START acquire_lease_on_container]
+                :end-before: [END acquire_lease_on_container]
+                :language: python
+                :dedent: 12
+                :caption: Acquiring a lease on the container.
+        """
+        lease = BlobLeaseClient(self, lease_id=lease_id) # type: ignore
+        kwargs.setdefault('merge_span', True)
+        timeout = kwargs.pop('timeout', None)
+        await lease.acquire(lease_duration=lease_duration, timeout=timeout, **kwargs)
+        return lease
+
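+    # Usage sketch (illustrative comment): hold a short lease while mutating the
+    # container, then release it so unleased operations succeed again.
+    #
+    #   lease = await container_client.acquire_lease(lease_duration=15)
+    #   try:
+    #       await container_client.set_container_metadata({"owner": "etl"}, lease=lease)
+    #   finally:
+    #       await lease.release()
+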
+    @distributed_trace_async
+    async def get_account_information(self, **kwargs: Any) -> Dict[str, str]:
+        """Gets information related to the storage account.
+
+        The information can also be retrieved if the user has a SAS to a container or blob.
+        The keys in the returned dictionary include 'sku_name' and 'account_kind'.
+
+        :returns: A dict of account information (SKU and account type).
+        :rtype: dict(str, str)
+        """
+        try:
+            return await self._client.container.get_account_info(cls=return_response_headers, **kwargs) # type: ignore
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    @distributed_trace_async
+    async def get_container_properties(self, **kwargs: Any) -> ContainerProperties:
+        """Returns all user-defined metadata and system properties for the specified
+        container. The data returned does not include the container's list of blobs.
+
+        :keyword lease:
+            If specified, get_container_properties only succeeds if the
+            container's lease is active and matches this ID.
+        :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :return: Properties for the specified container within a container object.
+        :rtype: ~azure.storage.blob.ContainerProperties
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/blob_samples_containers_async.py
+                :start-after: [START get_container_properties]
+                :end-before: [END get_container_properties]
+                :language: python
+                :dedent: 16
+                :caption: Getting properties on the container.
+        """
+        lease = kwargs.pop('lease', None)
+        access_conditions = get_access_conditions(lease)
+        timeout = kwargs.pop('timeout', None)
+        try:
+            response = await self._client.container.get_properties(
+                timeout=timeout,
+                lease_access_conditions=access_conditions,
+                cls=deserialize_container_properties,
+                **kwargs)
+        except HttpResponseError as error:
+            process_storage_error(error)
+        response.name = self.container_name
+        return response # type: ignore
+
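+    # Usage sketch (illustrative comment): a few commonly inspected properties on the
+    # returned ContainerProperties object.
+    #
+    #   props = await container_client.get_container_properties()
+    #   print(props.name, props.last_modified, props.etag, props.public_access)
+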
+    @distributed_trace_async
+    async def exists(self, **kwargs: Any) -> bool:
+        """
+        Returns True if the container exists, False otherwise.
+
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :returns: True if the container exists, False otherwise.
+        :rtype: bool
+        """
+        try:
+            await self._client.container.get_properties(**kwargs)
+            return True
+        except HttpResponseError as error:
+            try:
+                process_storage_error(error)
+            except ResourceNotFoundError:
+                return False
+
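+    # Usage sketch (illustrative comment): create the container only when absent.
+    # Note the check-then-create is not atomic; a concurrent creator can still win.
+    #
+    #   if not await container_client.exists():
+    #       await container_client.create_container()
+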
+    @distributed_trace_async
+    async def set_container_metadata(
+        self, metadata: Optional[Dict[str, str]] = None,
+        **kwargs: Any
+    ) -> Dict[str, Union[str, datetime]]:
+        """Sets one or more user-defined name-value pairs for the specified
+        container. Each call to this operation replaces all existing metadata
+        attached to the container. To remove all metadata from the container,
+        call this operation with no metadata dict.
+
+        :param metadata:
+            A dict containing name-value pairs to associate with the container as
+            metadata. Example: {'category':'test'}
+        :type metadata: dict[str, str]
+        :keyword lease:
+            If specified, set_container_metadata only succeeds if the
+            container's lease is active and matches this ID.
+        :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :returns: Container-updated property dict (Etag and last modified).
+        :rtype: Dict[str, Union[str, datetime]]
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/blob_samples_containers_async.py
+                :start-after: [START set_container_metadata]
+                :end-before: [END set_container_metadata]
+                :language: python
+                :dedent: 16
+                :caption: Setting metadata on the container.
+        """
+        headers = kwargs.pop('headers', {})
+        headers.update(add_metadata_headers(metadata))
+        lease = kwargs.pop('lease', None)
+        access_conditions = get_access_conditions(lease)
+        mod_conditions = get_modify_conditions(kwargs)
+        timeout = kwargs.pop('timeout', None)
+        try:
+            return await self._client.container.set_metadata(  # type: ignore
+                timeout=timeout,
+                lease_access_conditions=access_conditions,
+                modified_access_conditions=mod_conditions,
+                cls=return_response_headers,
+                headers=headers,
+                **kwargs)
+        except HttpResponseError as error:
+            process_storage_error(error)
+
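+    # Usage sketch (illustrative comment): because each call replaces all metadata,
+    # fetch and merge first when prior keys must be preserved.
+    #
+    #   props = await container_client.get_container_properties()
+    #   merged = {**props.metadata, "reviewed": "true"}
+    #   await container_client.set_container_metadata(merged)
+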
+    @distributed_trace
+    def _get_blob_service_client(self) -> "BlobServiceClient":
+        """Get a client to interact with the container's parent service account.
+
+        Defaults to current container's credentials.
+
+        :returns: A BlobServiceClient.
+        :rtype: ~azure.storage.blob.BlobServiceClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/blob_samples_service_async.py
+                :start-after: [START get_blob_service_client_from_container_client]
+                :end-before: [END get_blob_service_client_from_container_client]
+                :language: python
+                :dedent: 8
+                :caption: Get blob service client from container object.
+        """
+        from ._blob_service_client_async import BlobServiceClient
+        if not isinstance(self._pipeline._transport, AsyncTransportWrapper): # pylint: disable = protected-access
+            _pipeline = AsyncPipeline(
+                transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access
+                policies=self._pipeline._impl_policies #type: ignore [arg-type] # pylint: disable = protected-access
+            )
+        else:
+            _pipeline = self._pipeline
+        return BlobServiceClient(
+            f"{self.scheme}://{self.primary_hostname}",
+            credential=self._raw_credential, api_version=self.api_version, _configuration=self._config,
+            _location_mode=self._location_mode, _hosts=self._hosts, require_encryption=self.require_encryption,
+            encryption_version=self.encryption_version, key_encryption_key=self.key_encryption_key,
+            key_resolver_function=self.key_resolver_function, _pipeline=_pipeline)
+
+    @distributed_trace_async
+    async def get_container_access_policy(self, **kwargs: Any) -> Dict[str, Any]:
+        """Gets the permissions for the specified container.
+        The permissions indicate whether container data may be accessed publicly.
+
+        :keyword lease:
+            If specified, get_container_access_policy only succeeds if the
+            container's lease is active and matches this ID.
+        :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :returns: Access policy information in a dict.
+        :rtype: dict[str, Any]
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/blob_samples_containers_async.py
+                :start-after: [START get_container_access_policy]
+                :end-before: [END get_container_access_policy]
+                :language: python
+                :dedent: 16
+                :caption: Getting the access policy on the container.
+        """
+        lease = kwargs.pop('lease', None)
+        access_conditions = get_access_conditions(lease)
+        timeout = kwargs.pop('timeout', None)
+        try:
+            response, identifiers = await self._client.container.get_access_policy(
+                timeout=timeout,
+                lease_access_conditions=access_conditions,
+                cls=return_headers_and_deserialized,
+                **kwargs)
+        except HttpResponseError as error:
+            process_storage_error(error)
+        return {
+            'public_access': response.get('blob_public_access'),
+            'signed_identifiers': identifiers or []
+        }
+
+    @distributed_trace_async
+    async def set_container_access_policy(
+        self, signed_identifiers: Dict[str, "AccessPolicy"],
+        public_access: Optional[Union[str, "PublicAccess"]] = None,
+        **kwargs: Any
+    ) -> Dict[str, Union[str, datetime]]:
+        """Sets the permissions for the specified container or stored access
+        policies that may be used with Shared Access Signatures. The permissions
+        indicate whether blobs in a container may be accessed publicly.
+
+        :param signed_identifiers:
+            A dictionary of access policies to associate with the container. The
+            dictionary may contain up to 5 elements. An empty dictionary
+            will clear the access policies set on the service.
+        :type signed_identifiers: dict[str, ~azure.storage.blob.AccessPolicy]
+        :param ~azure.storage.blob.PublicAccess public_access:
+            Possible values include: 'container', 'blob'.
+        :keyword lease:
+            Required if the container has an active lease. Value can be a BlobLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A datetime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified date/time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A datetime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :returns: Container-updated property dict (Etag and last modified).
+        :rtype: dict[str, str or ~datetime.datetime]
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/blob_samples_containers_async.py
+                :start-after: [START set_container_access_policy]
+                :end-before: [END set_container_access_policy]
+                :language: python
+                :dedent: 16
+                :caption: Setting access policy on the container.
+        """
+        timeout = kwargs.pop('timeout', None)
+        lease = kwargs.pop('lease', None)
+        if len(signed_identifiers) > 5:
+            raise ValueError(
+                'Too many access policies provided. The server does not support setting '
+                'more than 5 access policies on a single resource.')
+        identifiers = []
+        for key, value in signed_identifiers.items():
+            if value:
+                value.start = serialize_iso(value.start)
+                value.expiry = serialize_iso(value.expiry)
+            identifiers.append(SignedIdentifier(id=key, access_policy=value)) # type: ignore
+        signed_identifiers = identifiers # type: ignore
+
+        mod_conditions = get_modify_conditions(kwargs)
+        access_conditions = get_access_conditions(lease)
+        try:
+            return cast(Dict[str, Union[str, datetime]], await self._client.container.set_access_policy(
+                container_acl=signed_identifiers or None,
+                timeout=timeout,
+                access=public_access,
+                lease_access_conditions=access_conditions,
+                modified_access_conditions=mod_conditions,
+                cls=return_response_headers,
+                **kwargs))
+        except HttpResponseError as error:
+            process_storage_error(error)
+
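As a quick usage sketch (not part of this diff), the two access-policy methods above can be exercised as follows; the connection string and container name are placeholders:

```python
import asyncio
from datetime import datetime, timedelta, timezone

from azure.storage.blob import AccessPolicy, ContainerSasPermissions, PublicAccess
from azure.storage.blob.aio import ContainerClient

async def main():
    # Placeholder connection string and container name.
    container = ContainerClient.from_connection_string("<connection-string>", "my-container")
    async with container:
        # One stored access policy granting read access for a day.
        policy = AccessPolicy(
            permission=ContainerSasPermissions(read=True),
            start=datetime.now(timezone.utc),
            expiry=datetime.now(timezone.utc) + timedelta(days=1),
        )
        await container.set_container_access_policy(
            signed_identifiers={"read-only": policy}, public_access=PublicAccess.BLOB)

        # Read it back; 'signed_identifiers' is a list of SignedIdentifier objects.
        acl = await container.get_access_policy()
        print(acl["public_access"], [sid.id for sid in acl["signed_identifiers"]])

asyncio.run(main())
```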
+    @distributed_trace
+    def list_blobs(
+        self, name_starts_with: Optional[str] = None,
+        include: Optional[Union[str, List[str]]] = None,
+        **kwargs: Any
+    ) -> AsyncItemPaged[BlobProperties]:
+        """Returns a generator to list the blobs under the specified container.
+        The generator will lazily follow the continuation tokens returned by
+        the service.
+
+        :param str name_starts_with:
+            Filters the results to return only blobs whose names
+            begin with the specified prefix.
+        :param include:
+            Specifies one or more additional datasets to include in the response.
+            Options include: 'snapshots', 'metadata', 'uncommittedblobs', 'copy', 'deleted', 'deletedwithversions',
+            'tags', 'versions', 'immutabilitypolicy', 'legalhold'.
+        :type include: list[str] or str
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :returns: An iterable (auto-paging) response of BlobProperties.
+        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.storage.blob.BlobProperties]
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/blob_samples_containers_async.py
+                :start-after: [START list_blobs_in_container]
+                :end-before: [END list_blobs_in_container]
+                :language: python
+                :dedent: 12
+                :caption: List the blobs in the container.
+        """
+        if kwargs.pop('prefix', None):
+            raise ValueError("Passing 'prefix' has no effect on filtering, " +
+                             "please use the 'name_starts_with' parameter instead.")
+
+        if include and not isinstance(include, list):
+            include = [include]
+
+        results_per_page = kwargs.pop('results_per_page', None)
+        timeout = kwargs.pop('timeout', None)
+        command = functools.partial(
+            self._client.container.list_blob_flat_segment,
+            include=include,
+            timeout=timeout,
+            **kwargs)
+        return AsyncItemPaged(
+            command,
+            prefix=name_starts_with,
+            results_per_page=results_per_page,
+            container=self.container_name,
+            page_iterator_class=BlobPropertiesPaged
+        )
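A minimal consumption sketch for the pager returned above, assuming a `container` client like the one in the earlier example (the prefix and include options are illustrative):

```python
# AsyncItemPaged is consumed with `async for`; continuation tokens are
# followed transparently as pages are exhausted.
async for blob in container.list_blobs(name_starts_with="logs/", include=["metadata"]):
    print(blob.name, blob.size, blob.metadata)
```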
+
+    @distributed_trace
+    def list_blob_names(self, **kwargs: Any) -> AsyncItemPaged[str]:
+        """Returns a generator to list the names of blobs under the specified container.
+        The generator will lazily follow the continuation tokens returned by
+        the service.
+
+        Note that no additional properties or metadata will be returned when using this API.
+        Additionally this API does not have an option to include additional blobs such as snapshots,
+        versions, soft-deleted blobs, etc. To get any of this data, use :func:`list_blobs()`.
+
+        :keyword str name_starts_with:
+            Filters the results to return only blobs whose names
+            begin with the specified prefix.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :returns: An iterable (auto-paging) response of blob names as strings.
+        :rtype: ~azure.core.async_paging.AsyncItemPaged[str]
+        """
+        if kwargs.pop('prefix', None):
+            raise ValueError("Passing 'prefix' has no effect on filtering, " +
+                             "please use the 'name_starts_with' parameter instead.")
+
+        name_starts_with = kwargs.pop('name_starts_with', None)
+        results_per_page = kwargs.pop('results_per_page', None)
+        timeout = kwargs.pop('timeout', None)
+
+        # For listing only names we need to create a one-off generated client and
+        # override its deserializer to prevent deserialization of the full response.
+        client = self._build_generated_client()
+        client.container._deserialize = IgnoreListBlobsDeserializer()  # pylint: disable=protected-access
+
+        command = functools.partial(
+            client.container.list_blob_flat_segment,
+            timeout=timeout,
+            **kwargs)
+        return AsyncItemPaged(
+            command,
+            prefix=name_starts_with,
+            results_per_page=results_per_page,
+            container=self.container_name,
+            page_iterator_class=BlobNamesPaged)
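Assuming the same `container` client, listing only names avoids deserializing full `BlobProperties` and is cheaper when nothing else is needed:

```python
# Names only; no properties, metadata, snapshots, or versions.
async for name in container.list_blob_names(name_starts_with="logs/"):
    print(name)
```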
+
+    @distributed_trace
+    def walk_blobs(
+        self, name_starts_with: Optional[str] = None,
+        include: Optional[Union[List[str], str]] = None,
+        delimiter: str = "/",
+        **kwargs: Any
+    ) -> AsyncItemPaged[BlobProperties]:
+        """Returns a generator to list the blobs under the specified container.
+        The generator will lazily follow the continuation tokens returned by
+        the service. This operation will list blobs in accordance with a hierarchy,
+        as delimited by the specified delimiter character.
+
+        :param str name_starts_with:
+            Filters the results to return only blobs whose names
+            begin with the specified prefix.
+        :param include:
+            Specifies one or more additional datasets to include in the response.
+            Options include: 'snapshots', 'metadata', 'uncommittedblobs', 'copy', 'deleted', 'deletedwithversions',
+            'tags', 'versions', 'immutabilitypolicy', 'legalhold'.
+        :type include: list[str] or str
+        :param str delimiter:
+            When the request includes this parameter, the operation returns a BlobPrefix
+            element in the response body that acts as a placeholder for all blobs whose
+            names begin with the same substring up to the appearance of the delimiter
+            character. The delimiter may be a single character or a string.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :returns: An iterable (auto-paging) response of BlobProperties.
+        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.storage.blob.BlobProperties]
+        """
+        if kwargs.pop('prefix', None):
+            raise ValueError("Passing 'prefix' has no effect on filtering, " +
+                             "please use the 'name_starts_with' parameter instead.")
+
+        if include and not isinstance(include, list):
+            include = [include]
+
+        results_per_page = kwargs.pop('results_per_page', None)
+        timeout = kwargs.pop('timeout', None)
+        command = functools.partial(
+            self._client.container.list_blob_hierarchy_segment,
+            delimiter=delimiter,
+            include=include,
+            timeout=timeout,
+            **kwargs)
+        return BlobPrefix(
+            command,
+            prefix=name_starts_with,
+            results_per_page=results_per_page,
+            container=self.container_name,
+            delimiter=delimiter)
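A hierarchy-walk sketch under the same assumptions; items yielded at each level are either `BlobProperties` or `BlobPrefix` ("virtual directory") entries, and the latter can themselves be iterated:

```python
# One level of the hierarchy, using '/' as the directory separator.
async for item in container.walk_blobs(delimiter="/"):
    print(item.name)
```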
+
+    @distributed_trace
+    def find_blobs_by_tags(
+        self, filter_expression: str,
+        **kwargs: Any
+    ) -> AsyncItemPaged[FilteredBlob]:
+        """Returns a generator to list the blobs under the specified container whose tags
+        match the given search expression.
+        The generator will lazily follow the continuation tokens returned by
+        the service.
+
+        :param str filter_expression:
+            The expression to find blobs whose tags match the specified condition.
+            eg. "\"yourtagname\"='firsttag' and \"yourtagname2\"='secondtag'"
+        :keyword int results_per_page:
+            The max result per page when paginating.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :returns: An iterable (auto-paging) response of FilteredBlob.
+        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.storage.blob.FilteredBlob]
+        """
+        results_per_page = kwargs.pop('results_per_page', None)
+        timeout = kwargs.pop('timeout', None)
+        command = functools.partial(
+            self._client.container.filter_blobs,
+            timeout=timeout,
+            where=filter_expression,
+            **kwargs)
+        return AsyncItemPaged(
+            command, results_per_page=results_per_page,
+            container=self.container_name,
+            page_iterator_class=FilteredBlobPaged)
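A tag-filter sketch, again assuming a `container` client; the expression syntax is the service's SQL-like tag grammar, with tag names in double quotes and values in single quotes:

```python
# Find blobs tagged project=alpha within this container.
async for filtered in container.find_blobs_by_tags("\"project\"='alpha'"):
    print(filtered.name, filtered.tags)
```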
+
+    @distributed_trace_async
+    async def upload_blob(
+        self, name: str,
+        data: Union[bytes, str, Iterable[AnyStr], AsyncIterable[AnyStr], IO[AnyStr]],
+        blob_type: Union[str, BlobType] = BlobType.BLOCKBLOB,
+        length: Optional[int] = None,
+        metadata: Optional[Dict[str, str]] = None,
+        **kwargs
+    ) -> BlobClient:
+        """Creates a new blob from a data source with automatic chunking.
+
+        :param str name: The blob with which to interact.
+        :param data: The blob data to upload.
+        :type data: Union[bytes, str, Iterable[AnyStr], AsyncIterable[AnyStr], IO[AnyStr]]
+        :param ~azure.storage.blob.BlobType blob_type: The type of the blob. This can be
+            either BlockBlob, PageBlob or AppendBlob. The default value is BlockBlob.
+        :param int length:
+            Number of bytes to read from the stream. This is optional, but
+            should be supplied for optimal performance.
+        :param metadata:
+            Name-value pairs associated with the blob as metadata.
+        :type metadata: dict(str, str)
+        :keyword bool overwrite: Whether the blob to be uploaded should overwrite the current data.
+            If True, upload_blob will overwrite the existing data. If set to False, the
+            operation will fail with ResourceExistsError. The exception to the above is with Append
+            blob types: if set to False and the data already exists, an error will not be raised
+            and the data will be appended to the existing blob. If overwrite is set to True, the existing
+            append blob will be deleted and a new one created. Defaults to False.
+        :keyword ~azure.storage.blob.ContentSettings content_settings:
+            ContentSettings object used to set blob properties. Used to set content type, encoding,
+            language, disposition, md5, and cache control.
+        :keyword bool validate_content:
+            If true, calculates an MD5 hash for each chunk of the blob. The storage
+            service checks the hash of the content that has arrived with the hash
+            that was sent. This is primarily valuable for detecting bitflips on
+            the wire if using http instead of https, as https (the default), will
+            already validate. Note that this MD5 hash is not stored with the
+            blob. Also note that if enabled, the memory-efficient upload algorithm
+            will not be used, because computing the MD5 hash requires buffering
+            entire blocks, and doing so defeats the purpose of the memory-efficient algorithm.
+        :keyword lease:
+            Required if the container has an active lease. Value can be a BlobLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword str if_tags_match_condition:
+            Specify a SQL where clause on blob tags to operate only on blob with a matching value.
+            eg. ``\"\\\"tagname\\\"='my tag'\"``
+
+            .. versionadded:: 12.4.0
+
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__. This method may make
+            multiple calls to the Azure service and the timeout will apply to
+            each call individually.
+        :keyword ~azure.storage.blob.PremiumPageBlobTier premium_page_blob_tier:
+            A page blob tier value to set the blob to. The tier correlates to the size of the
+            blob and number of allowed IOPS. This is only applicable to page blobs on
+            premium storage accounts.
+        :keyword ~azure.storage.blob.StandardBlobTier standard_blob_tier:
+            A standard blob tier value to set the blob to. For this version of the library,
+            this is only applicable to block blobs on standard storage accounts.
+        :keyword int maxsize_condition:
+            Optional conditional header. The max length in bytes permitted for
+            the append blob. If the Append Block operation would cause the blob
+            to exceed that limit or if the blob size is already greater than the
+            value specified in this header, the request will fail with
+            MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed).
+        :keyword int max_concurrency:
+            Maximum number of parallel connections to use when the blob size exceeds
+            64MB.
+        :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk:
+            Encrypts the data on the service-side with the given key.
+            Use of customer-provided keys must be done over HTTPS.
+            As the encryption key itself is provided in the request,
+            a secure connection must be established to transfer the key.
+        :keyword str encryption_scope:
+            A predefined encryption scope used to encrypt the data on the service. An encryption
+            scope can be created using the Management API and referenced here by name. If a default
+            encryption scope has been defined at the container, this value will override it if the
+            container-level scope is configured to allow overrides. Otherwise an error will be raised.
+
+            .. versionadded:: 12.2.0
+
+        :keyword str encoding:
+            Defaults to UTF-8.
+        :keyword progress_hook:
+            An async callback to track the progress of a long running upload. The signature is
+            function(current: int, total: Optional[int]) where current is the number of bytes transferred
+            so far, and total is the size of the blob or None if the size is unknown.
+        :paramtype progress_hook: Callable[[int, Optional[int]], Awaitable[None]]
+        :returns: A BlobClient to interact with the newly uploaded blob.
+        :rtype: ~azure.storage.blob.aio.BlobClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/blob_samples_containers_async.py
+                :start-after: [START upload_blob_to_container]
+                :end-before: [END upload_blob_to_container]
+                :language: python
+                :dedent: 12
+                :caption: Upload blob to the container.
+        """
+        if isinstance(name, BlobProperties):
+            warnings.warn(
+                "The use of a 'BlobProperties' instance for param name is deprecated. " +
+                "Please use 'BlobProperties.name' or any other str input type instead.",
+                DeprecationWarning
+            )
+        blob = self.get_blob_client(name)
+        kwargs.setdefault('merge_span', True)
+        timeout = kwargs.pop('timeout', None)
+        encoding = kwargs.pop('encoding', 'UTF-8')
+        await blob.upload_blob(
+            data,
+            blob_type=blob_type,
+            length=length,
+            metadata=metadata,
+            timeout=timeout,
+            encoding=encoding,
+            **kwargs
+        )
+        return blob
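A hedged upload sketch showing the documented `progress_hook` signature; the blob name and payload are placeholders:

```python
import asyncio
from typing import Optional

from azure.storage.blob.aio import ContainerClient

async def report(current: int, total: Optional[int]) -> None:
    # Matches the async progress_hook signature documented above.
    print(f"uploaded {current} of {total} bytes")

async def main():
    container = ContainerClient.from_connection_string("<connection-string>", "my-container")
    async with container:
        blob = await container.upload_blob(
            "data/report.txt", b"hello world",
            overwrite=True, progress_hook=report)
        print(blob.url)  # the returned BlobClient points at the new blob

asyncio.run(main())
```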
+
+    @distributed_trace_async
+    async def delete_blob(
+        self, blob: str,
+        delete_snapshots: Optional[str] = None,
+        **kwargs: Any
+    ) -> None:
+        """Marks the specified blob or snapshot for deletion.
+
+        The blob is later deleted during garbage collection.
+        Note that in order to delete a blob, you must delete all of its
+        snapshots. You can delete both at the same time with the delete_blob
+        operation.
+
+        If a delete retention policy is enabled for the service, then this operation soft deletes the blob or snapshot
+        and retains the blob or snapshot for specified number of days.
+        After specified number of days, blob's data is removed from the service during garbage collection.
+        Soft deleted blobs or snapshots are accessible through :func:`list_blobs()` specifying `include=["deleted"]`
+        Soft-deleted blob or snapshot can be restored using :func:`~azure.storage.blob.aio.BlobClient.undelete()`
+
+        :param str blob: The blob with which to interact.
+        :param str delete_snapshots:
+            Required if the blob has associated snapshots. Values include:
+             - "only": Deletes only the blobs snapshots.
+             - "include": Deletes the blob along with all snapshots.
+        :keyword str version_id:
+            The version id parameter is an opaque DateTime
+            value that, when present, specifies the version of the blob to delete.
+
+            .. versionadded:: 12.4.0
+
+            This keyword argument was introduced in API version '2019-12-12'.
+
+        :keyword lease:
+            Required if the blob has an active lease. Value can be a Lease object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword str if_tags_match_condition:
+            Specify a SQL where clause on blob tags to operate only on blob with a matching value.
+            eg. ``\"\\\"tagname\\\"='my tag'\"``
+
+            .. versionadded:: 12.4.0
+
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :rtype: None
+        """
+        if isinstance(blob, BlobProperties):
+            warnings.warn(
+                "The use of a 'BlobProperties' instance for param blob is deprecated. " +
+                "Please use 'BlobProperties.name' or any other str input type instead.",
+                DeprecationWarning
+            )
+        blob = self.get_blob_client(blob) # type: ignore
+        kwargs.setdefault('merge_span', True)
+        timeout = kwargs.pop('timeout', None)
+        await blob.delete_blob( # type: ignore
+            delete_snapshots=delete_snapshots,
+            timeout=timeout,
+            **kwargs)
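For instance, deleting a blob together with its snapshots (required when snapshots exist), assuming a `container` client as in the earlier sketches:

```python
# "include" removes the blob and all of its snapshots in one call.
await container.delete_blob("data/report.txt", delete_snapshots="include")
```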
+
+    @overload
+    async def download_blob(
+        self, blob: str,
+        offset: Optional[int] = None,
+        length: Optional[int] = None,
+        *,
+        encoding: str,
+        **kwargs: Any
+    ) -> StorageStreamDownloader[str]:
+        ...
+
+    @overload
+    async def download_blob(
+        self, blob: str,
+        offset: Optional[int] = None,
+        length: Optional[int] = None,
+        *,
+        encoding: None = None,
+        **kwargs: Any
+    ) -> StorageStreamDownloader[bytes]:
+        ...
+
+    @distributed_trace_async
+    async def download_blob(
+        self, blob: str,
+        offset: Optional[int] = None,
+        length: Optional[int] = None,
+        *,
+        encoding: Union[str, None] = None,
+        **kwargs: Any
+    ) -> Union[StorageStreamDownloader[str], StorageStreamDownloader[bytes]]:
+        """Downloads a blob to the StorageStreamDownloader. The readall() method must
+        be used to read all the content or readinto() must be used to download the blob into
+        a stream. Using chunks() returns an async iterator which allows the user to iterate over the content in chunks.
+
+        :param str blob: The blob with which to interact.
+        :param int offset:
+            Start of byte range to use for downloading a section of the blob.
+            Must be set if length is provided.
+        :param int length:
+            Number of bytes to read from the stream. This is optional, but
+            should be supplied for optimal performance.
+        :keyword str version_id:
+            The version id parameter is an opaque DateTime
+            value that, when present, specifies the version of the blob to download.
+
+            .. versionadded:: 12.4.0
+
+            This keyword argument was introduced in API version '2019-12-12'.
+
+        :keyword bool validate_content:
+            If true, calculates an MD5 hash for each chunk of the blob. The storage
+            service checks the hash of the content that has arrived with the hash
+            that was sent. This is primarily valuable for detecting bitflips on
+            the wire if using http instead of https, as https (the default), will
+            already validate. Note that this MD5 hash is not stored with the
+            blob. Also note that if enabled, the memory-efficient upload algorithm
+            will not be used because computing the MD5 hash requires buffering
+            entire blocks, and doing so defeats the purpose of the memory-efficient algorithm.
+        :keyword lease:
+            Required if the blob has an active lease. If specified, download_blob only
+            succeeds if the blob's lease is active and matches this ID. Value can be a
+            BlobLeaseClient object or the lease ID as a string.
+        :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword str if_tags_match_condition:
+            Specify a SQL where clause on blob tags to operate only on blob with a matching value.
+            eg. ``\"\\\"tagname\\\"='my tag'\"``
+
+            .. versionadded:: 12.4.0
+
+        :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk:
+            Encrypts the data on the service-side with the given key.
+            Use of customer-provided keys must be done over HTTPS.
+            As the encryption key itself is provided in the request,
+            a secure connection must be established to transfer the key.
+        :keyword int max_concurrency:
+            The number of parallel connections with which to download.
+        :keyword str encoding:
+            Encoding to decode the downloaded bytes. Default is None, i.e. no decoding.
+        :keyword progress_hook:
+            An async callback to track the progress of a long running download. The signature is
+            function(current: int, total: int) where current is the number of bytes transferred
+            so far, and total is the total size of the download.
+        :paramtype progress_hook: Callable[[int, int], Awaitable[None]]
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__. This method may make
+            multiple calls to the Azure service and the timeout will apply to
+            each call individually.
+        :returns: A streaming object (StorageStreamDownloader).
+        :rtype: ~azure.storage.blob.aio.StorageStreamDownloader
+        """
+        if isinstance(blob, BlobProperties):
+            warnings.warn(
+                "The use of a 'BlobProperties' instance for param blob is deprecated. " +
+                "Please use 'BlobProperties.name' or any other str input type instead.",
+                DeprecationWarning
+            )
+        blob_client = self.get_blob_client(blob) # type: ignore
+        kwargs.setdefault('merge_span', True)
+        return await blob_client.download_blob(
+            offset=offset,
+            length=length,
+            encoding=encoding,
+            **kwargs)
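A download sketch under the same assumptions; `process` is a hypothetical consumer, not an SDK call:

```python
# Read everything into memory...
stream = await container.download_blob("data/report.txt")
data = await stream.readall()

# ...or bound memory by iterating fixed-size chunks.
stream = await container.download_blob("data/report.txt")
async for chunk in stream.chunks():
    process(chunk)  # hypothetical consumer

# Passing `encoding` makes readall() return str instead of bytes.
stream = await container.download_blob("data/report.txt", encoding="utf-8")
text = await stream.readall()
```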
+
+    @distributed_trace_async
+    async def delete_blobs(
+        self, *blobs: Union[str, Dict[str, Any], BlobProperties],
+        **kwargs: Any
+    ) -> AsyncIterator[AsyncHttpResponse]:
+        """Marks the specified blobs or snapshots for deletion.
+
+        The blobs are later deleted during garbage collection.
+        Note that in order to delete blobs, you must delete all of their
+        snapshots. You can delete both at the same time with the delete_blobs operation.
+
+        If a delete retention policy is enabled for the service, then this operation soft deletes the blobs or snapshots
+        and retains the blobs or snapshots for specified number of days.
+        After specified number of days, blobs' data is removed from the service during garbage collection.
+        Soft deleted blobs or snapshots are accessible through :func:`list_blobs()` specifying `include=["deleted"]`
+        Soft-deleted blobs or snapshots can be restored using :func:`~azure.storage.blob.aio.BlobClient.undelete()`
+
+        The maximum number of blobs that can be deleted in a single request is 256.
+
+        :param blobs:
+            The blobs to delete. This can be a single blob, or multiple values can
+            be supplied, where each value is either the name of the blob (str) or BlobProperties.
+
+            .. note::
+                When the blob type is dict, the following key/value rules apply.
+
+                blob name:
+                    key: 'name', value type: str
+                snapshot you want to delete:
+                    key: 'snapshot', value type: str
+                version id:
+                    key: 'version_id', value type: str
+                whether to delete snapshots when deleting blob:
+                    key: 'delete_snapshots', value: 'include' or 'only'
+                if the blob modified or not:
+                    key: 'if_modified_since', 'if_unmodified_since', value type: datetime
+                etag:
+                    key: 'etag', value type: str
+                match the etag or not:
+                    key: 'match_condition', value type: MatchConditions
+                tags match condition:
+                    key: 'if_tags_match_condition', value type: str
+                lease:
+                    key: 'lease_id', value type: Union[str, LeaseClient]
+                timeout for subrequest:
+                    key: 'timeout', value type: int
+
+        :type blobs: Union[str, Dict[str, Any], BlobProperties]
+        :keyword str delete_snapshots:
+            Required if a blob has associated snapshots. Values include:
+             - "only": Deletes only the blobs snapshots.
+             - "include": Deletes the blob along with all snapshots.
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str if_tags_match_condition:
+            Specify a SQL where clause on blob tags to operate only on blob with a matching value.
+            eg. ``\"\\\"tagname\\\"='my tag'\"``
+
+            .. versionadded:: 12.4.0
+
+        :keyword bool raise_on_any_failure:
+            This is a boolean param which defaults to True. When this is set, an exception
+            is raised even if there is a single operation failure. For optimal performance,
+            this should be set to False.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :return: An async iterator of responses, one for each blob in order.
+        :rtype: AsyncIterator[~azure.core.pipeline.transport.AsyncHttpResponse]
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/blob_samples_common_async.py
+                :start-after: [START delete_multiple_blobs]
+                :end-before: [END delete_multiple_blobs]
+                :language: python
+                :dedent: 12
+                :caption: Deleting multiple blobs.
+        """
+        if len(blobs) == 0:
+            return AsyncList([])
+        if self._is_localhost:
+            kwargs['url_prepend'] = self.account_name
+
+        reqs, options = _generate_delete_blobs_options(
+            self._query_str,
+            self.container_name,
+            self._client,
+            *blobs,
+            **kwargs
+        )
+
+        return cast(AsyncIterator[AsyncHttpResponse], await self._batch_send(*reqs, **options))
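A batch-delete sketch, assuming a `container` client; with `raise_on_any_failure=False` the per-blob sub-responses can be inspected instead of raising:

```python
responses = await container.delete_blobs("a.txt", "b.txt", raise_on_any_failure=False)
async for response in responses:
    # 202 on success; e.g. 404 if the blob was already gone.
    print(response.status_code)
```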
+
+    @distributed_trace_async
+    async def set_standard_blob_tier_blobs(
+        self, standard_blob_tier: Union[str, 'StandardBlobTier'],
+        *blobs: Union[str, Dict[str, Any], BlobProperties],
+        **kwargs: Any
+    ) -> AsyncIterator[AsyncHttpResponse]:
+        """This operation sets the tier on block blobs.
+
+        A block blob's tier determines Hot/Cool/Archive storage type.
+        This operation does not update the blob's ETag.
+
+        The maximum number of blobs that can be updated in a single request is 256.
+
+        :param standard_blob_tier:
+            Indicates the tier to be set on all blobs. Options include 'Hot', 'Cool',
+            'Archive'. The hot tier is optimized for storing data that is accessed
+            frequently. The cool storage tier is optimized for storing data that
+            is infrequently accessed and stored for at least a month. The archive
+            tier is optimized for storing data that is rarely accessed and stored
+            for at least six months with flexible latency requirements.
+
+            .. note::
+                If you want to set a different tier on different blobs, please set this positional parameter to None.
+                The blob tier on each BlobProperties instance will then be used.
+
+        :type standard_blob_tier: str or ~azure.storage.blob.StandardBlobTier
+        :param blobs:
+            The blobs with which to interact. This can be a single blob, or multiple values can
+            be supplied, where each value is either the name of the blob (str) or BlobProperties.
+
+            .. note::
+                When the blob type is dict, the following key/value rules apply.
+
+                blob name:
+                    key: 'name', value type: str
+                standard blob tier:
+                    key: 'blob_tier', value type: StandardBlobTier
+                rehydrate priority:
+                    key: 'rehydrate_priority', value type: RehydratePriority
+                lease:
+                    key: 'lease_id', value type: Union[str, LeaseClient]
+                tags match condition:
+                    key: 'if_tags_match_condition', value type: str
+                timeout for subrequest:
+                    key: 'timeout', value type: int
+
+        :type blobs: str or dict(str, Any) or ~azure.storage.blob.BlobProperties
+        :keyword ~azure.storage.blob.RehydratePriority rehydrate_priority:
+            Indicates the priority with which to rehydrate an archived blob
+        :keyword str if_tags_match_condition:
+            Specify a SQL where clause on blob tags to operate only on blob with a matching value.
+            eg. ``\"\\\"tagname\\\"='my tag'\"``
+
+            .. versionadded:: 12.4.0
+
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :keyword bool raise_on_any_failure:
+            This is a boolean param which defaults to True. When this is set, an exception
+            is raised even if there is a single operation failure. For optimal performance,
+            this should be set to False.
+        :return: An async iterator of responses, one for each blob in order.
+        :rtype: AsyncIterator[~azure.core.pipeline.transport.AsyncHttpResponse]
+        """
+        if self._is_localhost:
+            kwargs['url_prepend'] = self.account_name
+        reqs, options = _generate_set_tiers_options(
+            self._query_str,
+            self.container_name,
+            standard_blob_tier,
+            self._client,
+            *blobs,
+            **kwargs)
+
+        return cast(AsyncIterator[AsyncHttpResponse], await self._batch_send(*reqs, **options))
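A tiering sketch under the same assumptions, moving several blobs to the Cool tier in one batch request:

```python
from azure.storage.blob import StandardBlobTier

responses = await container.set_standard_blob_tier_blobs(
    StandardBlobTier.COOL, "a.txt", "b.txt", raise_on_any_failure=False)
async for response in responses:
    print(response.status_code)
```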
+
+    @distributed_trace_async
+    async def set_premium_page_blob_tier_blobs(
+        self, premium_page_blob_tier: Union[str, 'PremiumPageBlobTier'],
+        *blobs: Union[str, Dict[str, Any], BlobProperties],
+        **kwargs: Any
+    ) -> AsyncIterator[AsyncHttpResponse]:
+        """Sets the page blob tiers on the blobs. This API is only supported for page blobs on premium accounts.
+
+        The maximum number of blobs that can be updated in a single request is 256.
+
+        :param premium_page_blob_tier:
+            A page blob tier value to set on all blobs. The tier correlates to the size of the
+            blob and number of allowed IOPS. This is only applicable to page blobs on
+            premium storage accounts.
+
+            .. note::
+                If you want to set a different tier on different blobs, please set this positional parameter to None.
+                The blob tier on each BlobProperties instance will then be used.
+
+        :type premium_page_blob_tier: ~azure.storage.blob.PremiumPageBlobTier
+        :param blobs: The blobs with which to interact. This can be a single blob, or multiple values can
+            be supplied, where each value is either the name of the blob (str) or BlobProperties.
+
+            .. note::
+                When the blob type is dict, the following key/value rules apply.
+
+                blob name:
+                    key: 'name', value type: str
+                premium blob tier:
+                    key: 'blob_tier', value type: PremiumPageBlobTier
+                lease:
+                    key: 'lease_id', value type: Union[str, LeaseClient]
+                timeout for subrequest:
+                    key: 'timeout', value type: int
+
+        :type blobs: str or dict(str, Any) or ~azure.storage.blob.BlobProperties
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :keyword bool raise_on_any_failure:
+            This is a boolean param which defaults to True. When this is set, an exception
+            is raised even if there is a single operation failure. For optimal performance,
+            this should be set to False.
+        :return: An async iterator of responses, one for each blob in order.
+        :rtype: AsyncIterator[~azure.core.pipeline.transport.AsyncHttpResponse]
+        """
+        if self._is_localhost:
+            kwargs['url_prepend'] = self.account_name
+        reqs, options = _generate_set_tiers_options(
+            self._query_str,
+            self.container_name,
+            premium_page_blob_tier,
+            self._client,
+            *blobs,
+            **kwargs)
+
+        return cast(AsyncIterator[AsyncHttpResponse], await self._batch_send(*reqs, **options))
+
+    def get_blob_client(
+        self, blob: str,
+        snapshot: Optional[str] = None,
+        *,
+        version_id: Optional[str] = None
+    ) -> BlobClient:
+        """Get a client to interact with the specified blob.
+
+        The blob need not already exist.
+
+        :param str blob:
+            The blob with which to interact.
+        :param str snapshot:
+            The optional blob snapshot on which to operate. This can be the snapshot ID string
+            or the response returned from :func:`~BlobClient.create_snapshot()`.
+        :keyword str version_id: The version id parameter is an opaque DateTime value that, when present,
+            specifies the version of the blob to operate on.
+        :returns: A BlobClient.
+        :rtype: ~azure.storage.blob.aio.BlobClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/blob_samples_containers_async.py
+                :start-after: [START get_blob_client]
+                :end-before: [END get_blob_client]
+                :language: python
+                :dedent: 12
+                :caption: Get the blob client.
+        """
+        if isinstance(blob, BlobProperties):
+            warnings.warn(
+                "The use of a 'BlobProperties' instance for param blob is deprecated. " +
+                "Please use 'BlobProperties.name' or any other str input type instead.",
+                DeprecationWarning
+            )
+            blob_name = blob.get('name')
+        else:
+            blob_name = blob
+        _pipeline = AsyncPipeline(
+            transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access
+            policies=self._pipeline._impl_policies  # type: ignore [arg-type] # pylint: disable = protected-access
+        )
+        return BlobClient(
+            self.url, container_name=self.container_name, blob_name=blob_name, snapshot=snapshot,
+            credential=self.credential, api_version=self.api_version, _configuration=self._config,
+            _pipeline=_pipeline, _location_mode=self._location_mode, _hosts=self._hosts,
+            require_encryption=self.require_encryption, encryption_version=self.encryption_version,
+            key_encryption_key=self.key_encryption_key, key_resolver_function=self.key_resolver_function,
+            version_id=version_id)
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/blob/aio/_download_async.py b/.venv/lib/python3.12/site-packages/azure/storage/blob/aio/_download_async.py
new file mode 100644
index 00000000..4676c2e6
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/blob/aio/_download_async.py
@@ -0,0 +1,872 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+# pylint: disable=invalid-overridden-method
+# mypy: disable-error-code=override
+
+import asyncio
+import codecs
+import sys
+import warnings
+from io import BytesIO, StringIO
+from itertools import islice
+from typing import (
+    Any, AsyncIterator, Awaitable,
+    Generator, Callable, cast, Dict,
+    Generic, IO, Optional, overload,
+    Tuple, TypeVar, Union, TYPE_CHECKING
+)
+
+from azure.core.exceptions import DecodeError, HttpResponseError, IncompleteReadError
+
+from .._shared.request_handlers import validate_and_format_range_headers
+from .._shared.response_handlers import parse_length_from_content_range, process_storage_error
+from .._deserialize import deserialize_blob_properties, get_page_ranges_result
+from .._download import process_range_and_offset, _ChunkDownloader
+from .._encryption import (
+    adjust_blob_size_for_encryption,
+    decrypt_blob,
+    is_encryption_v2,
+    parse_encryption_data
+)
+
+if TYPE_CHECKING:
+    from codecs import IncrementalDecoder
+    from .._encryption import _EncryptionData
+    from .._generated.aio import AzureBlobStorage
+    from .._models import BlobProperties
+    from .._shared.models import StorageConfiguration
+
+
+T = TypeVar('T', bytes, str)
+
+
+async def process_content(data: Any, start_offset: int, end_offset: int, encryption: Dict[str, Any]) -> bytes:
+    if data is None:
+        raise ValueError("Response cannot be None.")
+    await data.response.load_body()
+    content = cast(bytes, data.response.body())
+    if encryption.get('key') is not None or encryption.get('resolver') is not None:
+        try:
+            return decrypt_blob(
+                encryption.get('required') or False,
+                encryption.get('key'),
+                encryption.get('resolver'),
+                content,
+                start_offset,
+                end_offset,
+                data.response.headers)
+        except Exception as error:
+            raise HttpResponseError(
+                message="Decryption failed.",
+                response=data.response,
+                error=error) from error
+    return content
+
+
+class _AsyncChunkDownloader(_ChunkDownloader):
+    def __init__(self, **kwargs: Any) -> None:
+        super(_AsyncChunkDownloader, self).__init__(**kwargs)
+        self.stream_lock_async = asyncio.Lock() if kwargs.get('parallel') else None
+        self.progress_lock_async = asyncio.Lock() if kwargs.get('parallel') else None
+
+    async def process_chunk(self, chunk_start: int) -> None:
+        chunk_start, chunk_end = self._calculate_range(chunk_start)
+        chunk_data, _ = await self._download_chunk(chunk_start, chunk_end - 1)
+        length = chunk_end - chunk_start
+        if length > 0:
+            await self._write_to_stream(chunk_data, chunk_start)
+            await self._update_progress(length)
+
+    async def yield_chunk(self, chunk_start: int) -> Tuple[bytes, int]:
+        chunk_start, chunk_end = self._calculate_range(chunk_start)
+        return await self._download_chunk(chunk_start, chunk_end - 1)
+
+    async def _update_progress(self, length: int) -> None:
+        if self.progress_lock_async:
+            async with self.progress_lock_async:
+                self.progress_total += length
+        else:
+            self.progress_total += length
+
+        if self.progress_hook:
+            await cast(Callable[[int, Optional[int]], Awaitable[Any]], self.progress_hook)(
+                self.progress_total, self.total_size)
+
+    async def _write_to_stream(self, chunk_data: bytes, chunk_start: int) -> None:
+        if self.stream_lock_async:
+            async with self.stream_lock_async:
+                self.stream.seek(self.stream_start + (chunk_start - self.start_index))
+                self.stream.write(chunk_data)
+        else:
+            self.stream.write(chunk_data)
+
+    async def _download_chunk(self, chunk_start: int, chunk_end: int) -> Tuple[bytes, int]:
+        if self.encryption_options is None:
+            raise ValueError("Required argument is missing: encryption_options")
+        download_range, offset = process_range_and_offset(
+            chunk_start, chunk_end, chunk_end, self.encryption_options, self.encryption_data
+        )
+
+        # No need to download an empty chunk from the server if there's no data in the chunk to be downloaded.
+        # Instead, optimize by creating the empty chunk locally when this condition is met.
+        if self._do_optimize(download_range[0], download_range[1]):
+            content_length = download_range[1] - download_range[0] + 1
+            chunk_data = b"\x00" * content_length
+        else:
+            range_header, range_validation = validate_and_format_range_headers(
+                download_range[0],
+                download_range[1],
+                check_content_md5=self.validate_content
+            )
+
+            retry_active = True
+            retry_total = 3
+            while retry_active:
+                try:
+                    _, response = await cast(Awaitable[Any], self.client.download(
+                        range=range_header,
+                        range_get_content_md5=range_validation,
+                        validate_content=self.validate_content,
+                        data_stream_total=self.total_size,
+                        download_stream_current=self.progress_total,
+                        **self.request_options
+                    ))
+                except HttpResponseError as error:
+                    process_storage_error(error)
+
+                try:
+                    chunk_data = await process_content(response, offset[0], offset[1], self.encryption_options)
+                    retry_active = False
+                except (IncompleteReadError, HttpResponseError, DecodeError) as error:
+                    retry_total -= 1
+                    if retry_total <= 0:
+                        raise HttpResponseError(error, error=error) from error
+                    await asyncio.sleep(1)
+            content_length = response.content_length
+
+            # This makes sure that if_match is set so that we can validate
+            # that subsequent downloads are to an unmodified blob
+            if self.request_options.get('modified_access_conditions'):
+                self.request_options['modified_access_conditions'].if_match = response.properties.etag
+
+        return chunk_data, content_length
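The loop above bounds transient read failures to three attempts with a fixed one-second pause before re-raising. A standalone sketch of the same pattern, independent of this module (all names here are illustrative):

```python
import asyncio
from typing import Awaitable, Callable, Optional

async def read_with_retries(
    fetch: Callable[[], Awaitable[bytes]],
    attempts: int = 3,
    backoff: float = 1.0,
) -> bytes:
    # Retry a transient-failure-prone async read a bounded number of times.
    last_error: Optional[Exception] = None
    for _ in range(attempts):
        try:
            return await fetch()
        except (ConnectionError, TimeoutError) as error:
            last_error = error
            await asyncio.sleep(backoff)  # fixed backoff, mirroring the loop above
    raise RuntimeError("read failed after retries") from last_error
```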
+
+
+class _AsyncChunkIterator(object):
+    """Async iterator for chunks in blob download stream."""
+
+    def __init__(self, size: int, content: bytes, downloader: Optional[_AsyncChunkDownloader], chunk_size: int) -> None:
+        self.size = size
+        self._chunk_size = chunk_size
+        self._current_content = content
+        self._iter_downloader = downloader
+        self._iter_chunks: Optional[Generator[int, None, None]] = None
+        self._complete = size == 0
+
+    def __len__(self) -> int:
+        return self.size
+
+    def __iter__(self) -> None:
+        raise TypeError("Async stream must be iterated asynchronously.")
+
+    def __aiter__(self) -> AsyncIterator[bytes]:
+        return self
+
+    # Iterate through responses.
+    async def __anext__(self) -> bytes:
+        if self._complete:
+            raise StopAsyncIteration("Download complete")
+        if not self._iter_downloader:
+            # cut the data obtained from initial GET into chunks
+            if len(self._current_content) > self._chunk_size:
+                return self._get_chunk_data()
+            self._complete = True
+            return self._current_content
+
+        if not self._iter_chunks:
+            self._iter_chunks = self._iter_downloader.get_chunk_offsets()
+
+        # initial GET result still has more than _chunk_size bytes of data
+        if len(self._current_content) >= self._chunk_size:
+            return self._get_chunk_data()
+
+        try:
+            chunk = next(self._iter_chunks)
+            self._current_content += (await self._iter_downloader.yield_chunk(chunk))[0]
+        except StopIteration as exc:
+            self._complete = True
+            # it's likely that there is some data left in self._current_content
+            if self._current_content:
+                return self._current_content
+            raise StopAsyncIteration("Download complete") from exc
+
+        return self._get_chunk_data()
+
+    def _get_chunk_data(self) -> bytes:
+        chunk_data = self._current_content[: self._chunk_size]
+        self._current_content = self._current_content[self._chunk_size:]
+        return chunk_data
+
+
+class StorageStreamDownloader(Generic[T]):  # pylint: disable=too-many-instance-attributes
+    """
+    A streaming object to download from Azure Storage.
+    """
+
+    name: str
+    """The name of the blob being downloaded."""
+    container: str
+    """The name of the container where the blob is."""
+    properties: "BlobProperties"
+    """The properties of the blob being downloaded. If only a range of the data is being
+    downloaded, this will be reflected in the properties."""
+    size: int
+    """The size of the total data in the stream. This will be the byte range if specified,
+    otherwise the total size of the blob."""
+
+    def __init__(
+        self,
+        clients: "AzureBlobStorage" = None,  # type: ignore [assignment]
+        config: "StorageConfiguration" = None,  # type: ignore [assignment]
+        start_range: Optional[int] = None,
+        end_range: Optional[int] = None,
+        validate_content: bool = None,  # type: ignore [assignment]
+        encryption_options: Dict[str, Any] = None,  # type: ignore [assignment]
+        max_concurrency: int = 1,
+        name: str = None,  # type: ignore [assignment]
+        container: str = None,  # type: ignore [assignment]
+        encoding: Optional[str] = None,
+        download_cls: Optional[Callable] = None,
+        **kwargs: Any
+    ) -> None:
+        self.name = name
+        self.container = container
+        self.size = 0
+
+        self._clients = clients
+        self._config = config
+        self._start_range = start_range
+        self._end_range = end_range
+        self._max_concurrency = max_concurrency
+        self._encoding = encoding
+        self._validate_content = validate_content
+        self._encryption_options = encryption_options or {}
+        self._progress_hook = kwargs.pop('progress_hook', None)
+        self._request_options = kwargs
+        self._response = None
+        self._location_mode = None
+        self._current_content: Union[str, bytes] = b''
+        self._file_size = 0
+        self._non_empty_ranges = None
+        self._encryption_data: Optional["_EncryptionData"] = None
+
+        # The content download offset, after any processing (decryption), in bytes
+        self._download_offset = 0
+        # The raw download offset, before processing (decryption), in bytes
+        self._raw_download_offset = 0
+        # The offset the stream has been read to in bytes or chars depending on mode
+        self._read_offset = 0
+        # The offset into current_content that has been consumed in bytes or chars depending on mode
+        self._current_content_offset = 0
+
+        self._text_mode: Optional[bool] = None
+        self._decoder: Optional["IncrementalDecoder"] = None
+        # Whether the current content is the first chunk of download content or not
+        self._first_chunk = True
+        self._download_start = self._start_range or 0
+
+        # The cls is passed in via download_cls to avoid conflicting arg name with Generic.__new__
+        # but needs to be changed to cls in the request options.
+        self._request_options['cls'] = download_cls
+
+    def __len__(self):
+        return self.size
+
+    async def _get_encryption_data_request(self) -> None:
+        # Save current request cls
+        download_cls = self._request_options.pop('cls', None)
+        # Adjust cls for get_properties
+        self._request_options['cls'] = deserialize_blob_properties
+
+        properties = cast("BlobProperties", await self._clients.blob.get_properties(**self._request_options))
+        # This will return None if there is no encryption metadata or there are parsing errors.
+        # That is acceptable here; the proper error will be caught and surfaced when attempting
+        # to decrypt the blob.
+        self._encryption_data = parse_encryption_data(properties.metadata)
+
+        # Restore cls for download
+        self._request_options['cls'] = download_cls
+
+    async def _setup(self) -> None:
+        if self._encryption_options.get("key") is not None or self._encryption_options.get("resolver") is not None:
+            await self._get_encryption_data_request()
+
+        # The service only provides transactional MD5s for chunks under 4MB.
+        # If validate_content is on, get only self._config.max_chunk_get_size for the first
+        # chunk so a transactional MD5 can be retrieved.
+        first_get_size = (
+            self._config.max_single_get_size if not self._validate_content else self._config.max_chunk_get_size
+        )
+        initial_request_start = self._start_range if self._start_range is not None else 0
+        if self._end_range is not None and self._end_range - initial_request_start < first_get_size:
+            initial_request_end = self._end_range
+        else:
+            initial_request_end = initial_request_start + first_get_size - 1
+
+        # pylint: disable-next=attribute-defined-outside-init
+        self._initial_range, self._initial_offset = process_range_and_offset(
+            initial_request_start,
+            initial_request_end,
+            self._end_range,
+            self._encryption_options,
+            self._encryption_data
+        )
+
+        self._response = await self._initial_request()
+        self.properties = cast("BlobProperties", self._response.properties)  # type: ignore [attr-defined]
+        self.properties.name = self.name
+        self.properties.container = self.container
+
+        # Set the content length to the download size instead of the size of the last range
+        self.properties.size = self.size
+        self.properties.content_range = (f"bytes {self._download_start}-"
+                                         f"{self._end_range if self._end_range is not None else self._file_size - 1}/"
+                                         f"{self._file_size}")
+
+        # Overwrite the content MD5 as it is the MD5 for the last range instead
+        # of the stored MD5
+        # TODO: Set to the stored MD5 when the service returns this
+        self.properties.content_md5 = None  # type: ignore [attr-defined]
+
+    @property
+    def _download_complete(self):
+        if is_encryption_v2(self._encryption_data):
+            return self._download_offset >= self.size
+        return self._raw_download_offset >= self.size
+
+    async def _initial_request(self):
+        range_header, range_validation = validate_and_format_range_headers(
+            self._initial_range[0],
+            self._initial_range[1],
+            start_range_required=False,
+            end_range_required=False,
+            check_content_md5=self._validate_content
+        )
+
+        retry_active = True
+        retry_total = 3
+        while retry_active:
+            try:
+                location_mode, response = cast(Tuple[Optional[str], Any], await self._clients.blob.download(
+                    range=range_header,
+                    range_get_content_md5=range_validation,
+                    validate_content=self._validate_content,
+                    data_stream_total=None,
+                    download_stream_current=0,
+                    **self._request_options
+                ))
+
+                # Check the location we read from to ensure we use the same one
+                # for subsequent requests.
+                self._location_mode = location_mode
+
+                # Parse the total file size and adjust the download size if ranges
+                # were specified
+                self._file_size = parse_length_from_content_range(response.properties.content_range)
+                if self._file_size is None:
+                    raise ValueError("Required Content-Range response header is missing or malformed.")
+                # Remove any extra encryption data size from blob size
+                self._file_size = adjust_blob_size_for_encryption(self._file_size, self._encryption_data)
+
+                if self._end_range is not None and self._start_range is not None:
+                    # Use the length unless it is over the end of the file
+                    self.size = min(self._file_size - self._start_range, self._end_range - self._start_range + 1)
+                elif self._start_range is not None:
+                    self.size = self._file_size - self._start_range
+                else:
+                    self.size = self._file_size
+
+            except HttpResponseError as error:
+                if self._start_range is None and error.response and error.status_code == 416:
+                    # Get range will fail on an empty file. If the user did not
+                    # request a range, do a regular get request in order to get
+                    # any properties.
+                    try:
+                        _, response = cast(Tuple[Optional[Any], Any], await self._clients.blob.download(
+                            validate_content=self._validate_content,
+                            data_stream_total=0,
+                            download_stream_current=0,
+                            **self._request_options))
+                    except HttpResponseError as e:
+                        process_storage_error(e)
+
+                    # Set the download size to empty
+                    self.size = 0
+                    self._file_size = 0
+                else:
+                    process_storage_error(error)
+
+            try:
+                if self.size == 0:
+                    self._current_content = b""
+                else:
+                    self._current_content = await process_content(
+                        response,
+                        self._initial_offset[0],
+                        self._initial_offset[1],
+                        self._encryption_options
+                    )
+                retry_active = False
+            except (IncompleteReadError, HttpResponseError, DecodeError) as error:
+                retry_total -= 1
+                if retry_total <= 0:
+                    raise HttpResponseError(error, error=error) from error
+                await asyncio.sleep(1)
+        self._download_offset += len(self._current_content)
+        self._raw_download_offset += response.content_length
+
+        # get page ranges to optimize downloading sparse page blob
+        if response.properties.blob_type == 'PageBlob':
+            try:
+                page_ranges = await self._clients.page_blob.get_page_ranges()
+                self._non_empty_ranges = get_page_ranges_result(page_ranges)[0]
+            except HttpResponseError:
+                pass
+
+        if not self._download_complete and self._request_options.get("modified_access_conditions"):
+            self._request_options["modified_access_conditions"].if_match = response.properties.etag
+
+        return response
+
+    def chunks(self) -> AsyncIterator[bytes]:
+        """
+        Iterate over chunks in the download stream. Note, the iterator returned will
+        iterate over the entire download content, regardless of any data that was
+        previously read. If the stream has been partially read, some of that data
+        may be re-downloaded by the iterator.
+
+        :returns: An async iterator of the chunks in the download stream.
+        :rtype: AsyncIterator[bytes]
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/blob_samples_hello_world_async.py
+                :start-after: [START download_a_blob_in_chunk]
+                :end-before: [END download_a_blob_in_chunk]
+                :language: python
+                :dedent: 16
+                :caption: Download a blob using chunks().
+        """
+        if self._text_mode:
+            raise ValueError("Stream has been partially read in text mode. chunks is not supported in text mode.")
+        if self._encoding:
+            warnings.warn("Encoding is ignored with chunks as only bytes are supported.")
+
+        iter_downloader = None
+        # If we still have the first chunk buffered, use it. Otherwise, download all content again
+        if not self._first_chunk or not self._download_complete:
+            if self._first_chunk:
+                start = self._download_start + len(self._current_content)
+                current_progress = len(self._current_content)
+            else:
+                start = self._download_start
+                current_progress = 0
+
+            end = self._download_start + self.size
+
+            iter_downloader = _AsyncChunkDownloader(
+                client=self._clients.blob,
+                non_empty_ranges=self._non_empty_ranges,
+                total_size=self.size,
+                chunk_size=self._config.max_chunk_get_size,
+                current_progress=current_progress,
+                start_range=start,
+                end_range=end,
+                validate_content=self._validate_content,
+                encryption_options=self._encryption_options,
+                encryption_data=self._encryption_data,
+                use_location=self._location_mode,
+                **self._request_options
+            )
+
+        initial_content = self._current_content if self._first_chunk else b''
+        return _AsyncChunkIterator(
+            size=self.size,
+            content=cast(bytes, initial_content),
+            downloader=iter_downloader,
+            chunk_size=self._config.max_chunk_get_size)
+
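+    # Hedged usage sketch (not SDK code; `blob_client` and `process` are
+    # assumptions): stream a blob chunk by chunk without buffering the whole
+    # payload in memory.
+    #
+    #     downloader = await blob_client.download_blob()
+    #     async for chunk in downloader.chunks():
+    #         process(chunk)  # bytes, at most max_chunk_get_size per chunk
+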
+    @overload
+    async def read(self, size: int = -1) -> T:
+        ...
+
+    @overload
+    async def read(self, *, chars: Optional[int] = None) -> T:
+        ...
+
+    # pylint: disable-next=too-many-statements,too-many-branches
+    async def read(self, size: int = -1, *, chars: Optional[int] = None) -> T:
+        """
+        Read the specified number of bytes or chars from the stream. If `encoding`
+        was specified on `download_blob`, it is recommended to use the
+        chars parameter to read a specific number of chars to avoid decoding
+        errors. If size/chars is unspecified or negative, all bytes will be read.
+
+        :param int size:
+            The number of bytes to download from the stream. Leave unspecified
+            or set negative to download all bytes.
+        :keyword Optional[int] chars:
+            The number of chars to download from the stream. Leave unspecified
+            or set negative to download all chars. Note, this can only be used
+            when encoding is specified on `download_blob`.
+        :returns:
+            The requested data as bytes or a string if encoding was specified. If
+            the return value is empty, there is no more data to read.
+        :rtype: T
+        """
+        if size > -1 and self._encoding:
+            warnings.warn(
+                "Size parameter specified with text encoding enabled. It is recommended to use chars "
+                "to read a specific number of characters instead."
+            )
+        if size > -1 and chars is not None:
+            raise ValueError("Cannot specify both size and chars.")
+        if not self._encoding and chars is not None:
+            raise ValueError("Must specify encoding to read chars.")
+        if self._text_mode and size > -1:
+            raise ValueError("Stream has been partially read in text mode. Please use chars.")
+        if self._text_mode is False and chars is not None:
+            raise ValueError("Stream has been partially read in bytes mode. Please use size.")
+
+        # Empty blob or already read to the end
+        if (size == 0 or chars == 0 or
+                (self._download_complete and self._current_content_offset >= len(self._current_content))):
+            return b'' if not self._encoding else ''  # type: ignore [return-value]
+
+        if not self._text_mode and chars is not None and self._encoding is not None:
+            self._text_mode = True
+            self._decoder = codecs.getincrementaldecoder(self._encoding)('strict')
+            self._current_content = self._decoder.decode(
+                cast(bytes, self._current_content), final=self._download_complete)
+        elif self._text_mode is None:
+            self._text_mode = False
+
+        output_stream: Union[BytesIO, StringIO]
+        if self._text_mode:
+            output_stream = StringIO()
+            size = sys.maxsize if chars is None or chars <= 0 else chars
+        else:
+            output_stream = BytesIO()
+            size = size if size > 0 else sys.maxsize
+        readall = size == sys.maxsize
+        count = 0
+
+        # Start by reading from current_content
+        start = self._current_content_offset
+        length = min(len(self._current_content) - self._current_content_offset, size - count)
+        read = output_stream.write(self._current_content[start:start + length])  # type: ignore [arg-type]
+
+        count += read
+        self._current_content_offset += read
+        self._read_offset += read
+        await self._check_and_report_progress()
+
+        remaining = size - count
+        if remaining > 0 and not self._download_complete:
+            # Create a downloader that can download the rest of the file
+            start = self._download_start + self._download_offset
+            end = self._download_start + self.size
+
+            parallel = self._max_concurrency > 1
+            downloader = _AsyncChunkDownloader(
+                client=self._clients.blob,
+                non_empty_ranges=self._non_empty_ranges,
+                total_size=self.size,
+                chunk_size=self._config.max_chunk_get_size,
+                current_progress=self._read_offset,
+                start_range=start,
+                end_range=end,
+                stream=output_stream,
+                parallel=parallel,
+                validate_content=self._validate_content,
+                encryption_options=self._encryption_options,
+                encryption_data=self._encryption_data,
+                use_location=self._location_mode,
+                progress_hook=self._progress_hook,
+                **self._request_options
+            )
+            self._first_chunk = False
+
+            # When reading all data, have the downloader read everything into the stream.
+            # Else, read one chunk at a time (using the downloader as an iterator) until
+            # the requested size is reached.
+            chunks_iter = downloader.get_chunk_offsets()
+            if readall and not self._text_mode:
+                running_futures: Any = [
+                    asyncio.ensure_future(downloader.process_chunk(d))
+                    for d in islice(chunks_iter, 0, self._max_concurrency)
+                ]
+                while running_futures:
+                    # Wait for some download to finish before adding a new one
+                    done, running_futures = await asyncio.wait(
+                        running_futures, return_when=asyncio.FIRST_COMPLETED)
+                    try:
+                        for task in done:
+                            task.result()
+                    except HttpResponseError as error:
+                        process_storage_error(error)
+                    try:
+                        for _ in range(0, len(done)):
+                            next_chunk = next(chunks_iter)
+                            running_futures.add(asyncio.ensure_future(downloader.process_chunk(next_chunk)))
+                    except StopIteration:
+                        break
+
+                if running_futures:
+                    # Wait for the remaining downloads to finish
+                    done, _running_futures = await asyncio.wait(running_futures)
+                    try:
+                        for task in done:
+                            task.result()
+                    except HttpResponseError as error:
+                        process_storage_error(error)
+
+                self._complete_read()
+
+            else:
+                while (chunk := next(chunks_iter, None)) is not None and remaining > 0:
+                    chunk_data, content_length = await downloader.yield_chunk(chunk)
+                    self._download_offset += len(chunk_data)
+                    self._raw_download_offset += content_length
+                    if self._text_mode and self._decoder is not None:
+                        self._current_content = self._decoder.decode(chunk_data, final=self._download_complete)
+                    else:
+                        self._current_content = chunk_data
+
+                    if remaining < len(self._current_content):
+                        read = output_stream.write(self._current_content[:remaining])  # type: ignore [arg-type]
+                    else:
+                        read = output_stream.write(self._current_content)  # type: ignore [arg-type]
+
+                    self._current_content_offset = read
+                    self._read_offset += read
+                    remaining -= read
+                    await self._check_and_report_progress()
+
+        data = output_stream.getvalue()
+        if not self._text_mode and self._encoding:
+            try:
+                # This is technically incorrect to do, but we have it for backwards compatibility.
+                data = cast(bytes, data).decode(self._encoding)
+            except UnicodeDecodeError:
+                warnings.warn(
+                    "Encountered a decoding error while decoding blob data from a partial read. "
+                    "Try using the `chars` keyword instead to read in text mode."
+                )
+                raise
+
+        return data  # type: ignore [return-value]
+
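+    # Hedged usage sketch (assumptions: `blob_client` exists and download_blob
+    # was called with encoding="utf-8"): reading by chars avoids splitting a
+    # multi-byte character across two read() calls.
+    #
+    #     downloader = await blob_client.download_blob(encoding="utf-8")
+    #     head = await downloader.read(chars=100)  # first 100 characters as str
+    #     rest = await downloader.read()           # remainder of the text
+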
+    async def readall(self) -> T:
+        """
+        Read the entire contents of this blob.
+        This operation is blocking until all data is downloaded.
+
+        :returns: The requested data as bytes or a string if encoding was specified.
+        :rtype: T
+        """
+        return await self.read()
+
+    async def readinto(self, stream: IO[bytes]) -> int:
+        """Download the contents of this blob to a stream.
+
+        :param IO[bytes] stream:
+            The stream to download to. This can be an open file-handle,
+            or any writable stream. The stream must be seekable if the download
+            uses more than one parallel connection.
+        :returns: The number of bytes read.
+        :rtype: int
+        """
+        if self._text_mode:
+            raise ValueError("Stream has been partially read in text mode. readinto is not supported in text mode.")
+        if self._encoding:
+            warnings.warn("Encoding is ignored with readinto as only byte streams are supported.")
+
+        # the stream must be seekable if parallel download is required
+        parallel = self._max_concurrency > 1
+        if parallel:
+            error_message = "Target stream handle must be seekable."
+            if sys.version_info >= (3,) and not stream.seekable():
+                raise ValueError(error_message)
+
+            try:
+                stream.seek(stream.tell())
+            except (NotImplementedError, AttributeError) as exc:
+                raise ValueError(error_message) from exc
+
+        # If some data has been streamed using `read`, only stream the remaining data
+        remaining_size = self.size - self._read_offset
+        # Already read to the end
+        if remaining_size <= 0:
+            return 0
+
+        # Write the current content to the user stream
+        current_remaining = len(self._current_content) - self._current_content_offset
+        start = self._current_content_offset
+        count = stream.write(cast(bytes, self._current_content[start:start + current_remaining]))
+
+        self._current_content_offset += count
+        self._read_offset += count
+        if self._progress_hook:
+            await self._progress_hook(self._read_offset, self.size)
+
+        # If all the data was already downloaded/buffered
+        if self._download_complete:
+            return remaining_size
+
+        data_start = self._download_start + self._read_offset
+        data_end = self._download_start + self.size
+
+        downloader = _AsyncChunkDownloader(
+            client=self._clients.blob,
+            non_empty_ranges=self._non_empty_ranges,
+            total_size=self.size,
+            chunk_size=self._config.max_chunk_get_size,
+            current_progress=self._read_offset,
+            start_range=data_start,
+            end_range=data_end,
+            stream=stream,
+            parallel=parallel,
+            validate_content=self._validate_content,
+            encryption_options=self._encryption_options,
+            encryption_data=self._encryption_data,
+            use_location=self._location_mode,
+            progress_hook=self._progress_hook,
+            **self._request_options
+        )
+
+        dl_tasks = downloader.get_chunk_offsets()
+        running_futures = {
+            asyncio.ensure_future(downloader.process_chunk(d))
+            for d in islice(dl_tasks, 0, self._max_concurrency)
+        }
+        while running_futures:
+            # Wait for some download to finish before adding a new one
+            done, running_futures = await asyncio.wait(
+                running_futures, return_when=asyncio.FIRST_COMPLETED)
+            try:
+                for task in done:
+                    task.result()
+            except HttpResponseError as error:
+                process_storage_error(error)
+            try:
+                for _ in range(0, len(done)):
+                    next_chunk = next(dl_tasks)
+                    running_futures.add(asyncio.ensure_future(downloader.process_chunk(next_chunk)))
+            except StopIteration:
+                break
+
+        if running_futures:
+            # Wait for the remaining downloads to finish
+            done, _running_futures = await asyncio.wait(running_futures)
+            try:
+                for task in done:
+                    task.result()
+            except HttpResponseError as error:
+                process_storage_error(error)
+
+        self._complete_read()
+        return remaining_size
+
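+    # Hedged usage sketch (the path is a placeholder): download straight into a
+    # binary file handle; the handle must be seekable when max_concurrency > 1
+    # because parallel chunk writers seek before writing.
+    #
+    #     downloader = await blob_client.download_blob(max_concurrency=4)
+    #     with open("/tmp/blob.bin", "wb") as fh:
+    #         count = await downloader.readinto(fh)
+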
+    def _complete_read(self):
+        """Adjusts all offsets to the end of the download."""
+        self._download_offset = self.size
+        self._raw_download_offset = self.size
+        self._read_offset = self.size
+        self._current_content_offset = len(self._current_content)
+
+    async def _check_and_report_progress(self):
+        """Reports progress if necessary."""
+        # Only report progress at the end of each chunk and use download_offset to always report
+        # progress in terms of (approximate) byte count.
+        if self._progress_hook and self._current_content_offset == len(self._current_content):
+            await self._progress_hook(self._download_offset, self.size)
+
+    async def content_as_bytes(self, max_concurrency=1):
+        """DEPRECATED: Download the contents of this file.
+
+        This operation is blocking until all data is downloaded.
+
+        This method is deprecated, use func:`readall` instead.
+
+        :param int max_concurrency:
+            The number of parallel connections with which to download.
+        :returns: The contents of the file as bytes.
+        :rtype: bytes
+        """
+        warnings.warn(
+            "content_as_bytes is deprecated, use readall instead",
+            DeprecationWarning
+        )
+        if self._text_mode:
+            raise ValueError("Stream has been partially read in text mode. "
+                             "content_as_bytes is not supported in text mode.")
+
+        self._max_concurrency = max_concurrency
+        return await self.readall()
+
+    async def content_as_text(self, max_concurrency=1, encoding="UTF-8"):
+        """DEPRECATED: Download the contents of this blob, and decode as text.
+
+        This operation is blocking until all data is downloaded.
+
+        This method is deprecated, use :func:`readall` instead.
+
+        :param int max_concurrency:
+            The number of parallel connections with which to download.
+        :param str encoding:
+            Text encoding used to decode the downloaded bytes. Default is UTF-8.
+        :returns: The content of the blob as a str.
+        :rtype: str
+        """
+        warnings.warn(
+            "content_as_text is deprecated, use readall instead",
+            DeprecationWarning
+        )
+        if self._text_mode:
+            raise ValueError("Stream has been partially read in text mode. "
+                             "content_as_text is not supported in text mode.")
+
+        self._max_concurrency = max_concurrency
+        self._encoding = encoding
+        return await self.readall()
+
+    async def download_to_stream(self, stream, max_concurrency=1):
+        """DEPRECATED: Download the contents of this blob to a stream.
+
+        This method is deprecated, use :func:`readinto` instead.
+
+        :param IO[T] stream:
+            The stream to download to. This can be an open file-handle,
+            or any writable stream. The stream must be seekable if the download
+            uses more than one parallel connection.
+        :param int max_concurrency:
+            The number of parallel connections with which to download.
+        :returns: The properties of the downloaded blob.
+        :rtype: Any
+        """
+        warnings.warn(
+            "download_to_stream is deprecated, use readinto instead",
+            DeprecationWarning
+        )
+        if self._text_mode:
+            raise ValueError("Stream has been partially read in text mode. "
+                             "download_to_stream is not supported in text mode.")
+
+        self._max_concurrency = max_concurrency
+        await self.readinto(stream)
+        return self.properties
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/blob/aio/_encryption_async.py b/.venv/lib/python3.12/site-packages/azure/storage/blob/aio/_encryption_async.py
new file mode 100644
index 00000000..97334d96
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/blob/aio/_encryption_async.py
@@ -0,0 +1,72 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+
+import inspect
+import sys
+from io import BytesIO
+from typing import IO
+
+from .._encryption import _GCM_REGION_DATA_LENGTH, encrypt_data_v2
+
+
+class GCMBlobEncryptionStream:
+    """
+    An async stream that performs AES-GCM encryption on the given data as
+    it's streamed. Data is read and encrypted in regions. The stream
+    will use the same encryption key and will generate a guaranteed unique
+    nonce for each encryption region.
+    """
+    def __init__(
+        self, content_encryption_key: bytes,
+        data_stream: IO[bytes],
+    ) -> None:
+        """
+        :param bytes content_encryption_key: The encryption key to use.
+        :param IO[bytes] data_stream: The data stream to read data from.
+        """
+        self.content_encryption_key = content_encryption_key
+        self.data_stream = data_stream
+
+        self.offset = 0
+        self.current = b''
+        self.nonce_counter = 0
+
+    async def read(self, size: int = -1) -> bytes:
+        """
+        Read data from the stream. Specify -1 to read all available data.
+
+        :param int size: The amount of data to read. Defaults to -1 for all data.
+        :return: The bytes read.
+        :rtype: bytes
+        """
+        result = BytesIO()
+        remaining = sys.maxsize if size == -1 else size
+
+        while remaining > 0:
+            # Start by reading from current
+            if len(self.current) > 0:
+                read = min(remaining, len(self.current))
+                result.write(self.current[:read])
+
+                self.current = self.current[read:]
+                self.offset += read
+                remaining -= read
+
+            if remaining > 0:
+                # Read one region of data and encrypt it
+                data = self.data_stream.read(_GCM_REGION_DATA_LENGTH)
+                if inspect.isawaitable(data):
+                    data = await data
+
+                if len(data) == 0:
+                    # No more data to read
+                    break
+
+                self.current = encrypt_data_v2(data, self.nonce_counter, self.content_encryption_key)
+                # IMPORTANT: Must increment the nonce each time.
+                self.nonce_counter += 1
+
+        return result.getvalue()
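+
+# Hedged usage sketch (not SDK code; the key and plaintext are placeholders):
+# wrap a plaintext stream so that reads come back AES-GCM encrypted one region
+# at a time, with the nonce counter incrementing per region.
+#
+#     import os
+#     cek = os.urandom(32)  # 256-bit content encryption key
+#     stream = GCMBlobEncryptionStream(cek, BytesIO(b"some plaintext"))
+#     ciphertext = await stream.read()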
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/blob/aio/_lease_async.py b/.venv/lib/python3.12/site-packages/azure/storage/blob/aio/_lease_async.py
new file mode 100644
index 00000000..b5bfad95
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/blob/aio/_lease_async.py
@@ -0,0 +1,346 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+# pylint: disable=docstring-keyword-should-match-keyword-only
+
+import uuid
+from typing import Any, Optional, Union, TYPE_CHECKING
+
+from azure.core.exceptions import HttpResponseError
+from azure.core.tracing.decorator_async import distributed_trace_async
+
+from .._shared.response_handlers import process_storage_error, return_response_headers
+from .._serialize import get_modify_conditions
+
+if TYPE_CHECKING:
+    from azure.storage.blob.aio import BlobClient, ContainerClient
+    from datetime import datetime
+
+
+class BlobLeaseClient(): # pylint: disable=client-accepts-api-version-keyword
+    """Creates a new BlobLeaseClient.
+
+    This client provides lease operations on a BlobClient or ContainerClient.
+
+    :param client: The client of the blob or container to lease.
+    :type client: Union[BlobClient, ContainerClient]
+    :param lease_id: A string representing the lease ID of an existing lease. This value does not need to be
+        specified in order to acquire a new lease, or break one.
+    :type lease_id: Optional[str]
+    """
+
+    id: str
+    """The ID of the lease currently being maintained. This will be `None` if no
+    lease has yet been acquired."""
+    etag: Optional[str]
+    """The ETag of the lease currently being maintained. This will be `None` if no
+    lease has yet been acquired or modified."""
+    last_modified: Optional["datetime"]
+    """The last modified timestamp of the lease currently being maintained.
+    This will be `None` if no lease has yet been acquired or modified."""
+
+    def __init__( # pylint: disable=missing-client-constructor-parameter-credential, missing-client-constructor-parameter-kwargs
+        self, client: Union["BlobClient", "ContainerClient"],
+        lease_id: Optional[str] = None
+    ) -> None:
+        self.id = lease_id or str(uuid.uuid4())
+        self.last_modified = None
+        self.etag = None
+        if hasattr(client, 'blob_name'):
+            self._client = client._client.blob
+        elif hasattr(client, 'container_name'):
+            self._client = client._client.container
+        else:
+            raise TypeError("Lease must use either BlobClient or ContainerClient.")
+
+    def __enter__(self):
+        raise TypeError("Async lease must use 'async with'.")
+
+    def __exit__(self, *args):
+        self.release()
+
+    async def __aenter__(self):
+        return self
+
+    async def __aexit__(self, *args):
+        await self.release()
+
+    @distributed_trace_async
+    async def acquire(self, lease_duration: int = -1, **kwargs: Any) -> None:
+        """Requests a new lease.
+
+        If the container or blob does not have an active lease, the Blob service
+        creates a lease on it and returns a new lease ID.
+
+        :param int lease_duration:
+            Specifies the duration of the lease, in seconds, or negative one
+            (-1) for a lease that never expires. A non-infinite lease can be
+            between 15 and 60 seconds. A lease duration cannot be changed
+            using renew or change. Default is -1 (infinite lease).
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword str if_tags_match_condition:
+            Specify a SQL where clause on blob tags to operate only on blob with a matching value.
+            eg. ``\"\\\"tagname\\\"='my tag'\"``
+
+            .. versionadded:: 12.4.0
+
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :rtype: None
+        """
+        mod_conditions = get_modify_conditions(kwargs)
+        try:
+            response: Any = await self._client.acquire_lease(
+                timeout=kwargs.pop('timeout', None),
+                duration=lease_duration,
+                proposed_lease_id=self.id,
+                modified_access_conditions=mod_conditions,
+                cls=return_response_headers,
+                **kwargs)
+        except HttpResponseError as error:
+            process_storage_error(error)
+        self.id = response.get('lease_id')
+        self.last_modified = response.get('last_modified')
+        self.etag = response.get('etag')
+
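+    # Hedged usage sketch (`blob_client` is an assumption): acquire a 15-second
+    # lease and let the async context manager release it on exit.
+    #
+    #     async with BlobLeaseClient(blob_client) as lease:
+    #         await lease.acquire(lease_duration=15)
+    #         ...  # operations on the blob now pass lease=lease
+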
+    @distributed_trace_async
+    async def renew(self, **kwargs: Any) -> None:
+        """Renews the lease.
+
+        The lease can be renewed if the lease ID specified in the
+        lease client matches that associated with the container or blob. Note that
+        the lease may be renewed even if it has expired as long as the container
+        or blob has not been leased again since the expiration of that lease. When you
+        renew a lease, the lease duration clock resets.
+
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword str if_tags_match_condition:
+            Specify a SQL where clause on blob tags to operate only on blob with a matching value.
+            eg. ``\"\\\"tagname\\\"='my tag'\"``
+
+            .. versionadded:: 12.4.0
+
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :return: None
+        """
+        mod_conditions = get_modify_conditions(kwargs)
+        try:
+            response: Any = await self._client.renew_lease(
+                lease_id=self.id,
+                timeout=kwargs.pop('timeout', None),
+                modified_access_conditions=mod_conditions,
+                cls=return_response_headers,
+                **kwargs)
+        except HttpResponseError as error:
+            process_storage_error(error)
+        self.etag = response.get('etag')
+        self.id = response.get('lease_id')
+        self.last_modified = response.get('last_modified')
+
+    @distributed_trace_async
+    async def release(self, **kwargs: Any) -> None:
+        """Release the lease.
+
+        The lease may be released if the client lease id specified matches
+        that associated with the container or blob. Releasing the lease allows another client
+        to immediately acquire the lease for the container or blob as soon as the release is complete.
+
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword str if_tags_match_condition:
+            Specify a SQL where clause on blob tags to operate only on blob with a matching value.
+            eg. ``\"\\\"tagname\\\"='my tag'\"``
+
+            .. versionadded:: 12.4.0
+
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :return: None
+        """
+        mod_conditions = get_modify_conditions(kwargs)
+        try:
+            response: Any = await self._client.release_lease(
+                lease_id=self.id,
+                timeout=kwargs.pop('timeout', None),
+                modified_access_conditions=mod_conditions,
+                cls=return_response_headers,
+                **kwargs)
+        except HttpResponseError as error:
+            process_storage_error(error)
+        self.etag = response.get('etag')
+        self.id = response.get('lease_id')
+        self.last_modified = response.get('last_modified')
+
+    @distributed_trace_async
+    async def change(self, proposed_lease_id: str, **kwargs: Any) -> None:
+        """Change the lease ID of an active lease.
+
+        :param str proposed_lease_id:
+            Proposed lease ID, in a GUID string format. The Blob service returns 400
+            (Invalid request) if the proposed lease ID is not in the correct format.
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword str if_tags_match_condition:
+            Specify a SQL where clause on blob tags to operate only on blob with a matching value.
+            eg. ``\"\\\"tagname\\\"='my tag'\"``
+
+            .. versionadded:: 12.4.0
+
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :return: None
+        """
+        mod_conditions = get_modify_conditions(kwargs)
+        try:
+            response: Any = await self._client.change_lease(
+                lease_id=self.id,
+                proposed_lease_id=proposed_lease_id,
+                timeout=kwargs.pop('timeout', None),
+                modified_access_conditions=mod_conditions,
+                cls=return_response_headers,
+                **kwargs)
+        except HttpResponseError as error:
+            process_storage_error(error)
+        self.etag = response.get('etag')
+        self.id = response.get('lease_id')
+        self.last_modified = response.get('last_modified')
+
+    @distributed_trace_async
+    async def break_lease(self, lease_break_period: Optional[int] = None, **kwargs: Any) -> int:
+        """Break the lease, if the container or blob has an active lease.
+
+        Once a lease is broken, it cannot be renewed. Any authorized request can break the lease;
+        the request is not required to specify a matching lease ID. When a lease
+        is broken, the lease break period is allowed to elapse, during which time
+        no lease operation except break and release can be performed on the container or blob.
+        When a lease is successfully broken, the response indicates the interval
+        in seconds until a new lease can be acquired.
+
+        :param int lease_break_period:
+            This is the proposed duration of seconds that the lease
+            should continue before it is broken, between 0 and 60 seconds. This
+            break period is only used if it is shorter than the time remaining
+            on the lease. If longer, the time remaining on the lease is used.
+            A new lease will not be available before the break period has
+            expired, but the lease may be held for longer than the break
+            period. If this header does not appear with a break
+            operation, a fixed-duration lease breaks after the remaining lease
+            period elapses, and an infinite lease breaks immediately.
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str if_tags_match_condition:
+            Specify a SQL where clause on blob tags to operate only on blob with a matching value.
+            eg. ``\"\\\"tagname\\\"='my tag'\"``
+
+            .. versionadded:: 12.4.0
+
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :return: Approximate time remaining in the lease period, in seconds.
+        :rtype: int
+        """
+        mod_conditions = get_modify_conditions(kwargs)
+        try:
+            response: Any = await self._client.break_lease(
+                timeout=kwargs.pop('timeout', None),
+                break_period=lease_break_period,
+                modified_access_conditions=mod_conditions,
+                cls=return_response_headers,
+                **kwargs)
+        except HttpResponseError as error:
+            process_storage_error(error)
+        return response.get('lease_time') # type: ignore
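+
+# Hedged usage sketch (not SDK code): break an active lease and wait out the
+# returned break period before another client re-acquires.
+#
+#     seconds_left = await lease.break_lease(lease_break_period=10)
+#     await asyncio.sleep(seconds_left)
+#     await BlobLeaseClient(blob_client).acquire()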
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/blob/aio/_list_blobs_helper.py b/.venv/lib/python3.12/site-packages/azure/storage/blob/aio/_list_blobs_helper.py
new file mode 100644
index 00000000..1731a318
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/blob/aio/_list_blobs_helper.py
@@ -0,0 +1,249 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+
+from typing import Callable, List, Optional
+from urllib.parse import unquote
+
+from azure.core.async_paging import AsyncItemPaged, AsyncPageIterator
+from azure.core.exceptions import HttpResponseError
+
+from .._deserialize import (
+    get_blob_properties_from_generated_code,
+    load_many_xml_nodes,
+    load_xml_int,
+    load_xml_string
+)
+from .._generated.models import BlobItemInternal, BlobPrefix as GenBlobPrefix
+from .._models import BlobProperties
+from .._shared.models import DictMixin
+from .._shared.response_handlers import (
+    process_storage_error,
+    return_context_and_deserialized,
+    return_raw_deserialized
+)
+
+
+class BlobPropertiesPaged(AsyncPageIterator):
+    """An Iterable of Blob properties."""
+
+    service_endpoint: Optional[str]
+    """The service URL."""
+    prefix: Optional[str]
+    """A blob name prefix being used to filter the list."""
+    marker: Optional[str]
+    """The continuation token of the current page of results."""
+    results_per_page: Optional[int]
+    """The maximum number of results retrieved per API call."""
+    continuation_token: Optional[str]
+    """The continuation token to retrieve the next page of results."""
+    location_mode: Optional[str]
+    """The location mode being used to list results. The available
+    options include "primary" and "secondary"."""
+    current_page: Optional[List[BlobProperties]]
+    """The current page of listed results."""
+    container: Optional[str]
+    """The container that the blobs are listed from."""
+    delimiter: Optional[str]
+    """A delimiting character used for hierarchy listing."""
+    command: Callable
+    """Function to retrieve the next page of items."""
+
+    def __init__(
+        self, command: Callable,
+        container: Optional[str] = None,
+        prefix: Optional[str] = None,
+        results_per_page: Optional[int] = None,
+        continuation_token: Optional[str] = None,
+        delimiter: Optional[str] = None,
+        location_mode: Optional[str] = None,
+    ) -> None:
+        super(BlobPropertiesPaged, self).__init__(
+            get_next=self._get_next_cb,
+            extract_data=self._extract_data_cb,
+            continuation_token=continuation_token or ""
+        )
+        self._command = command
+        self.service_endpoint = None
+        self.prefix = prefix
+        self.marker = None
+        self.results_per_page = results_per_page
+        self.container = container
+        self.delimiter = delimiter
+        self.current_page = None
+        self.location_mode = location_mode
+
+    async def _get_next_cb(self, continuation_token):
+        try:
+            return await self._command(
+                prefix=self.prefix,
+                marker=continuation_token or None,
+                maxresults=self.results_per_page,
+                cls=return_context_and_deserialized,
+                use_location=self.location_mode)
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    async def _extract_data_cb(self, get_next_return):
+        self.location_mode, self._response = get_next_return
+        self.service_endpoint = self._response.service_endpoint
+        self.prefix = self._response.prefix
+        self.marker = self._response.marker
+        self.results_per_page = self._response.max_results
+        self.container = self._response.container_name
+        self.current_page = [self._build_item(item) for item in self._response.segment.blob_items]
+
+        return self._response.next_marker or None, self.current_page
+
+    def _build_item(self, item):
+        if isinstance(item, BlobProperties):
+            return item
+        if isinstance(item, BlobItemInternal):
+            blob = get_blob_properties_from_generated_code(item)
+            blob.container = self.container  # type: ignore [assignment]
+            return blob
+        return item
+
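+# Hedged usage sketch (`container_client` is an assumption): this pager is
+# normally consumed indirectly through ContainerClient.list_blobs rather than
+# constructed by hand.
+#
+#     async for blob in container_client.list_blobs(name_starts_with="logs/"):
+#         print(blob.name, blob.size)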
+
+class BlobNamesPaged(AsyncPageIterator):
+    """An Iterable of Blob names."""
+
+    service_endpoint: Optional[str]
+    """The service URL."""
+    prefix: Optional[str]
+    """A blob name prefix being used to filter the list."""
+    marker: Optional[str]
+    """The continuation token of the current page of results."""
+    results_per_page: Optional[int]
+    """The maximum number of blobs to retrieve per call."""
+    continuation_token: Optional[str]
+    """The continuation token to retrieve the next page of results."""
+    location_mode: Optional[str]
+    """The location mode being used to list results. The available
+    options include "primary" and "secondary"."""
+    current_page: Optional[List[str]]
+    """The current page of listed results."""
+    container: Optional[str]
+    """The container that the blobs are listed from."""
+    command: Callable
+    """Function to retrieve the next page of items."""
+
+    def __init__(
+        self, command: Callable,
+        container: Optional[str] = None,
+        prefix: Optional[str] = None,
+        results_per_page: Optional[int] = None,
+        continuation_token: Optional[str] = None,
+        location_mode: Optional[str] = None
+    ) -> None:
+        super(BlobNamesPaged, self).__init__(
+            get_next=self._get_next_cb,
+            extract_data=self._extract_data_cb,
+            continuation_token=continuation_token or ""
+        )
+        self._command = command
+        self.service_endpoint = None
+        self.prefix = prefix
+        self.marker = None
+        self.results_per_page = results_per_page
+        self.container = container
+        self.current_page = None
+        self.location_mode = location_mode
+
+    async def _get_next_cb(self, continuation_token):
+        try:
+            return await self._command(
+                prefix=self.prefix,
+                marker=continuation_token or None,
+                maxresults=self.results_per_page,
+                cls=return_raw_deserialized,
+                use_location=self.location_mode)
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    async def _extract_data_cb(self, get_next_return):
+        self.location_mode, self._response = get_next_return
+        self.service_endpoint = self._response.get('ServiceEndpoint')
+        self.prefix = load_xml_string(self._response, 'Prefix')
+        self.marker = load_xml_string(self._response, 'Marker')
+        self.results_per_page = load_xml_int(self._response, 'MaxResults')
+        self.container = self._response.get('ContainerName')
+
+        blobs = load_many_xml_nodes(self._response, 'Blob', wrapper='Blobs')
+        self.current_page = [load_xml_string(blob, 'Name') for blob in blobs]
+
+        next_marker = load_xml_string(self._response, 'NextMarker')
+        return next_marker or None, self.current_page
+
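+
+# A hedged usage sketch: BlobNamesPaged backs ContainerClient.list_blob_names(),
+# which yields plain strings and skips per-blob property deserialization, so it
+# is cheaper when only names are needed (the container name is hypothetical).
+async def _example_iterate_blob_names(conn_str: str) -> None:
+    from azure.storage.blob.aio import ContainerClient
+    async with ContainerClient.from_connection_string(conn_str, "my-container") as client:
+        async for name in client.list_blob_names(name_starts_with="logs/"):
+            print(name)
+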
+
+class BlobPrefix(AsyncItemPaged, DictMixin):
+    """An Iterable of Blob properties.
+
+    Returned from walk_blobs when a delimiter is used.
+    Can be thought of as a virtual blob directory."""
+
+    name: str
+    """The prefix, or "directory name" of the blob."""
+    service_endpoint: Optional[str]
+    """The service URL."""
+    prefix: str
+    """A blob name prefix being used to filter the list."""
+    marker: Optional[str]
+    """The continuation token of the current page of results."""
+    results_per_page: Optional[int]
+    """The maximum number of results retrieved per API call."""
+    next_marker: Optional[str]
+    """The continuation token to retrieve the next page of results."""
+    location_mode: str
+    """The location mode being used to list results. The available
+    options include "primary" and "secondary"."""
+    current_page: Optional[List[BlobProperties]]
+    """The current page of listed results."""
+    delimiter: str
+    """A delimiting character used for hierarchy listing."""
+    command: Callable
+    """Function to retrieve the next page of items."""
+    container: str
+    """The name of the container."""
+
+    def __init__(self, *args, **kwargs):
+        super(BlobPrefix, self).__init__(*args, page_iterator_class=BlobPrefixPaged, **kwargs)
+        self.name = kwargs.get('prefix')
+        self.prefix = kwargs.get('prefix')
+        self.results_per_page = kwargs.get('results_per_page')
+        self.container = kwargs.get('container')
+        self.delimiter = kwargs.get('delimiter')
+        self.location_mode = kwargs.get('location_mode')
+
+
+class BlobPrefixPaged(BlobPropertiesPaged):
+    def __init__(self, *args, **kwargs):
+        super(BlobPrefixPaged, self).__init__(*args, **kwargs)
+        self.name = self.prefix
+
+    async def _extract_data_cb(self, get_next_return):
+        continuation_token, _ = await super(BlobPrefixPaged, self)._extract_data_cb(get_next_return)
+        self.current_page = self._response.segment.blob_prefixes + self._response.segment.blob_items
+        self.current_page = [self._build_item(item) for item in self.current_page]
+        self.delimiter = self._response.delimiter
+
+        return continuation_token, self.current_page
+
+    def _build_item(self, item):
+        item = super(BlobPrefixPaged, self)._build_item(item)
+        if isinstance(item, GenBlobPrefix):
+            if item.name.encoded:
+                name = unquote(item.name.content)
+            else:
+                name = item.name.content
+            return BlobPrefix(
+                self._command,
+                container=self.container,
+                prefix=name,
+                results_per_page=self.results_per_page,
+                location_mode=self.location_mode)
+        return item
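+
+
+# A hedged sketch of hierarchy listing: walk_blobs() with a delimiter yields a
+# BlobPrefix for each "virtual directory" alongside the blobs at that level,
+# so a recursive walk over an aio ContainerClient looks roughly like this:
+async def _example_walk(client, prefix: str = "", depth: int = 0) -> None:
+    async for item in client.walk_blobs(name_starts_with=prefix, delimiter='/'):
+        if isinstance(item, BlobPrefix):
+            print("  " * depth + item.prefix)
+            await _example_walk(client, prefix=item.prefix, depth=depth + 1)
+        else:
+            print("  " * depth + item.name)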
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/blob/aio/_models.py b/.venv/lib/python3.12/site-packages/azure/storage/blob/aio/_models.py
new file mode 100644
index 00000000..27d1d8fa
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/blob/aio/_models.py
@@ -0,0 +1,199 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+# pylint: disable=too-few-public-methods
+
+from typing import Callable, List, Optional, TYPE_CHECKING
+
+from azure.core.async_paging import AsyncPageIterator
+from azure.core.exceptions import HttpResponseError
+
+from .._deserialize import parse_tags
+from .._generated.models import FilterBlobItem
+from .._models import ContainerProperties, FilteredBlob, parse_page_list
+from .._shared.response_handlers import process_storage_error, return_context_and_deserialized
+
+if TYPE_CHECKING:
+    from .._models import BlobProperties
+
+
+class ContainerPropertiesPaged(AsyncPageIterator):
+    """An Iterable of Container properties.
+
+    :param Callable command: Function to retrieve the next page of items.
+    :param Optional[str] prefix: Filters the results to return only containers whose names
+        begin with the specified prefix.
+    :param Optional[int] results_per_page: The maximum number of container names to retrieve per
+        call.
+    :param Optional[str] continuation_token: An opaque continuation token.
+    """
+
+    service_endpoint: Optional[str]
+    """The service URL."""
+    prefix: Optional[str]
+    """A container name prefix being used to filter the list."""
+    marker: Optional[str]
+    """The continuation token of the current page of results."""
+    results_per_page: Optional[int]
+    """The maximum number of results retrieved per API call."""
+    continuation_token: Optional[str]
+    """The continuation token to retrieve the next page of results."""
+    location_mode: Optional[str]
+    """The location mode being used to list results. The available
+        options include "primary" and "secondary"."""
+    current_page: List[ContainerProperties]
+    """The current page of listed results."""
+
+    def __init__(self, command, prefix=None, results_per_page=None, continuation_token=None):
+        super(ContainerPropertiesPaged, self).__init__(
+            get_next=self._get_next_cb,
+            extract_data=self._extract_data_cb,
+            continuation_token=continuation_token or ""
+        )
+        self._command = command
+        self.service_endpoint = None
+        self.prefix = prefix
+        self.marker = None
+        self.results_per_page = results_per_page
+        self.location_mode = None
+        self.current_page = []
+
+    async def _get_next_cb(self, continuation_token):
+        try:
+            return await self._command(
+                marker=continuation_token or None,
+                maxresults=self.results_per_page,
+                cls=return_context_and_deserialized,
+                use_location=self.location_mode)
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    async def _extract_data_cb(self, get_next_return):
+        self.location_mode, self._response = get_next_return
+        self.service_endpoint = self._response.service_endpoint
+        self.prefix = self._response.prefix
+        self.marker = self._response.marker
+        self.results_per_page = self._response.max_results
+        self.current_page = [self._build_item(item) for item in self._response.container_items]
+
+        return self._response.next_marker or None, self.current_page
+
+    @staticmethod
+    def _build_item(item):
+        return ContainerProperties._from_generated(item)  # pylint: disable=protected-access
+
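+
+# A hedged usage sketch: ContainerPropertiesPaged backs
+# BlobServiceClient.list_containers(); callers iterate the wrapping
+# AsyncItemPaged rather than this class directly.
+async def _example_list_containers(conn_str: str) -> None:
+    from azure.storage.blob.aio import BlobServiceClient
+    async with BlobServiceClient.from_connection_string(conn_str) as service:
+        async for container in service.list_containers(name_starts_with="data-"):
+            print(container.name, container.last_modified)
+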
+
+class FilteredBlobPaged(AsyncPageIterator):
+    """An Iterable of Blob properties.
+
+    :param Callable command: Function to retrieve the next page of items.
+    :param Optional[str] container: The name of the container.
+    :param Optional[int] results_per_page: The maximum number of blobs to retrieve per
+        call.
+    :param Optional[str] continuation_token: An opaque continuation token.
+    :param Optional[str] location_mode:
+        Specifies the location the request should be sent to. This mode only applies for RA-GRS accounts
+        which allow secondary read access. Options include 'primary' or 'secondary'.
+    """
+
+    service_endpoint: Optional[str]
+    """The service URL."""
+    prefix: Optional[str]
+    """A blob name prefix being used to filter the list."""
+    marker: Optional[str]
+    """The continuation token of the current page of results."""
+    results_per_page: Optional[int]
+    """The maximum number of results retrieved per API call."""
+    continuation_token: Optional[str]
+    """The continuation token to retrieve the next page of results."""
+    location_mode: Optional[str]
+    """The location mode being used to list results. The available
+        options include "primary" and "secondary"."""
+    current_page: Optional[List["BlobProperties"]]
+    """The current page of listed results."""
+    container: Optional[str]
+    """The container that the blobs are listed from."""
+
+    def __init__(
+        self, command: Callable,
+        container: Optional[str] = None,
+        results_per_page: Optional[int] = None,
+        continuation_token: Optional[str] = None,
+        location_mode: Optional[str] = None
+    ) -> None:
+        super(FilteredBlobPaged, self).__init__(
+            get_next=self._get_next_cb,
+            extract_data=self._extract_data_cb,
+            continuation_token=continuation_token or ""
+        )
+        self._command = command
+        self.service_endpoint = None
+        self.marker = continuation_token
+        self.results_per_page = results_per_page
+        self.container = container
+        self.current_page = None
+        self.location_mode = location_mode
+
+    async def _get_next_cb(self, continuation_token):
+        try:
+            return await self._command(
+                marker=continuation_token or None,
+                maxresults=self.results_per_page,
+                cls=return_context_and_deserialized,
+                use_location=self.location_mode)
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    async def _extract_data_cb(self, get_next_return):
+        self.location_mode, self._response = get_next_return
+        self.service_endpoint = self._response.service_endpoint
+        self.marker = self._response.next_marker
+        self.current_page = [self._build_item(item) for item in self._response.blobs]
+
+        return self._response.next_marker or None, self.current_page
+
+    @staticmethod
+    def _build_item(item):
+        if isinstance(item, FilterBlobItem):
+            tags = parse_tags(item.tags)
+            blob = FilteredBlob(name=item.name, container_name=item.container_name, tags=tags)
+            return blob
+        return item
+
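+
+# A hedged usage sketch: FilteredBlobPaged backs
+# BlobServiceClient.find_blobs_by_tags(); each item is a FilteredBlob carrying
+# the blob name, container name, and matching tags (the tag expression below
+# is a hypothetical example).
+async def _example_find_by_tags(conn_str: str) -> None:
+    from azure.storage.blob.aio import BlobServiceClient
+    async with BlobServiceClient.from_connection_string(conn_str) as service:
+        async for blob in service.find_blobs_by_tags("\"project\"='alpha'"):
+            print(blob.container_name, blob.name, blob.tags)
+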
+
+class PageRangePaged(AsyncPageIterator):
+    def __init__(self, command, results_per_page=None, continuation_token=None):
+        super(PageRangePaged, self).__init__(
+            get_next=self._get_next_cb,
+            extract_data=self._extract_data_cb,
+            continuation_token=continuation_token or ""
+        )
+        self._command = command
+        self.results_per_page = results_per_page
+        self.location_mode = None
+        self.current_page = []
+
+    async def _get_next_cb(self, continuation_token):
+        try:
+            return await self._command(
+                marker=continuation_token or None,
+                maxresults=self.results_per_page,
+                cls=return_context_and_deserialized,
+                use_location=self.location_mode)
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    async def _extract_data_cb(self, get_next_return):
+        self.location_mode, self._response = get_next_return
+        self.current_page = self._build_page(self._response)
+
+        return self._response.next_marker or None, self.current_page
+
+    @staticmethod
+    def _build_page(response):
+        if not response:
+            raise StopIteration
+
+        return parse_page_list(response)
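+
+
+# A hedged usage sketch: PageRangePaged backs BlobClient.list_page_ranges() on
+# page blobs; each item describes a valid or cleared page range (container and
+# blob names are hypothetical).
+async def _example_list_page_ranges(conn_str: str) -> None:
+    from azure.storage.blob.aio import BlobClient
+    blob = BlobClient.from_connection_string(conn_str, "my-container", "disk.vhd")
+    async with blob:
+        async for page_range in blob.list_page_ranges():
+            print(page_range.start, page_range.end, page_range.cleared)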
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/blob/aio/_upload_helpers.py b/.venv/lib/python3.12/site-packages/azure/storage/blob/aio/_upload_helpers.py
new file mode 100644
index 00000000..794beee3
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/blob/aio/_upload_helpers.py
@@ -0,0 +1,334 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+
+import inspect
+from io import SEEK_SET, UnsupportedOperation
+from typing import Any, cast, Dict, IO, Optional, TypeVar, TYPE_CHECKING
+
+from azure.core.exceptions import HttpResponseError, ResourceModifiedError
+
+from ._encryption_async import GCMBlobEncryptionStream
+from .._encryption import (
+    encrypt_blob,
+    get_adjusted_upload_size,
+    get_blob_encryptor_and_padder,
+    generate_blob_encryption_data,
+    _ENCRYPTION_PROTOCOL_V1,
+    _ENCRYPTION_PROTOCOL_V2
+)
+from .._generated.models import (
+    AppendPositionAccessConditions,
+    BlockLookupList,
+    ModifiedAccessConditions
+)
+from .._shared.response_handlers import process_storage_error, return_response_headers
+from .._shared.uploads_async import (
+    AppendBlobChunkUploader,
+    BlockBlobChunkUploader,
+    PageBlobChunkUploader,
+    upload_data_chunks,
+    upload_substream_blocks
+)
+from .._upload_helpers import _any_conditions, _convert_mod_error
+
+if TYPE_CHECKING:
+    from .._generated.aio.operations import AppendBlobOperations, BlockBlobOperations, PageBlobOperations
+    from .._shared.models import StorageConfiguration
+    BlobLeaseClient = TypeVar("BlobLeaseClient")
+
+
+async def upload_block_blob(  # pylint: disable=too-many-locals, too-many-statements
+    client: "BlockBlobOperations",
+    stream: IO,
+    overwrite: bool,
+    encryption_options: Dict[str, Any],
+    blob_settings: "StorageConfiguration",
+    headers: Dict[str, Any],
+    validate_content: bool,
+    max_concurrency: Optional[int],
+    length: Optional[int] = None,
+    **kwargs: Any
+) -> Dict[str, Any]:
+    try:
+        if not overwrite and not _any_conditions(**kwargs):
+            kwargs['modified_access_conditions'].if_none_match = '*'
+        adjusted_count = length
+        if (encryption_options.get('key') is not None) and (adjusted_count is not None):
+            adjusted_count = get_adjusted_upload_size(adjusted_count, encryption_options['version'])
+        blob_headers = kwargs.pop('blob_headers', None)
+        tier = kwargs.pop('standard_blob_tier', None)
+        blob_tags_string = kwargs.pop('blob_tags_string', None)
+
+        immutability_policy = kwargs.pop('immutability_policy', None)
+        immutability_policy_expiry = None if immutability_policy is None else immutability_policy.expiry_time
+        immutability_policy_mode = None if immutability_policy is None else immutability_policy.policy_mode
+        legal_hold = kwargs.pop('legal_hold', None)
+        progress_hook = kwargs.pop('progress_hook', None)
+
+        # Do single put if the size is smaller than config.max_single_put_size
+        if adjusted_count is not None and (adjusted_count <= blob_settings.max_single_put_size):
+            data = stream.read(length or -1)
+            if inspect.isawaitable(data):
+                data = await data
+            if not isinstance(data, bytes):
+                raise TypeError('Blob data should be of type bytes.')
+
+            if encryption_options.get('key'):
+                encryption_data, data = encrypt_blob(data, encryption_options['key'], encryption_options['version'])
+                headers['x-ms-meta-encryptiondata'] = encryption_data
+
+            response = cast(Dict[str, Any], await client.upload(
+                body=data,  # type: ignore [arg-type]
+                content_length=adjusted_count,
+                blob_http_headers=blob_headers,
+                headers=headers,
+                cls=return_response_headers,
+                validate_content=validate_content,
+                data_stream_total=adjusted_count,
+                upload_stream_current=0,
+                tier=tier.value if tier else None,
+                blob_tags_string=blob_tags_string,
+                immutability_policy_expiry=immutability_policy_expiry,
+                immutability_policy_mode=immutability_policy_mode,
+                legal_hold=legal_hold,
+                **kwargs))
+
+            if progress_hook:
+                await progress_hook(adjusted_count, adjusted_count)
+
+            return response
+
+        # Choose the chunked in-memory upload path (upload_data_chunks) over
+        # substream blocks when the stream is not seekable, or when byte
+        # buffering, content validation, required encryption, or a small
+        # block size rules out slicing the stream into substreams.
+        use_original_upload_path = blob_settings.use_byte_buffer or \
+            validate_content or encryption_options.get('required') or \
+            blob_settings.max_block_size < blob_settings.min_large_block_upload_threshold or \
+            hasattr(stream, 'seekable') and not stream.seekable() or \
+            not hasattr(stream, 'seek') or not hasattr(stream, 'tell')
+
+        if use_original_upload_path:
+            total_size = length
+            encryptor, padder = None, None
+            if encryption_options and encryption_options.get('key'):
+                cek, iv, encryption_metadata = generate_blob_encryption_data(
+                    encryption_options['key'],
+                    encryption_options['version'])
+                headers['x-ms-meta-encryptiondata'] = encryption_metadata
+
+                if encryption_options['version'] == _ENCRYPTION_PROTOCOL_V1:
+                    encryptor, padder = get_blob_encryptor_and_padder(cek, iv, True)
+
+                # Adjust total_size for encryption V2
+                if encryption_options['version'] == _ENCRYPTION_PROTOCOL_V2:
+                    total_size = adjusted_count
+                    # V2 wraps the data stream with an encryption stream
+                    if cek is None:
+                        raise ValueError("Generate encryption metadata failed. 'cek' is None.")
+                    stream = GCMBlobEncryptionStream(cek, stream)  # type: ignore [assignment]
+
+            block_ids = await upload_data_chunks(
+                service=client,
+                uploader_class=BlockBlobChunkUploader,
+                total_size=total_size,
+                chunk_size=blob_settings.max_block_size,
+                max_concurrency=max_concurrency,
+                stream=stream,
+                validate_content=validate_content,
+                progress_hook=progress_hook,
+                encryptor=encryptor,
+                padder=padder,
+                headers=headers,
+                **kwargs
+            )
+        else:
+            block_ids = await upload_substream_blocks(
+                service=client,
+                uploader_class=BlockBlobChunkUploader,
+                total_size=length,
+                chunk_size=blob_settings.max_block_size,
+                max_concurrency=max_concurrency,
+                stream=stream,
+                validate_content=validate_content,
+                progress_hook=progress_hook,
+                headers=headers,
+                **kwargs
+            )
+
+        block_lookup = BlockLookupList(committed=[], uncommitted=[], latest=[])
+        block_lookup.latest = block_ids
+        return cast(Dict[str, Any], await client.commit_block_list(
+            block_lookup,
+            blob_http_headers=blob_headers,
+            cls=return_response_headers,
+            validate_content=validate_content,
+            headers=headers,
+            tier=tier.value if tier else None,
+            blob_tags_string=blob_tags_string,
+            immutability_policy_expiry=immutability_policy_expiry,
+            immutability_policy_mode=immutability_policy_mode,
+            legal_hold=legal_hold,
+            **kwargs))
+    except HttpResponseError as error:
+        try:
+            process_storage_error(error)
+        except ResourceModifiedError as mod_error:
+            if not overwrite:
+                _convert_mod_error(mod_error)
+            raise
+
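+
+# A hedged note on the routing above: upload_block_blob() issues a single Put
+# Blob when the (encryption-adjusted) size fits max_single_put_size, otherwise
+# it stages blocks and commits a block list. Callers reach it through
+# BlobClient.upload_blob(); the names below are hypothetical.
+async def _example_upload_block_blob(conn_str: str, data: bytes) -> None:
+    from azure.storage.blob.aio import BlobClient
+    blob = BlobClient.from_connection_string(conn_str, "my-container", "report.bin")
+    async with blob:
+        await blob.upload_blob(data, overwrite=True, max_concurrency=4)
+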
+
+async def upload_page_blob(
+    client: "PageBlobOperations",
+    overwrite: bool,
+    encryption_options: Dict[str, Any],
+    blob_settings: "StorageConfiguration",
+    headers: Dict[str, Any],
+    stream: IO,
+    length: Optional[int] = None,
+    validate_content: Optional[bool] = None,
+    max_concurrency: Optional[int] = None,
+    **kwargs: Any
+) -> Dict[str, Any]:
+    try:
+        if not overwrite and not _any_conditions(**kwargs):
+            kwargs['modified_access_conditions'].if_none_match = '*'
+        if length is None or length < 0:
+            raise ValueError("A content length must be specified for a Page Blob.")
+        if length % 512 != 0:
+            raise ValueError(f"Invalid page blob size: {length}. "
+                             "The size must be aligned to a 512-byte boundary.")
+        tier = None
+        if kwargs.get('premium_page_blob_tier'):
+            premium_page_blob_tier = kwargs.pop('premium_page_blob_tier')
+            try:
+                tier = premium_page_blob_tier.value
+            except AttributeError:
+                tier = premium_page_blob_tier
+
+        if encryption_options and encryption_options.get('key'):
+            cek, iv, encryption_data = generate_blob_encryption_data(
+                encryption_options['key'],
+                encryption_options['version'])
+            headers['x-ms-meta-encryptiondata'] = encryption_data
+
+        blob_tags_string = kwargs.pop('blob_tags_string', None)
+        progress_hook = kwargs.pop('progress_hook', None)
+
+        response = cast(Dict[str, Any], await client.create(
+            content_length=0,
+            blob_content_length=length,
+            blob_sequence_number=None,  # type: ignore [arg-type]
+            blob_http_headers=kwargs.pop('blob_headers', None),
+            blob_tags_string=blob_tags_string,
+            tier=tier,
+            cls=return_response_headers,
+            headers=headers,
+            **kwargs))
+        if length == 0:
+            return cast(Dict[str, Any], response)
+
+        if encryption_options and encryption_options.get('key'):
+            if encryption_options['version'] == _ENCRYPTION_PROTOCOL_V1:
+                encryptor, padder = get_blob_encryptor_and_padder(cek, iv, False)
+                kwargs['encryptor'] = encryptor
+                kwargs['padder'] = padder
+
+        kwargs['modified_access_conditions'] = ModifiedAccessConditions(if_match=response['etag'])
+        return cast(Dict[str, Any], await upload_data_chunks(
+            service=client,
+            uploader_class=PageBlobChunkUploader,
+            total_size=length,
+            chunk_size=blob_settings.max_page_size,
+            stream=stream,
+            max_concurrency=max_concurrency,
+            validate_content=validate_content,
+            progress_hook=progress_hook,
+            headers=headers,
+            **kwargs))
+
+    except HttpResponseError as error:
+        try:
+            process_storage_error(error)
+        except ResourceModifiedError as mod_error:
+            if not overwrite:
+                _convert_mod_error(mod_error)
+            raise
+
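+
+# A hedged illustration of the 512-byte alignment rule enforced above: page
+# blob content must end on a page boundary, so arbitrary payloads need padding
+# before upload. This helper is illustrative, not part of the SDK.
+def _pad_to_page_boundary(data: bytes) -> bytes:
+    remainder = len(data) % 512
+    if remainder == 0:
+        return data
+    return data + b"\x00" * (512 - remainder)
+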
+
+async def upload_append_blob(  # pylint: disable=unused-argument
+    client: "AppendBlobOperations",
+    overwrite: bool,
+    encryption_options: Dict[str, Any],
+    blob_settings: "StorageConfiguration",
+    headers: Dict[str, Any],
+    stream: IO,
+    length: Optional[int] = None,
+    validate_content: Optional[bool] = None,
+    max_concurrency: Optional[int] = None,
+    **kwargs: Any
+) -> Dict[str, Any]:
+    try:
+        if length == 0:
+            return {}
+        blob_headers = kwargs.pop('blob_headers', None)
+        append_conditions = AppendPositionAccessConditions(
+            max_size=kwargs.pop('maxsize_condition', None),
+            append_position=None)
+        blob_tags_string = kwargs.pop('blob_tags_string', None)
+        progress_hook = kwargs.pop('progress_hook', None)
+
+        try:
+            if overwrite:
+                await client.create(
+                    content_length=0,
+                    blob_http_headers=blob_headers,
+                    headers=headers,
+                    blob_tags_string=blob_tags_string,
+                    **kwargs)
+            return cast(Dict[str, Any], await upload_data_chunks(
+                service=client,
+                uploader_class=AppendBlobChunkUploader,
+                total_size=length,
+                chunk_size=blob_settings.max_block_size,
+                stream=stream,
+                max_concurrency=max_concurrency,
+                validate_content=validate_content,
+                append_position_access_conditions=append_conditions,
+                progress_hook=progress_hook,
+                headers=headers,
+                **kwargs))
+        except HttpResponseError as error:
+            if error.response.status_code != 404:  # type: ignore [union-attr]
+                raise
+            # rewind the request body if it is a stream
+            if hasattr(stream, 'read'):
+                try:
+                    # attempt to rewind the body to the initial position
+                    stream.seek(0, SEEK_SET)
+                except UnsupportedOperation as exc:
+                    # if body is not seekable, then retry would not work
+                    raise error from exc
+            await client.create(
+                content_length=0,
+                blob_http_headers=blob_headers,
+                headers=headers,
+                blob_tags_string=blob_tags_string,
+                **kwargs)
+            return cast(Dict[str, Any], await upload_data_chunks(
+                service=client,
+                uploader_class=AppendBlobChunkUploader,
+                total_size=length,
+                chunk_size=blob_settings.max_block_size,
+                stream=stream,
+                max_concurrency=max_concurrency,
+                validate_content=validate_content,
+                append_position_access_conditions=append_conditions,
+                progress_hook=progress_hook,
+                headers=headers,
+                **kwargs))
+    except HttpResponseError as error:
+        process_storage_error(error)
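+
+
+# A hedged note on the retry pattern above: with overwrite=False the first
+# chunk upload may 404 because the append blob does not exist yet, so the
+# helper creates it (rewinding a seekable stream first) and retries the
+# chunks. A typical caller-side sketch (names hypothetical):
+async def _example_append(conn_str: str, line: bytes) -> None:
+    from azure.storage.blob.aio import BlobClient
+    blob = BlobClient.from_connection_string(conn_str, "my-container", "events.log")
+    async with blob:
+        await blob.upload_blob(line, blob_type="AppendBlob")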
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/blob/py.typed b/.venv/lib/python3.12/site-packages/azure/storage/blob/py.typed
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/blob/py.typed
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/__init__.py b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/__init__.py
new file mode 100644
index 00000000..1dbc5064
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/__init__.py
@@ -0,0 +1,110 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+
+from ._download import StorageStreamDownloader
+from ._data_lake_file_client import DataLakeFileClient
+from ._data_lake_directory_client import DataLakeDirectoryClient
+from ._file_system_client import FileSystemClient
+from ._data_lake_service_client import DataLakeServiceClient
+from ._data_lake_lease import DataLakeLeaseClient
+from ._models import (
+    AccessControlChangeCounters,
+    AccessControlChangeFailure,
+    AccessControlChangeResult,
+    AccessControlChanges,
+    AccessPolicy,
+    AccountSasPermissions,
+    AnalyticsLogging,
+    ArrowDialect,
+    ArrowType,
+    ContentSettings,
+    CorsRule,
+    CustomerProvidedEncryptionKey,
+    DataLakeFileQueryError,
+    DeletedPathProperties,
+    DelimitedJsonDialect,
+    DelimitedTextDialect,
+    DirectoryProperties,
+    DirectorySasPermissions,
+    EncryptionScopeOptions,
+    FileProperties,
+    FileSasPermissions,
+    FileSystemProperties,
+    FileSystemPropertiesPaged,
+    FileSystemSasPermissions,
+    LeaseProperties,
+    LocationMode,
+    Metrics,
+    PathProperties,
+    PublicAccess,
+    QuickQueryDialect,
+    ResourceTypes,
+    RetentionPolicy,
+    StaticWebsite,
+    UserDelegationKey,
+)
+
+from ._shared_access_signature import generate_account_sas, generate_file_system_sas, generate_directory_sas, \
+    generate_file_sas
+
+from ._shared.policies import ExponentialRetry, LinearRetry
+from ._shared.models import StorageErrorCode, Services
+from ._version import VERSION
+
+__version__ = VERSION
+
+__all__ = [
+    'AccessControlChangeCounters',
+    'AccessControlChangeFailure',
+    'AccessControlChangeResult',
+    'AccessControlChanges',
+    'AccessPolicy',
+    'AccountSasPermissions',
+    'AnalyticsLogging',
+    'ArrowDialect',
+    'ArrowType',
+    'ContentSettings',
+    'CorsRule',
+    'CustomerProvidedEncryptionKey',
+    'DataLakeDirectoryClient',
+    'DataLakeFileClient',
+    'DataLakeFileQueryError',
+    'DataLakeLeaseClient',
+    'DataLakeServiceClient',
+    'DeletedPathProperties',
+    'DelimitedJsonDialect',
+    'DelimitedTextDialect',
+    'DirectoryProperties',
+    'DirectorySasPermissions',
+    'EncryptionScopeOptions',
+    'ExponentialRetry',
+    'FileProperties',
+    'FileSasPermissions',
+    'FileSystemClient',
+    'FileSystemProperties',
+    'FileSystemPropertiesPaged',
+    'FileSystemSasPermissions',
+    'generate_account_sas',
+    'generate_directory_sas',
+    'generate_file_sas',
+    'generate_file_system_sas',
+    'LeaseProperties',
+    'LinearRetry',
+    'LocationMode',
+    'Metrics',
+    'PathProperties',
+    'PublicAccess',
+    'QuickQueryDialect',
+    'ResourceTypes',
+    'RetentionPolicy',
+    'StaticWebsite',
+    'StorageErrorCode',
+    'StorageStreamDownloader',
+    'UserDelegationKey',
+    'VERSION',
+    'Services'
+]
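+
+
+# A hedged entry-point sketch for the surface re-exported above (the account
+# URL and SAS token are hypothetical placeholders):
+#
+#     from azure.storage.filedatalake import DataLakeServiceClient
+#
+#     service = DataLakeServiceClient(
+#         "https://myaccount.dfs.core.windows.net", credential="<sas-token>")
+#     file_system = service.get_file_system_client("my-fs")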
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_data_lake_directory_client.py b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_data_lake_directory_client.py
new file mode 100644
index 00000000..0aa26b8a
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_data_lake_directory_client.py
@@ -0,0 +1,759 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+# pylint: disable=docstring-keyword-should-match-keyword-only
+
+import functools
+from typing import (
+    Any, Dict, Optional, Union,
+    TYPE_CHECKING
+)
+from urllib.parse import quote, unquote
+
+from typing_extensions import Self
+
+from azure.core.paging import ItemPaged
+from azure.core.pipeline import Pipeline
+from azure.core.tracing.decorator import distributed_trace
+
+from ._data_lake_file_client import DataLakeFileClient
+from ._deserialize import deserialize_dir_properties
+from ._list_paths_helper import PathPropertiesPaged
+from ._models import DirectoryProperties, FileProperties
+from ._path_client import PathClient
+from ._shared.base_client import TransportWrapper, parse_connection_str
+
+if TYPE_CHECKING:
+    from azure.core.credentials import AzureNamedKeyCredential, AzureSasCredential, TokenCredential
+    from datetime import datetime
+    from ._models import PathProperties
+
+
+class DataLakeDirectoryClient(PathClient):
+    """A client to interact with the DataLake directory, even if the directory may not yet exist.
+
+    For operations relating to a specific subdirectory or file under the directory, a directory client or file client
+    can be retrieved using the :func:`~get_sub_directory_client` or :func:`~get_file_client` functions.
+
+    :ivar str url:
+        The full endpoint URL to the file system, including SAS token if used.
+    :ivar str primary_endpoint:
+        The full primary endpoint URL.
+    :ivar str primary_hostname:
+        The hostname of the primary endpoint.
+    :param str account_url:
+        The URI to the storage account.
+    :param file_system_name:
+        The file system for the directory or files.
+    :type file_system_name: str
+    :param directory_name:
+        The whole path of the directory, e.g. {directory under file system}/{directory to interact with}.
+    :type directory_name: str
+    :param credential:
+        The credentials with which to authenticate. This is optional if the
+        account URL already has a SAS token. The value can be a SAS token string,
+        an instance of an AzureSasCredential or AzureNamedKeyCredential from azure.core.credentials,
+        an account shared access key, or an instance of a TokenCredentials class from azure.identity.
+        If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential
+        - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError.
+        If using an instance of AzureNamedKeyCredential, "name" should be the storage account name, and "key"
+        should be the storage account key.
+    :type credential:
+        ~azure.core.credentials.AzureNamedKeyCredential or
+        ~azure.core.credentials.AzureSasCredential or
+        ~azure.core.credentials.TokenCredential or
+        str or dict[str, str] or None
+    :keyword str api_version:
+        The Storage API version to use for requests. Default value is the most recent service version that is
+        compatible with the current SDK. Setting to an older version may result in reduced feature compatibility.
+    :keyword str audience: The audience to use when requesting tokens for Azure Active Directory
+        authentication. Only has an effect when credential is of type TokenCredential. The value could be
+        https://storage.azure.com/ (default) or https://<account>.blob.core.windows.net.
+
+    .. admonition:: Example:
+
+        .. literalinclude:: ../samples/datalake_samples_instantiate_client.py
+            :start-after: [START instantiate_directory_client_from_conn_str]
+            :end-before: [END instantiate_directory_client_from_conn_str]
+            :language: python
+            :dedent: 4
+            :caption: Creating the DataLakeServiceClient from connection string.
+    """
+    def __init__(
+        self, account_url: str,
+        file_system_name: str,
+        directory_name: str,
+        credential: Optional[Union[str, Dict[str, str], "AzureNamedKeyCredential", "AzureSasCredential", "TokenCredential"]] = None,  # pylint: disable=line-too-long
+        **kwargs: Any
+    ) -> None:
+        super(DataLakeDirectoryClient, self).__init__(account_url, file_system_name, path_name=directory_name,
+                                                      credential=credential, **kwargs)
+
+    @classmethod
+    def from_connection_string(
+            cls, conn_str: str,
+            file_system_name: str,
+            directory_name: str,
+            credential: Optional[Union[str, Dict[str, str], "AzureNamedKeyCredential", "AzureSasCredential", "TokenCredential"]] = None,  # pylint: disable=line-too-long
+            **kwargs: Any
+        ) -> Self:
+        """
+        Create DataLakeDirectoryClient from a Connection String.
+
+        :param str conn_str:
+            A connection string to an Azure Storage account.
+        :param file_system_name:
+            The name of file system to interact with.
+        :type file_system_name: str
+        :param credential:
+            The credentials with which to authenticate. This is optional if the
+            account URL already has a SAS token. The value can be a SAS token string,
+            an instance of an AzureSasCredential or AzureNamedKeyCredential from azure.core.credentials,
+            an account shared access key, or an instance of a TokenCredentials class from azure.identity.
+            If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential
+            - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError.
+            If using an instance of AzureNamedKeyCredential, "name" should be the storage account name, and "key"
+            should be the storage account key.
+        :type credential:
+            ~azure.core.credentials.AzureNamedKeyCredential or
+            ~azure.core.credentials.AzureSasCredential or
+            ~azure.core.credentials.TokenCredential or
+            str or dict[str, str] or None
+        :param directory_name:
+            The name of directory to interact with. The directory is under file system.
+        :type directory_name: str
+        :keyword str audience: The audience to use when requesting tokens for Azure Active Directory
+            authentication. Only has an effect when credential is of type TokenCredential. The value could be
+            https://storage.azure.com/ (default) or https://<account>.blob.core.windows.net.
+        :return: A DataLakeDirectoryClient.
+        :rtype: ~azure.storage.filedatalake.DataLakeDirectoryClient
+        """
+        account_url, _, credential = parse_connection_str(conn_str, credential, 'dfs')
+        return cls(
+            account_url, file_system_name=file_system_name, directory_name=directory_name,
+            credential=credential, **kwargs)
+
+    @distributed_trace
+    def create_directory(self, metadata=None,  # type: Optional[Dict[str, str]]
+                         **kwargs):
+        # type: (...) -> Dict[str, Union[str, datetime]]
+        """
+        Create a new directory.
+
+        :param metadata:
+            Name-value pairs associated with the file as metadata.
+        :type metadata: dict(str, str)
+        :keyword ~azure.storage.filedatalake.ContentSettings content_settings:
+            ContentSettings object used to set path properties.
+        :keyword lease:
+            Required if the file has an active lease. Value can be a DataLakeLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
+        :keyword str umask:
+            Optional and only valid if Hierarchical Namespace is enabled for the account.
+            When creating a file or directory and the parent folder does not have a default ACL,
+            the umask restricts the permissions of the file or directory to be created.
+            The resulting permission is given by p & ^u, where p is the permission and u is the umask.
+            For example, if p is 0777 and u is 0057, then the resulting permission is 0720.
+            The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027.
+            The umask must be specified in 4-digit octal notation (e.g. 0766).
+        :keyword str owner:
+            The owner of the file or directory.
+        :keyword str group:
+            The owning group of the file or directory.
+        :keyword str acl:
+            Sets POSIX access control rights on files and directories. The value is a
+            comma-separated list of access control entries. Each access control entry (ACE) consists of a
+            scope, a type, a user or group identifier, and permissions in the format
+            "[scope:][type]:[id]:[permissions]".
+        :keyword str lease_id:
+            Proposed lease ID, in a GUID string format. The DataLake service returns
+            400 (Invalid request) if the proposed lease ID is not in the correct format.
+        :keyword int lease_duration:
+            Specifies the duration of the lease, in seconds, or negative one
+            (-1) for a lease that never expires. A non-infinite lease can be
+            between 15 and 60 seconds. A lease duration cannot be changed
+            using renew or change.
+        :keyword str permissions:
+            Optional and only valid if Hierarchical Namespace
+            is enabled for the account. Sets POSIX access permissions for the file
+            owner, the file owning group, and others. Each class may be granted
+            read, write, or execute permission.  The sticky bit is also supported.
+            Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are
+            supported.
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword ~azure.storage.filedatalake.CustomerProvidedEncryptionKey cpk:
+            Encrypts the data on the service-side with the given key.
+            Use of customer-provided keys must be done over HTTPS.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :return: A dictionary of response headers.
+        :rtype: dict[str, Union[str, datetime]]
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_directory.py
+                :start-after: [START create_directory]
+                :end-before: [END create_directory]
+                :language: python
+                :dedent: 8
+                :caption: Create directory.
+        """
+        return self._create('directory', metadata=metadata, **kwargs)
+
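+    # A hedged usage sketch for create_directory (the client construction and
+    # all names are hypothetical):
+    #
+    #     client = DataLakeDirectoryClient.from_connection_string(
+    #         conn_str, file_system_name="my-fs", directory_name="raw/2025")
+    #     client.create_directory(metadata={"owner": "etl"}, umask="0027", permissions="0777")
+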
+    @distributed_trace
+    def delete_directory(self, **kwargs):
+        # type: (...) -> None
+        """
+        Marks the specified directory for deletion.
+
+        :keyword lease:
+            Required if the file has an active lease. Value can be a LeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :returns: None.
+        :rtype: None
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_directory.py
+                :start-after: [START delete_directory]
+                :end-before: [END delete_directory]
+                :language: python
+                :dedent: 4
+                :caption: Delete directory.
+        """
+        return self._delete(recursive=True, **kwargs)
+
+    @distributed_trace
+    def get_directory_properties(self, **kwargs):
+        # type: (**Any) -> DirectoryProperties
+        """Returns all user-defined metadata, standard HTTP properties, and
+        system properties for the directory. It does not return the content of the directory.
+
+        :keyword lease:
+            Required if the directory or file has an active lease. Value can be a DataLakeLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword ~azure.storage.filedatalake.CustomerProvidedEncryptionKey cpk:
+            Decrypts the data on the service-side with the given key.
+            Use of customer-provided keys must be done over HTTPS.
+            Required if the directory was created with a customer-provided key.
+        :keyword bool upn:
+            If True, the user identity values returned in the x-ms-owner, x-ms-group,
+            and x-ms-acl response headers will be transformed from Azure Active Directory Object IDs to User
+            Principal Names in the owner, group, and acl fields of
+            :class:`~azure.storage.filedatalake.DirectoryProperties`. If False, the values will be returned
+            as Azure Active Directory Object IDs. The default value is False. Note that group and application
+            Object IDs are not translated because they do not have unique friendly names.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :returns:
+            DirectoryProperties with all user-defined metadata, standard HTTP properties,
+            and system properties for the directory. It does not return the content of the directory.
+        :rtype: ~azure.storage.filedatalake.DirectoryProperties
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_directory.py
+                :start-after: [START get_directory_properties]
+                :end-before: [END get_directory_properties]
+                :language: python
+                :dedent: 4
+                :caption: Getting the properties for a file/directory.
+        """
+        upn = kwargs.pop('upn', None)
+        if upn:
+            headers = kwargs.pop('headers', {})
+            headers['x-ms-upn'] = str(upn)
+            kwargs['headers'] = headers
+        return self._get_path_properties(cls=deserialize_dir_properties, **kwargs)
+
+    @distributed_trace
+    def exists(self, **kwargs):
+        # type: (**Any) -> bool
+        """
+        Returns True if a directory exists and returns False otherwise.
+
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :returns: True if a directory exists, False otherwise.
+        :rtype: bool
+        """
+        return self._exists(**kwargs)
+
+    @distributed_trace
+    def rename_directory(self, new_name, **kwargs):
+        # type: (str, **Any) -> DataLakeDirectoryClient
+        """
+        Rename the source directory.
+
+        :param str new_name:
+            The new directory name the user wants to rename to.
+            The value must have the following format: "{filesystem}/{directory}/{subdirectory}".
+        :keyword source_lease:
+            A lease ID for the source path. If specified,
+            the source path must have an active lease and the lease ID must
+            match.
+        :paramtype source_lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
+        :keyword lease:
+            Required if the file/directory has an active lease. Value can be a LeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword ~datetime.datetime source_if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime source_if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str source_etag:
+            The source ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions source_match_condition:
+            The source match condition to use upon the etag.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :returns: A DataLakeDirectoryClient for the renamed directory.
+        :rtype: ~azure.storage.filedatalake.DataLakeDirectoryClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_directory.py
+                :start-after: [START rename_directory]
+                :end-before: [END rename_directory]
+                :language: python
+                :dedent: 4
+                :caption: Rename the source directory.
+        """
+        new_file_system, new_path, new_dir_sas = self._parse_rename_path(new_name)
+
+        new_directory_client = DataLakeDirectoryClient(
+            f"{self.scheme}://{self.primary_hostname}", new_file_system, directory_name=new_path,
+            credential=self._raw_credential or new_dir_sas, _hosts=self._hosts, _configuration=self._config,
+            _pipeline=self._pipeline)
+        new_directory_client._rename_path(  # pylint: disable=protected-access
+            f'/{quote(unquote(self.file_system_name))}/{quote(unquote(self.path_name))}{self._query_str}', **kwargs)
+        return new_directory_client
+
+    @distributed_trace
+    def create_sub_directory(self, sub_directory,  # type: Union[DirectoryProperties, str]
+                             metadata=None,  # type: Optional[Dict[str, str]]
+                             **kwargs):
+        # type: (...) -> DataLakeDirectoryClient
+        """
+        Create a subdirectory and return a client to interact with it.
+
+        :param sub_directory:
+            The directory with which to interact. This can either be the name of the directory,
+            or an instance of DirectoryProperties.
+        :type sub_directory: str or ~azure.storage.filedatalake.DirectoryProperties
+        :param metadata:
+            Name-value pairs associated with the directory as metadata.
+        :type metadata: dict(str, str)
+        :keyword ~azure.storage.filedatalake.ContentSettings content_settings:
+            ContentSettings object used to set path properties.
+        :keyword lease:
+            Required if the file has an active lease. Value can be a DataLakeLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
+        :keyword str umask:
+            Optional and only valid if Hierarchical Namespace is enabled for the account.
+            When creating a file or directory and the parent folder does not have a default ACL,
+            the umask restricts the permissions of the file or directory to be created.
+            The resulting permission is given by p & ^u, where p is the permission and u is the umask.
+            For example, if p is 0777 and u is 0057, then the resulting permission is 0720.
+            The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027.
+            The umask must be specified in 4-digit octal notation (e.g. 0766).
+        :keyword str owner:
+            The owner of the file or directory.
+        :keyword str group:
+            The owning group of the file or directory.
+        :keyword str acl:
+            Sets POSIX access control rights on files and directories. The value is a
+            comma-separated list of access control entries. Each access control entry (ACE) consists of a
+            scope, a type, a user or group identifier, and permissions in the format
+            "[scope:][type]:[id]:[permissions]".
+        :keyword str lease_id:
+            Proposed lease ID, in a GUID string format. The DataLake service returns
+            400 (Invalid request) if the proposed lease ID is not in the correct format.
+        :keyword int lease_duration:
+            Specifies the duration of the lease, in seconds, or negative one
+            (-1) for a lease that never expires. A non-infinite lease can be
+            between 15 and 60 seconds. A lease duration cannot be changed
+            using renew or change.
+        :keyword str permissions:
+            Optional and only valid if Hierarchical Namespace
+            is enabled for the account. Sets POSIX access permissions for the file
+            owner, the file owning group, and others. Each class may be granted
+            read, write, or execute permission.  The sticky bit is also supported.
+            Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are
+            supported.
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword ~azure.storage.filedatalake.CustomerProvidedEncryptionKey cpk:
+            Encrypts the data on the service-side with the given key.
+            Use of customer-provided keys must be done over HTTPS.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :returns: DataLakeDirectoryClient for the subdirectory.
+        :rtype: ~azure.storage.filedatalake.DataLakeDirectoryClient
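+
+        .. admonition:: Example:
+
+            A minimal sketch, not an SDK sample; assumes ``directory_client``
+            is an authenticated DataLakeDirectoryClient:
+
+            .. code-block:: python
+
+                # Create a subdirectory named "logs" under this directory
+                # and get a client for it.
+                sub_client = directory_client.create_sub_directory("logs")
+                print(sub_client.path_name)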
+        """
+        subdir = self.get_sub_directory_client(sub_directory)
+        subdir.create_directory(metadata=metadata, **kwargs)
+        return subdir
+
+    @distributed_trace
+    def delete_sub_directory(self, sub_directory,  # type: Union[DirectoryProperties, str]
+                             **kwargs):
+        # type: (...) -> DataLakeDirectoryClient
+        """
+        Marks the specified subdirectory for deletion.
+
+        :param sub_directory:
+            The directory with which to interact. This can either be the name of the directory,
+            or an instance of DirectoryProperties.
+        :type sub_directory: str or ~azure.storage.filedatalake.DirectoryProperties
+        :keyword lease:
+            Required if the directory has an active lease. Value can be a DataLakeLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :returns: DataLakeDirectoryClient for the subdirectory.
+        :rtype: ~azure.storage.filedatalake.DataLakeDirectoryClient
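+
+        .. admonition:: Example:
+
+            A minimal sketch, not an SDK sample; assumes an authenticated
+            ``directory_client``:
+
+            .. code-block:: python
+
+                # Mark the "logs" subdirectory for deletion.
+                directory_client.delete_sub_directory("logs")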
+        """
+        subdir = self.get_sub_directory_client(sub_directory)
+        subdir.delete_directory(**kwargs)
+        return subdir
+
+    @distributed_trace
+    def create_file(self, file,  # type: Union[FileProperties, str]
+                    **kwargs):
+        # type: (...) -> DataLakeFileClient
+        """
+        Create a new file and return a client to interact with it.
+
+        :param file:
+            The file with which to interact. This can either be the name of the file,
+            or an instance of FileProperties.
+        :type file: str or ~azure.storage.filedatalake.FileProperties
+        :keyword ~azure.storage.filedatalake.ContentSettings content_settings:
+            ContentSettings object used to set path properties.
+        :keyword metadata:
+            Name-value pairs associated with the file as metadata.
+        :paramtype metadata: dict(str, str)
+        :keyword lease:
+            Required if the file has an active lease. Value can be a DataLakeLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
+        :keyword str umask:
+            Optional and only valid if Hierarchical Namespace is enabled for the account.
+            When creating a file or directory and the parent folder does not have a default ACL,
+            the umask restricts the permissions of the file or directory to be created.
+            The resulting permission is given by p & ^u, where p is the permission and u is the umask.
+            For example, if p is 0777 and u is 0057, then the resulting permission is 0720.
+            The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027.
+            The umask must be specified in 4-digit octal notation (e.g. 0766).
+        :keyword str owner:
+            The owner of the file or directory.
+        :keyword str group:
+            The owning group of the file or directory.
+        :keyword str acl:
+            Sets POSIX access control rights on files and directories. The value is a
+            comma-separated list of access control entries. Each access control entry (ACE) consists of a
+            scope, a type, a user or group identifier, and permissions in the format
+            "[scope:][type]:[id]:[permissions]".
+        :keyword str lease_id:
+            Proposed lease ID, in a GUID string format. The DataLake service returns
+            400 (Invalid request) if the proposed lease ID is not in the correct format.
+        :keyword int lease_duration:
+            Specifies the duration of the lease, in seconds, or negative one
+            (-1) for a lease that never expires. A non-infinite lease can be
+            between 15 and 60 seconds. A lease duration cannot be changed
+            using renew or change.
+        :keyword expires_on:
+            The time to set the file to expire.
+            If the type of expires_on is an int, expiration time will be set
+            as the number of milliseconds elapsed from creation time.
+            If the type of expires_on is datetime, expiration time will be set
+            absolute to the time provided. If no time zone info is provided, this
+            will be interpreted as UTC.
+        :paramtype expires_on: datetime or int
+        :keyword str permissions:
+            Optional and only valid if Hierarchical Namespace
+            is enabled for the account. Sets POSIX access permissions for the file
+            owner, the file owning group, and others. Each class may be granted
+            read, write, or execute permission.  The sticky bit is also supported.
+            Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are
+            supported.
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword ~azure.storage.filedatalake.CustomerProvidedEncryptionKey cpk:
+            Encrypts the data on the service-side with the given key.
+            Use of customer-provided keys must be done over HTTPS.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :returns: A DataLakeFileClient for the newly created file.
+        :rtype: ~azure.storage.filedatalake.DataLakeFileClient
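+
+        .. admonition:: Example:
+
+            A minimal sketch, not an SDK sample; ``directory_client`` and the
+            file name are illustrative:
+
+            .. code-block:: python
+
+                # Create an empty file in this directory and get its client.
+                file_client = directory_client.create_file("data.csv")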
+        """
+        file_client = self.get_file_client(file)
+        file_client.create_file(**kwargs)
+        return file_client
+
+    @distributed_trace
+    def get_paths(
+        self, *,
+        recursive: bool = True,
+        max_results: Optional[int] = None,
+        upn: Optional[bool] = None,
+        timeout: Optional[int] = None,
+        **kwargs: Any
+    ) -> ItemPaged["PathProperties"]:
+        """Returns a generator to list the paths under specified file system and directory.
+        The generator will lazily follow the continuation tokens returned by the service.
+
+        :keyword bool recursive: If True, list paths recursively; if False, list only the paths directly under the directory. The default value is True.
+        :keyword Optional[int] max_results: An optional value that specifies the maximum
+            number of items to return per page. If omitted or greater than 5,000, the
+            response will include up to 5,000 items per page.
+        :keyword Optional[bool] upn:
+            If True, the user identity values returned in the x-ms-owner, x-ms-group,
+            and x-ms-acl response headers will be transformed from Azure Active Directory Object IDs to User
+            Principal Names in the owner, group, and acl fields of
+            :class:`~azure.storage.filedatalake.PathProperties`. If False, the values will be returned
+            as Azure Active Directory Object IDs. The default value is None. Note that group and application
+            Object IDs are not translated because they do not have unique friendly names.
+        :keyword Optional[int] timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_. The default value is None.
+        :returns: An iterable (auto-paging) response of PathProperties.
+        :rtype: ~azure.core.paging.ItemPaged[~azure.storage.filedatalake.PathProperties]
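+
+        .. admonition:: Example:
+
+            A minimal sketch, not an SDK sample; assumes an authenticated
+            ``directory_client``:
+
+            .. code-block:: python
+
+                # List the paths directly under this directory, in pages of
+                # up to 100 items.
+                for path in directory_client.get_paths(recursive=False, max_results=100):
+                    print(path.name, path.is_directory)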
+        """
+        hostname = self._hosts[self._location_mode]
+        url = f"{self.scheme}://{hostname}/{quote(self.file_system_name)}"
+        client = self._build_generated_client(url)
+        command = functools.partial(
+            client.file_system.list_paths,
+            path=self.path_name,
+            timeout=timeout,
+            **kwargs
+        )
+        return ItemPaged(
+            command, recursive, path=self.path_name, max_results=max_results,
+            upn=upn, page_iterator_class=PathPropertiesPaged, **kwargs)
+
+    def get_file_client(self, file  # type: Union[FileProperties, str]
+                        ):
+        # type: (...) -> DataLakeFileClient
+        """Get a client to interact with the specified file.
+
+        The file need not already exist.
+
+        :param file:
+            The file with which to interact. This can either be the name of the file,
+            or an instance of FileProperties, e.g. "directory/subdirectory/file".
+        :type file: str or ~azure.storage.filedatalake.FileProperties
+        :returns: A DataLakeFileClient.
+        :rtype: ~azure.storage.filedatalake.DataLakeFileClient
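+
+        .. admonition:: Example:
+
+            A minimal sketch, not an SDK sample; the file path is illustrative:
+
+            .. code-block:: python
+
+                # The client can be created before the file exists.
+                file_client = directory_client.get_file_client("subdir/data.csv")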
+        """
+        try:
+            file_path = file.get('name')
+        except AttributeError:
+            file_path = self.path_name + '/' + str(file)
+
+        _pipeline = Pipeline(
+            transport=TransportWrapper(self._pipeline._transport), # pylint: disable = protected-access
+            policies=self._pipeline._impl_policies # pylint: disable = protected-access
+        )
+        return DataLakeFileClient(
+            self.url, self.file_system_name, file_path=file_path, credential=self._raw_credential,
+            api_version=self.api_version,
+            _hosts=self._hosts, _configuration=self._config, _pipeline=_pipeline)
+
+    def get_sub_directory_client(self, sub_directory  # type: Union[DirectoryProperties, str]
+                                 ):
+        # type: (...) -> DataLakeDirectoryClient
+        """Get a client to interact with the specified subdirectory of the current directory.
+
+        The subdirectory need not already exist.
+
+        :param sub_directory:
+            The directory with which to interact. This can either be the name of the directory,
+            or an instance of DirectoryProperties.
+        :type sub_directory: str or ~azure.storage.filedatalake.DirectoryProperties
+        :returns: A DataLakeDirectoryClient.
+        :rtype: ~azure.storage.filedatalake.DataLakeDirectoryClient
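+
+        .. admonition:: Example:
+
+            A minimal sketch, not an SDK sample:
+
+            .. code-block:: python
+
+                # The client can be created before the subdirectory exists.
+                sub_client = directory_client.get_sub_directory_client("archive")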
+        """
+        try:
+            subdir_path = sub_directory.get('name')
+        except AttributeError:
+            subdir_path = self.path_name + '/' + str(sub_directory)
+
+        _pipeline = Pipeline(
+            transport=TransportWrapper(self._pipeline._transport), # pylint: disable = protected-access
+            policies=self._pipeline._impl_policies # pylint: disable = protected-access
+        )
+        return DataLakeDirectoryClient(
+            self.url, self.file_system_name, directory_name=subdir_path, credential=self._raw_credential,
+            api_version=self.api_version,
+            _hosts=self._hosts, _configuration=self._config, _pipeline=_pipeline)
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_data_lake_file_client.py b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_data_lake_file_client.py
new file mode 100644
index 00000000..386e8bf1
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_data_lake_file_client.py
@@ -0,0 +1,983 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+# pylint: disable=docstring-keyword-should-match-keyword-only
+
+from datetime import datetime
+from io import BytesIO
+from typing import (
+    Any, AnyStr, AsyncIterable, Dict, IO, Iterable, Optional, Union,
+    TYPE_CHECKING
+)
+from urllib.parse import quote, unquote
+
+from typing_extensions import Self
+
+from azure.core.exceptions import HttpResponseError
+from azure.core.tracing.decorator import distributed_trace
+from ._quick_query_helper import DataLakeFileQueryReader
+from ._shared.base_client import parse_connection_str
+from ._shared.request_handlers import get_length, read_length
+from ._shared.response_handlers import return_response_headers
+from ._shared.uploads import IterStreamer
+from ._shared.uploads_async import AsyncIterStreamer
+from ._upload_helper import upload_datalake_file
+from ._download import StorageStreamDownloader
+from ._path_client import PathClient
+from ._serialize import get_mod_conditions, get_path_http_headers, get_access_conditions, add_metadata_headers, \
+    convert_datetime_to_rfc1123, get_cpk_info, get_lease_action_properties
+from ._deserialize import process_storage_error, deserialize_file_properties
+from ._models import FileProperties, DataLakeFileQueryError
+
+if TYPE_CHECKING:
+    from azure.core.credentials import AzureNamedKeyCredential, AzureSasCredential, TokenCredential
+    from ._models import ContentSettings
+
+
+class DataLakeFileClient(PathClient):
+    """A client to interact with the DataLake file, even if the file may not yet exist.
+
+    :ivar str url:
+        The full endpoint URL to the file system, including SAS token if used.
+    :ivar str primary_endpoint:
+        The full primary endpoint URL.
+    :ivar str primary_hostname:
+        The hostname of the primary endpoint.
+    :param str account_url:
+        The URI to the storage account.
+    :param file_system_name:
+        The file system for the directory or files.
+    :type file_system_name: str
+    :param file_path:
+        The whole file path, used to interact with a specific file.
+        e.g. "{directory}/{subdirectory}/{file}"
+    :type file_path: str
+    :param credential:
+        The credentials with which to authenticate. This is optional if the
+        account URL already has a SAS token. The value can be a SAS token string,
+        an instance of an AzureSasCredential or AzureNamedKeyCredential from azure.core.credentials,
+        an account shared access key, or an instance of a TokenCredential class from azure.identity.
+        If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential
+        - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError.
+        If using an instance of AzureNamedKeyCredential, "name" should be the storage account name, and "key"
+        should be the storage account key.
+    :type credential:
+        ~azure.core.credentials.AzureNamedKeyCredential or
+        ~azure.core.credentials.AzureSasCredential or
+        ~azure.core.credentials.TokenCredential or
+        str or dict[str, str] or None
+    :keyword str api_version:
+        The Storage API version to use for requests. Default value is the most recent service version that is
+        compatible with the current SDK. Setting to an older version may result in reduced feature compatibility.
+    :keyword str audience: The audience to use when requesting tokens for Azure Active Directory
+        authentication. Only has an effect when credential is of type TokenCredential. The value could be
+        https://storage.azure.com/ (default) or https://<account>.blob.core.windows.net.
+
+    .. admonition:: Example:
+
+        .. literalinclude:: ../samples/datalake_samples_instantiate_client.py
+            :start-after: [START instantiate_file_client_from_conn_str]
+            :end-before: [END instantiate_file_client_from_conn_str]
+            :language: python
+            :dedent: 4
+            :caption: Creating the DataLakeFileClient from connection string.
+    """
+    def __init__(
+        self, account_url: str,
+        file_system_name: str,
+        file_path: str,
+        credential: Optional[Union[str, Dict[str, str], "AzureNamedKeyCredential", "AzureSasCredential", "TokenCredential"]] = None,  # pylint: disable=line-too-long
+        **kwargs: Any
+    ) -> None:
+        super(DataLakeFileClient, self).__init__(account_url, file_system_name, path_name=file_path,
+                                                 credential=credential, **kwargs)
+
+    @classmethod
+    def from_connection_string(
+            cls, conn_str: str,
+            file_system_name: str,
+            file_path: str,
+            credential: Optional[Union[str, Dict[str, str], "AzureNamedKeyCredential", "AzureSasCredential", "TokenCredential"]] = None,  # pylint: disable=line-too-long
+            **kwargs: Any
+        ) -> Self:
+        """
+        Create DataLakeFileClient from a Connection String.
+
+        :param str conn_str:
+            A connection string to an Azure Storage account.
+        :param file_system_name: The name of file system to interact with.
+        :type file_system_name: str
+        :param str file_path:
+            The whole file path, used to interact with a specific file.
+            e.g. "{directory}/{subdirectory}/{file}"
+        :param credential:
+            The credentials with which to authenticate. This is optional if the
+            account URL already has a SAS token, or the connection string already has shared
+            access key values. The value can be a SAS token string,
+            an instance of an AzureSasCredential or AzureNamedKeyCredential from azure.core.credentials,
+            an account shared access key, or an instance of a TokenCredential class from azure.identity.
+            Credentials provided here will take precedence over those in the connection string.
+            If using an instance of AzureNamedKeyCredential, "name" should be the storage account name, and "key"
+            should be the storage account key.
+        :type credential:
+            ~azure.core.credentials.AzureNamedKeyCredential or
+            ~azure.core.credentials.AzureSasCredential or
+            ~azure.core.credentials.TokenCredential or
+            str or dict[str, str] or None
+        :keyword str audience: The audience to use when requesting tokens for Azure Active Directory
+            authentication. Only has an effect when credential is of type TokenCredential. The value could be
+            https://storage.azure.com/ (default) or https://<account>.blob.core.windows.net.
+        :returns: A DataLakeFileClient.
+        :rtype: ~azure.storage.filedatalake.DataLakeFileClient
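+
+        .. admonition:: Example:
+
+            A minimal sketch, not an SDK sample; the connection string, file
+            system, and path are placeholders:
+
+            .. code-block:: python
+
+                from azure.storage.filedatalake import DataLakeFileClient
+
+                file_client = DataLakeFileClient.from_connection_string(
+                    "<my-connection-string>", "my-filesystem", "dir/subdir/data.txt")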
+        """
+        account_url, _, credential = parse_connection_str(conn_str, credential, 'dfs')
+        return cls(
+            account_url, file_system_name=file_system_name, file_path=file_path,
+            credential=credential, **kwargs)
+
+    @distributed_trace
+    def create_file(self, content_settings=None,  # type: Optional[ContentSettings]
+                    metadata=None,  # type: Optional[Dict[str, str]]
+                    **kwargs):
+        # type: (...) -> Dict[str, Union[str, datetime]]
+        """
+        Create a new file.
+
+        :param ~azure.storage.filedatalake.ContentSettings content_settings:
+            ContentSettings object used to set path properties.
+        :param metadata:
+            Name-value pairs associated with the file as metadata.
+        :type metadata: Optional[Dict[str, str]]
+        :keyword lease:
+            Required if the file has an active lease. Value can be a DataLakeLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
+        :keyword str umask:
+            Optional and only valid if Hierarchical Namespace is enabled for the account.
+            When creating a file or directory and the parent folder does not have a default ACL,
+            the umask restricts the permissions of the file or directory to be created.
+            The resulting permission is given by p & ^u, where p is the permission and u is the umask.
+            For example, if p is 0777 and u is 0057, then the resulting permission is 0720.
+            The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027.
+            The umask must be specified in 4-digit octal notation (e.g. 0766).
+        :keyword str owner:
+            The owner of the file or directory.
+        :keyword str group:
+            The owning group of the file or directory.
+        :keyword str acl:
+            Sets POSIX access control rights on files and directories. The value is a
+            comma-separated list of access control entries. Each access control entry (ACE) consists of a
+            scope, a type, a user or group identifier, and permissions in the format
+            "[scope:][type]:[id]:[permissions]".
+        :keyword str lease_id:
+            Proposed lease ID, in a GUID string format. The DataLake service returns
+            400 (Invalid request) if the proposed lease ID is not in the correct format.
+        :keyword int lease_duration:
+            Specifies the duration of the lease, in seconds, or negative one
+            (-1) for a lease that never expires. A non-infinite lease can be
+            between 15 and 60 seconds. A lease duration cannot be changed
+            using renew or change.
+        :keyword expires_on:
+            The time to set the file to expire.
+            If the type of expires_on is an int, expiration time will be set
+            as the number of milliseconds elapsed from creation time.
+            If the type of expires_on is datetime, expiration time will be set
+            absolute to the time provided. If no time zone info is provided, this
+            will be interpreted as UTC.
+        :paramtype expires_on: datetime or int
+        :keyword str permissions:
+            Optional and only valid if Hierarchical Namespace
+            is enabled for the account. Sets POSIX access permissions for the file
+            owner, the file owning group, and others. Each class may be granted
+            read, write, or execute permission.  The sticky bit is also supported.
+            Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are
+            supported.
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword ~azure.storage.filedatalake.CustomerProvidedEncryptionKey cpk:
+            Encrypts the data on the service-side with the given key.
+            Use of customer-provided keys must be done over HTTPS.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :keyword str encryption_context:
+            Specifies the encryption context to set on the file.
+        :returns: response dict (Etag and last modified).
+        :rtype: dict[str, str] or dict[str, ~datetime.datetime]
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_upload_download.py
+                :start-after: [START create_file]
+                :end-before: [END create_file]
+                :language: python
+                :dedent: 4
+                :caption: Create file.
+        """
+        return self._create('file', content_settings=content_settings, metadata=metadata, **kwargs)
+
+    @distributed_trace
+    def delete_file(self, **kwargs):
+        # type: (...) -> None
+        """
+        Marks the specified file for deletion.
+
+        :keyword lease:
+            Required if the file has an active lease. Value can be a DataLakeLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :returns: None.
+        :rtype: None
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_upload_download.py
+                :start-after: [START delete_file]
+                :end-before: [END delete_file]
+                :language: python
+                :dedent: 4
+                :caption: Delete file.
+        """
+        return self._delete(**kwargs)
+
+    @distributed_trace
+    def get_file_properties(self, **kwargs):
+        # type: (**Any) -> FileProperties
+        """Returns all user-defined metadata, standard HTTP properties, and
+        system properties for the file. It does not return the content of the file.
+
+        :keyword lease:
+            Required if the directory or file has an active lease. Value can be a DataLakeLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword ~azure.storage.filedatalake.CustomerProvidedEncryptionKey cpk:
+            Decrypts the data on the service-side with the given key.
+            Use of customer-provided keys must be done over HTTPS.
+            Required if the file was created with a customer-provided key.
+        :keyword bool upn:
+            If True, the user identity values returned in the x-ms-owner, x-ms-group,
+            and x-ms-acl response headers will be transformed from Azure Active Directory Object IDs to User
+            Principal Names in the owner, group, and acl fields of
+            :class:`~azure.storage.filedatalake.FileProperties`. If False, the values will be returned
+            as Azure Active Directory Object IDs. The default value is False. Note that group and application
+            Object IDs are not translated because they do not have unique friendly names.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :returns: All user-defined metadata, standard HTTP properties, and system properties for the file.
+        :rtype: ~azure.storage.filedatalake.FileProperties
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_upload_download.py
+                :start-after: [START get_file_properties]
+                :end-before: [END get_file_properties]
+                :language: python
+                :dedent: 4
+                :caption: Getting the properties for a file.
+        """
+        upn = kwargs.pop('upn', None)
+        if upn:
+            headers = kwargs.pop('headers', {})
+            headers['x-ms-upn'] = str(upn)
+            kwargs['headers'] = headers
+        return self._get_path_properties(cls=deserialize_file_properties, **kwargs)
+
+    @distributed_trace
+    def set_file_expiry(self, expiry_options,  # type: str
+                        expires_on=None,   # type: Optional[Union[datetime, int]]
+                        **kwargs):
+        # type: (...) -> None
+        """Sets the time a file will expire and be deleted.
+
+        :param str expiry_options:
+            Required. Indicates mode of the expiry time.
+            Possible values include: 'NeverExpire', 'RelativeToCreation', 'RelativeToNow', 'Absolute'
+        :param datetime or int expires_on:
+            The time to set the file to expire.
+            When expiry_options is RelativeTo*, expires_on should be an int in milliseconds.
+            If the type of expires_on is datetime, it should be in UTC time.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :rtype: None
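+
+        .. admonition:: Example:
+
+            A minimal sketch, not an SDK sample; assumes an authenticated
+            ``file_client``:
+
+            .. code-block:: python
+
+                from datetime import datetime, timedelta, timezone
+
+                # Expire the file one hour from now (absolute UTC time)...
+                file_client.set_file_expiry(
+                    "Absolute", expires_on=datetime.now(timezone.utc) + timedelta(hours=1))
+
+                # ...or 60 seconds (given in milliseconds) after its creation time.
+                file_client.set_file_expiry("RelativeToCreation", expires_on=60 * 1000)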
+        """
+        if isinstance(expires_on, datetime):
+            expires_on = convert_datetime_to_rfc1123(expires_on)
+        elif expires_on is not None:
+            expires_on = str(expires_on)
+        self._datalake_client_for_blob_operation.path \
+            .set_expiry(expiry_options, expires_on=expires_on, **kwargs)
+
+    def _upload_options(
+            self, data: Union[bytes, str, Iterable[AnyStr], AsyncIterable[AnyStr], IO[AnyStr]],
+            length: Optional[int] = None,
+            **kwargs
+        ) -> Dict[str, Any]:
+
+        encoding = kwargs.pop('encoding', 'UTF-8')
+        if isinstance(data, str):
+            data = data.encode(encoding)
+        if length is None:
+            length = get_length(data)
+        if isinstance(data, bytes):
+            data = data[:length]
+
+        if isinstance(data, bytes):
+            stream = BytesIO(data)
+        elif hasattr(data, 'read'):
+            stream = data
+        elif hasattr(data, '__iter__'):
+            stream = IterStreamer(data, encoding=encoding)
+        elif hasattr(data, '__aiter__'):
+            stream = AsyncIterStreamer(data, encoding=encoding)
+        else:
+            raise TypeError(f"Unsupported data type: {type(data)}")
+
+        validate_content = kwargs.pop('validate_content', False)
+        content_settings = kwargs.pop('content_settings', None)
+        metadata = kwargs.pop('metadata', None)
+        max_concurrency = kwargs.pop('max_concurrency', 1)
+
+        kwargs['properties'] = add_metadata_headers(metadata)
+        kwargs['lease_access_conditions'] = get_access_conditions(kwargs.pop('lease', None))
+        kwargs['modified_access_conditions'] = get_mod_conditions(kwargs)
+        kwargs['cpk_info'] = get_cpk_info(self.scheme, kwargs)
+
+        if content_settings:
+            kwargs['path_http_headers'] = get_path_http_headers(content_settings)
+
+        kwargs['stream'] = stream
+        kwargs['length'] = length
+        kwargs['validate_content'] = validate_content
+        kwargs['max_concurrency'] = max_concurrency
+        kwargs['client'] = self._client.path
+        kwargs['file_settings'] = self._config
+
+        return kwargs
+
+    @distributed_trace
+    def upload_data(
+            self, data: Union[bytes, str, Iterable[AnyStr], IO[AnyStr]],
+            length: Optional[int] = None,
+            overwrite: Optional[bool] = False,
+            **kwargs
+        ) -> Dict[str, Any]:
+        """
+        Upload data to a file.
+
+        :param data: Content to be uploaded to file
+        :type data: bytes, str, Iterable[AnyStr], or IO[AnyStr]
+        :param int length: Size of the data in bytes.
+        :param bool overwrite: Whether to overwrite an existing file.
+        :keyword ~azure.storage.filedatalake.ContentSettings content_settings:
+            ContentSettings object used to set path properties.
+        :keyword metadata:
+            Name-value pairs associated with the file as metadata.
+        :paramtype metadata: Optional[Dict[str, str]]
+        :keyword ~azure.storage.filedatalake.DataLakeLeaseClient or str lease:
+            Required if the file has an active lease. Value can be a DataLakeLeaseClient object
+            or the lease ID as a string.
+        :keyword str umask: Optional and only valid if Hierarchical Namespace is enabled for the account.
+            When creating a file or directory and the parent folder does not have a default ACL,
+            the umask restricts the permissions of the file or directory to be created.
+            The resulting permission is given by p & ^u, where p is the permission and u is the umask.
+            For example, if p is 0777 and u is 0057, then the resulting permission is 0720.
+            The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027.
+            The umask must be specified in 4-digit octal notation (e.g. 0766).
+        :keyword str permissions: Optional and only valid if Hierarchical Namespace
+         is enabled for the account. Sets POSIX access permissions for the file
+         owner, the file owning group, and others. Each class may be granted
+         read, write, or execute permission.  The sticky bit is also supported.
+         Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are
+         supported.
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword bool validate_content:
+            If true, calculates an MD5 hash for each chunk of the file. The storage
+            service checks the hash of the content that has arrived with the hash
+            that was sent. This is primarily valuable for detecting bitflips on
+            the wire if using http instead of https, as https (the default), will
+            already validate. Note that this MD5 hash is not stored with the
+            file. Also note that if enabled, the memory-efficient upload algorithm
+            will not be used because computing the MD5 hash requires buffering
+            entire blocks, and doing so defeats the purpose of the memory-efficient algorithm.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword ~azure.storage.filedatalake.CustomerProvidedEncryptionKey cpk:
+            Encrypts the data on the service-side with the given key.
+            Use of customer-provided keys must be done over HTTPS.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_. This method may make multiple calls to the service and
+            the timeout will apply to each call individually.
+        :keyword int max_concurrency:
+            Maximum number of parallel connections to use when transferring the file in chunks.
+            This option does not affect the underlying connection pool, and may
+            require a separate configuration of the connection pool.
+        :keyword int chunk_size:
+            The maximum chunk size for uploading a file in chunks.
+            Defaults to 100*1024*1024, or 100MB.
+        :keyword str encryption_context:
+            Specifies the encryption context to set on the file.
+        :returns: response dict (Etag and last modified).
+        :rtype: dict[str, Any]
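+
+        .. admonition:: Example:
+
+            A minimal sketch, not an SDK sample; the local file name is
+            illustrative:
+
+            .. code-block:: python
+
+                # Upload a local file, replacing any existing content.
+                with open("local.csv", "rb") as stream:
+                    file_client.upload_data(stream, overwrite=True, max_concurrency=2)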
+        """
+        options = self._upload_options(
+            data,
+            length=length,
+            overwrite=overwrite,
+            **kwargs)
+        return upload_datalake_file(**options)
+
+    @staticmethod
+    def _append_data_options(
+            data, # type: Union[bytes, str, Iterable[AnyStr], IO[AnyStr]]
+            offset, # type: int
+            scheme, # type: str
+            length=None, # type: Optional[int]
+            **kwargs
+        ):
+        # type: (...) -> Dict[str, Any]
+
+        if isinstance(data, str):
+            data = data.encode(kwargs.pop('encoding', 'UTF-8'))  # type: ignore
+        if length is None:
+            length = get_length(data)
+            if length is None:
+                length, data = read_length(data)
+        if isinstance(data, bytes):
+            data = data[:length]
+
+        cpk_info = get_cpk_info(scheme, kwargs)
+        kwargs.update(get_lease_action_properties(kwargs))
+
+        options = {
+            'body': data,
+            'position': offset,
+            'content_length': length,
+            'validate_content': kwargs.pop('validate_content', False),
+            'cpk_info': cpk_info,
+            'timeout': kwargs.pop('timeout', None),
+            'cls': return_response_headers}
+        options.update(kwargs)
+        return options
+
+    @distributed_trace
+    def append_data(self, data,  # type: Union[bytes, str, Iterable[AnyStr], IO[AnyStr]]
+                    offset,  # type: int
+                    length=None,  # type: Optional[int]
+                    **kwargs):
+        # type: (...) -> Dict[str, Union[str, datetime, int]]
+        """Append data to the file.
+
+        :param data: Content to be appended to file
+        :type data: bytes, str, Iterable[AnyStr], or IO[AnyStr]
+        :param int offset: Start position in the file at which to append the data.
+        :param length: Size of the data in bytes.
+        :type length: int or None
+        :keyword bool flush:
+            If true, will commit the data after it is appended.
+        :keyword bool validate_content:
+            If true, calculates an MD5 hash of the block content. The storage
+            service checks the hash of the content that has arrived
+            with the hash that was sent. This is primarily valuable for detecting
+            bitflips on the wire if using http instead of https as https (the default)
+            will already validate. Note that this MD5 hash is not stored with the
+            file.
+        :keyword lease_action:
+            Used to perform lease operations along with appending data.
+
+            "acquire" - Acquire a lease.
+            "auto-renew" - Re-new an existing lease.
+            "release" - Release the lease once the operation is complete. Requires `flush=True`.
+            "acquire-release" - Acquire a lease and release it once the operations is complete. Requires `flush=True`.
+        :paramtype lease_action: Literal["acquire", "auto-renew", "release", "acquire-release"]
+        :keyword int lease_duration:
+            Valid if `lease_action` is set to "acquire" or "acquire-release".
+
+            Specifies the duration of the lease, in seconds, or negative one
+            (-1) for a lease that never expires. A non-infinite lease can be
+            between 15 and 60 seconds. A lease duration cannot be changed
+            using renew or change. Default is -1 (infinite lease).
+        :keyword lease:
+            Required if the file has an active lease or if `lease_action` is set to "acquire" or "acquire-release".
+            If the file has an existing lease, this will be used to access the file. If acquiring a new lease,
+            this will be used as the new lease id.
+            Value can be a DataLakeLeaseClient object or the lease ID as a string.
+        :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
+        :keyword ~azure.storage.filedatalake.CustomerProvidedEncryptionKey cpk:
+            Encrypts the data on the service-side with the given key.
+            Use of customer-provided keys must be done over HTTPS.
+        :returns: dict of the response header.
+        :rtype: dict[str, str], dict[str, ~datetime.datetime], or dict[str, int]
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_upload_download.py
+                :start-after: [START append_data]
+                :end-before: [END append_data]
+                :language: python
+                :dedent: 4
+                :caption: Append data to the file.
+        """
+        options = self._append_data_options(
+            data=data,
+            offset=offset,
+            scheme=self.scheme,
+            length=length,
+            **kwargs)
+        try:
+            return self._client.path.append_data(**options)
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    @staticmethod
+    def _flush_data_options(
+            offset, # type: int
+            scheme, # type: str
+            content_settings=None, # type: Optional[ContentSettings]
+            retain_uncommitted_data=False, # type: Optional[bool]
+            **kwargs
+        ):
+        # type: (...) -> Dict[str, Any]
+
+        mod_conditions = get_mod_conditions(kwargs)
+
+        path_http_headers = None
+        if content_settings:
+            path_http_headers = get_path_http_headers(content_settings)
+
+        cpk_info = get_cpk_info(scheme, kwargs)
+        kwargs.update(get_lease_action_properties(kwargs))
+
+        options = {
+            'position': offset,
+            'content_length': 0,
+            'path_http_headers': path_http_headers,
+            'retain_uncommitted_data': retain_uncommitted_data,
+            'close': kwargs.pop('close', False),
+            'modified_access_conditions': mod_conditions,
+            'cpk_info': cpk_info,
+            'timeout': kwargs.pop('timeout', None),
+            'cls': return_response_headers}
+        options.update(kwargs)
+        return options
+
+    @distributed_trace
+    def flush_data(self, offset,  # type: int
+                   retain_uncommitted_data=False,   # type: Optional[bool]
+                   **kwargs):
+        # type: (...) -> Dict[str, Union[str, datetime]]
+        """ Commit the previous appended data.
+
+        :param int offset: offset is equal to the length of the file after commit
+            the previous appended data.
+        :param bool retain_uncommitted_data: Valid only for flush operations.  If
+            "true", uncommitted data is retained after the flush operation
+            completes; otherwise, the uncommitted data is deleted after the flush
+            operation.  The default is false.  Data at offsets less than the
+            specified position are written to the file when flush succeeds, but
+            this optional parameter allows data after the flush position to be
+            retained for a future flush operation.
+        :keyword ~azure.storage.filedatalake.ContentSettings content_settings:
+            ContentSettings object used to set path properties.
+        :keyword bool close: Azure Storage Events allow applications to receive
+            notifications when files change. When Azure Storage Events are
+            enabled, a file changed event is raised. This event has a property
+            indicating whether this is the final change to distinguish the
+            difference between an intermediate flush to a file stream and the
+            final close of a file stream. The close query parameter is valid only
+            when the action is "flush" and change notifications are enabled. If
+            the value of close is "true" and the flush operation completes
+            successfully, the service raises a file change notification with a
+            property indicating that this is the final update (the file stream has
+            been closed). If "false" a change notification is raised indicating
+            the file has changed. The default is false. This query parameter is
+            set to true by the Hadoop ABFS driver to indicate that the file stream
+            has been closed.
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword lease_action:
+            Used to perform lease operations along with appending data.
+
+            "acquire" - Acquire a lease.
+            "auto-renew" - Re-new an existing lease.
+            "release" - Release the lease once the operation is complete.
+            "acquire-release" - Acquire a lease and release it once the operations is complete.
+        :paramtype lease_action: Literal["acquire", "auto-renew", "release", "acquire-release"]
+        :keyword int lease_duration:
+            Valid if `lease_action` is set to "acquire" or "acquire-release".
+
+            Specifies the duration of the lease, in seconds, or negative one
+            (-1) for a lease that never expires. A non-infinite lease can be
+            between 15 and 60 seconds. A lease duration cannot be changed
+            using renew or change. Default is -1 (infinite lease).
+        :keyword lease:
+            Required if the file has an active lease or if `lease_action` is set to "acquire" or "acquire-release".
+            If the file has an existing lease, this will be used to access the file. If acquiring a new lease,
+            this will be used as the new lease id.
+            Value can be a DataLakeLeaseClient object or the lease ID as a string.
+        :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
+        :keyword ~azure.storage.filedatalake.CustomerProvidedEncryptionKey cpk:
+            Encrypts the data on the service-side with the given key.
+            Use of customer-provided keys must be done over HTTPS.
+        :returns: The response headers as a dict.
+        :rtype: dict[str, str] or dict[str, ~datetime.datetime]
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_file_system.py
+                :start-after: [START upload_file_to_file_system]
+                :end-before: [END upload_file_to_file_system]
+                :language: python
+                :dedent: 8
+                :caption: Commit the previously appended data.
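+
+        A minimal sketch (``file_client`` is a hypothetical DataLakeFileClient with
+        ``total_length`` bytes of staged, uncommitted data):
+
+        .. code-block:: python
+
+            # Commit the staged data; with change notifications enabled, close=True
+            # marks this as the final update to the file stream.
+            file_client.flush_data(total_length, close=True)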
+        """
+        options = self._flush_data_options(
+            offset,
+            self.scheme,
+            retain_uncommitted_data=retain_uncommitted_data, **kwargs)
+        try:
+            return self._client.path.flush_data(**options)
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    @distributed_trace
+    def download_file(self, offset=None, length=None, **kwargs):
+        # type: (Optional[int], Optional[int], Any) -> StorageStreamDownloader
+        """Downloads a file to the StorageStreamDownloader. The readall() method must
+        be used to read all the content, or readinto() must be used to download the file into
+        a stream. Using chunks() returns an iterator which allows the user to iterate over the content in chunks.
+
+        :param int offset:
+            Start of byte range to use for downloading a section of the file.
+            Must be set if length is provided.
+        :param int length:
+            Number of bytes to read from the stream. This is optional, but
+            should be supplied for optimal performance.
+        :keyword lease:
+            If specified, download only succeeds if the file's lease is active
+            and matches this ID. Required if the file has an active lease.
+        :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword ~azure.storage.filedatalake.CustomerProvidedEncryptionKey cpk:
+            Decrypts the data on the service-side with the given key.
+            Use of customer-provided keys must be done over HTTPS.
+            Required if the file was created with a Customer-Provided Key.
+        :keyword int max_concurrency:
+            Maximum number of parallel connections to use when transferring the file in chunks.
+            This option does not affect the underlying connection pool, and may
+            require a separate configuration of the connection pool.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_. This method may make multiple calls to the service and
+            the timeout will apply to each call individually.
+        :returns: A streaming object (StorageStreamDownloader)
+        :rtype: ~azure.storage.filedatalake.StorageStreamDownloader
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_upload_download.py
+                :start-after: [START read_file]
+                :end-before: [END read_file]
+                :language: python
+                :dedent: 4
+                :caption: Return the downloaded data.
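+
+        A minimal sketch of the two read styles (``file_client`` is a hypothetical
+        DataLakeFileClient pointing at an existing file):
+
+        .. code-block:: python
+
+            # Read the whole file into memory.
+            contents = file_client.download_file().readall()
+
+            # Or stream it into a local file without holding it all in memory.
+            with open("local_copy.bin", "wb") as stream:
+                file_client.download_file().readinto(stream)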
+        """
+        downloader = self._blob_client.download_blob(offset=offset, length=length, **kwargs)
+        return StorageStreamDownloader(downloader)
+
+    @distributed_trace
+    def exists(self, **kwargs):
+        # type: (**Any) -> bool
+        """
+        Returns True if a file exists and returns False otherwise.
+
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :returns: True if a file exists, otherwise returns False.
+        :rtype: bool
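+
+        A minimal sketch (``file_client`` is a hypothetical DataLakeFileClient):
+
+        .. code-block:: python
+
+            if not file_client.exists():
+                file_client.create_file()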
+        """
+        return self._exists(**kwargs)
+
+    @distributed_trace
+    def rename_file(self, new_name, **kwargs):
+        # type: (str, **Any) -> DataLakeFileClient
+        """
+        Rename the source file.
+
+        :param str new_name: The new file name the user wants to rename to.
+            The value must have the following format: "{filesystem}/{directory}/{subdirectory}/{file}".
+        :keyword ~azure.storage.filedatalake.ContentSettings content_settings:
+            ContentSettings object used to set path properties.
+        :keyword source_lease: A lease ID for the source path. If specified,
+         the source path must have an active lease and the lease ID must
+         match.
+        :paramtype source_lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
+        :keyword lease:
+            Required if the file/directory has an active lease. Value can be a LeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword ~datetime.datetime source_if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime source_if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str source_etag:
+            The source ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions source_match_condition:
+            The source match condition to use upon the etag.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :return: the renamed file client
+        :rtype: DataLakeFileClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_upload_download.py
+                :start-after: [START rename_file]
+                :end-before: [END rename_file]
+                :language: python
+                :dedent: 4
+                :caption: Rename the source file.
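+
+        A minimal sketch; note that ``new_name`` must include the file system name
+        (the path below is a hypothetical placeholder):
+
+        .. code-block:: python
+
+            renamed_client = file_client.rename_file("my-filesystem/newdir/newfile.txt")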
+        """
+        new_file_system, new_path, new_file_sas = self._parse_rename_path(new_name)
+
+        new_file_client = DataLakeFileClient(
+            f"{self.scheme}://{self.primary_hostname}", new_file_system, file_path=new_path,
+            credential=self._raw_credential or new_file_sas,
+            _hosts=self._hosts, _configuration=self._config, _pipeline=self._pipeline,
+            _location_mode=self._location_mode
+        )
+        new_file_client._rename_path(  # pylint: disable=protected-access
+            f'/{quote(unquote(self.file_system_name))}/{quote(unquote(self.path_name))}{self._query_str}', **kwargs)
+        return new_file_client
+
+    @distributed_trace
+    def query_file(self, query_expression, **kwargs):
+        # type: (str, **Any) -> DataLakeFileQueryReader
+        """
+        Enables users to select/project on datalake file data by providing simple query expressions.
+        This operation returns a DataLakeFileQueryReader; call readall() or readinto() to retrieve the query data.
+
+        :param str query_expression:
+            Required. A query statement,
+            e.g. ``SELECT * from DataLakeStorage``.
+        :keyword Callable[~azure.storage.filedatalake.DataLakeFileQueryError] on_error:
+            A function to be called on any processing errors returned by the service.
+        :keyword file_format:
+            Optional. Defines the serialization of the data currently stored in the file. The default is to
+            treat the file data as CSV data formatted in the default dialect. This can be overridden with
+            a custom DelimitedTextDialect, DelimitedJsonDialect, or "ParquetDialect" (passed as a string or enum).
+            These dialects can be passed through their respective classes, the QuickQueryDialect enum or as a string.
+        :paramtype file_format:
+            ~azure.storage.filedatalake.DelimitedTextDialect or ~azure.storage.filedatalake.DelimitedJsonDialect or
+            ~azure.storage.filedatalake.QuickQueryDialect or str
+        :keyword output_format:
+            Optional. Defines the output serialization for the data stream. By default the data will be returned
+            as it is represented in the file. By providing an output format,
+            the file data will be reformatted according to that profile.
+            This value can be a DelimitedTextDialect, DelimitedJsonDialect, or ArrowDialect.
+            These dialects can be passed through their respective classes, the QuickQueryDialect enum or as a string.
+        :paramtype output_format:
+            ~azure.storage.filedatalake.DelimitedTextDialect or ~azure.storage.filedatalake.DelimitedJsonDialect
+            or list[~azure.storage.filedatalake.ArrowDialect] or ~azure.storage.filedatalake.QuickQueryDialect or str
+        :keyword lease:
+            Required if the file has an active lease. Value can be a DataLakeLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword ~azure.storage.filedatalake.CustomerProvidedEncryptionKey cpk:
+            Decrypts the data on the service-side with the given key.
+            Use of customer-provided keys must be done over HTTPS.
+            Required if the file was created with a Customer-Provided Key.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :returns: A streaming object (DataLakeFileQueryReader)
+        :rtype: ~azure.storage.filedatalake.DataLakeFileQueryReader
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_query.py
+                :start-after: [START query]
+                :end-before: [END query]
+                :language: python
+                :dedent: 4
+                :caption: Select/project on datalake file data by providing simple query expressions.
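+
+        A minimal sketch (``file_client`` is a hypothetical DataLakeFileClient
+        pointing at a CSV-formatted file):
+
+        .. code-block:: python
+
+            reader = file_client.query_file("SELECT * from DataLakeStorage")
+            content = reader.readall()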
+        """
+        query_expression = query_expression.replace("from DataLakeStorage", "from BlobStorage")
+        blob_quick_query_reader = self._blob_client.query_blob(query_expression,
+                                                               blob_format=kwargs.pop('file_format', None),
+                                                               error_cls=DataLakeFileQueryError,
+                                                               **kwargs)
+        return DataLakeFileQueryReader(blob_quick_query_reader)
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_data_lake_lease.py b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_data_lake_lease.py
new file mode 100644
index 00000000..0f65f1c7
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_data_lake_lease.py
@@ -0,0 +1,273 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+# pylint: disable=docstring-keyword-should-match-keyword-only
+
+import uuid
+
+from typing import (
+    Union, Optional, Any,
+    TypeVar, TYPE_CHECKING
+)
+from azure.core.tracing.decorator import distributed_trace
+from azure.storage.blob import BlobLeaseClient
+
+
+if TYPE_CHECKING:
+    from datetime import datetime
+    FileSystemClient = TypeVar("FileSystemClient")
+    DataLakeDirectoryClient = TypeVar("DataLakeDirectoryClient")
+    DataLakeFileClient = TypeVar("DataLakeFileClient")
+
+
+class DataLakeLeaseClient(object):  # pylint: disable=client-accepts-api-version-keyword
+    """Creates a new DataLakeLeaseClient.
+
+    This client provides lease operations on a FileSystemClient, DataLakeDirectoryClient or DataLakeFileClient.
+
+    :ivar str id:
+        The ID of the lease currently being maintained. This will be `None` if no
+        lease has yet been acquired.
+    :ivar str etag:
+        The ETag of the lease currently being maintained. This will be `None` if no
+        lease has yet been acquired or modified.
+    :ivar ~datetime.datetime last_modified:
+        The last modified timestamp of the lease currently being maintained.
+        This will be `None` if no lease has yet been acquired or modified.
+
+    :param client:
+        The client of the file system, directory, or file to lease.
+    :type client: ~azure.storage.filedatalake.FileSystemClient or
+        ~azure.storage.filedatalake.DataLakeDirectoryClient or ~azure.storage.filedatalake.DataLakeFileClient
+    :param str lease_id:
+        A string representing the lease ID of an existing lease. This value does not
+        need to be specified in order to acquire a new lease, or break one.
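+
+    A minimal sketch of holding a lease for the duration of an operation
+    (``file_client`` is a hypothetical, already-constructed DataLakeFileClient):
+
+    .. code-block:: python
+
+        with DataLakeLeaseClient(file_client) as lease:
+            lease.acquire(lease_duration=15)
+            # While the lease is held, pass it to operations on the file.
+            props = file_client.get_file_properties(lease=lease)
+        # Leaving the block releases the lease.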
+    """
+    def __init__(
+            self, client, lease_id=None
+    ):  # pylint: disable=missing-client-constructor-parameter-credential,missing-client-constructor-parameter-kwargs
+        # type: (Union[FileSystemClient, DataLakeDirectoryClient, DataLakeFileClient], Optional[str]) -> None
+        self.id = lease_id or str(uuid.uuid4())
+        self.last_modified = None
+        self.etag = None
+
+        if hasattr(client, '_blob_client'):
+            _client = client._blob_client  # type: ignore
+        elif hasattr(client, '_container_client'):
+            _client = client._container_client  # type: ignore
+        else:
+            raise TypeError("Lease must use any of FileSystemClient, DataLakeDirectoryClient, or DataLakeFileClient.")
+
+        self._blob_lease_client = BlobLeaseClient(_client, lease_id=lease_id)
+
+    def __enter__(self):
+        return self
+
+    def __exit__(self, *args):
+        self.release()
+
+    @distributed_trace
+    def acquire(self, lease_duration=-1, **kwargs):
+        # type: (int, **Any) -> None
+        """Requests a new lease.
+
+        If the file/file system does not have an active lease, the DataLake service creates a
+        lease on the file/file system and returns a new lease ID.
+
+        :param int lease_duration:
+            Specifies the duration of the lease, in seconds, or negative one
+            (-1) for a lease that never expires. A non-infinite lease can be
+            between 15 and 60 seconds. A lease duration cannot be changed
+            using renew or change. Default is -1 (infinite lease).
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :rtype: None
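+
+        A minimal sketch of acquiring a short, finite lease (``lease`` is a
+        hypothetical DataLakeLeaseClient):
+
+        .. code-block:: python
+
+            lease.acquire(lease_duration=15)  # finite leases last 15-60 seconds
+            lease.renew()                     # resets the duration clock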
+        """
+        self._blob_lease_client.acquire(lease_duration=lease_duration, **kwargs)
+        self._update_lease_client_attributes()
+
+    @distributed_trace
+    def renew(self, **kwargs):
+        # type: (Any) -> None
+        """Renews the lease.
+
+        The lease can be renewed if the lease ID specified in the
+        lease client matches that associated with the file system or file. Note that
+        the lease may be renewed even if it has expired as long as the file system
+        or file has not been leased again since the expiration of that lease. When you
+        renew a lease, the lease duration clock resets.
+
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :return: None
+        """
+        self._blob_lease_client.renew(**kwargs)
+        self._update_lease_client_attributes()
+
+    @distributed_trace
+    def release(self, **kwargs):
+        # type: (Any) -> None
+        """Release the lease.
+
+        The lease may be released if the client lease id specified matches
+        that associated with the file system or file. Releasing the lease allows another client
+        to immediately acquire the lease for the file system or file as soon as the release is complete.
+
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :return: None
+        """
+        self._blob_lease_client.release(**kwargs)
+        self._update_lease_client_attributes()
+
+    @distributed_trace
+    def change(self, proposed_lease_id, **kwargs):
+        # type: (str, Any) -> None
+        """Change the lease ID of an active lease.
+
+        :param str proposed_lease_id:
+            Proposed lease ID, in a GUID string format. The DataLake service returns 400
+            (Invalid request) if the proposed lease ID is not in the correct format.
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :return: None
+        """
+        self._blob_lease_client.change(proposed_lease_id=proposed_lease_id, **kwargs)
+        self._update_lease_client_attributes()
+
+    @distributed_trace
+    def break_lease(self, lease_break_period=None, **kwargs):
+        # type: (Optional[int], Any) -> int
+        """Break the lease, if the file system or file has an active lease.
+
+        Once a lease is broken, it cannot be renewed. Any authorized request can break the lease;
+        the request is not required to specify a matching lease ID. When a lease
+        is broken, the lease break period is allowed to elapse, during which time
+        no lease operation except break and release can be performed on the file system or file.
+        When a lease is successfully broken, the response indicates the interval
+        in seconds until a new lease can be acquired.
+
+        :param int lease_break_period:
+            This is the proposed duration of seconds that the lease
+            should continue before it is broken, between 0 and 60 seconds. This
+            break period is only used if it is shorter than the time remaining
+            on the lease. If longer, the time remaining on the lease is used.
+            A new lease will not be available before the break period has
+            expired, but the lease may be held for longer than the break
+            period. If this header does not appear with a break
+            operation, a fixed-duration lease breaks after the remaining lease
+            period elapses, and an infinite lease breaks immediately.
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :return: Approximate time remaining in the lease period, in seconds.
+        :rtype: int
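+
+        A minimal sketch (``lease`` is a hypothetical DataLakeLeaseClient holding
+        an active lease):
+
+        .. code-block:: python
+
+            # Break immediately; the return value is the number of seconds
+            # remaining before a new lease can be acquired.
+            seconds_remaining = lease.break_lease(lease_break_period=0)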
+        """
+        return self._blob_lease_client.break_lease(lease_break_period=lease_break_period, **kwargs)
+
+    def _update_lease_client_attributes(self):
+        self.id = self._blob_lease_client.id  # type: str
+        self.last_modified = self._blob_lease_client.last_modified  # type: datetime
+        self.etag = self._blob_lease_client.etag  # type: str
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_data_lake_service_client.py b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_data_lake_service_client.py
new file mode 100644
index 00000000..6ec34b94
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_data_lake_service_client.py
@@ -0,0 +1,633 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+# pylint: disable=docstring-keyword-should-match-keyword-only
+
+from typing import Any, Dict, Optional, Union, TYPE_CHECKING
+from urllib.parse import urlparse
+
+from typing_extensions import Self
+
+from azure.core.paging import ItemPaged
+from azure.core.pipeline import Pipeline
+from azure.core.tracing.decorator import distributed_trace
+from azure.storage.blob import BlobServiceClient
+from ._shared.base_client import TransportWrapper, StorageAccountHostsMixin, parse_query, parse_connection_str
+from ._deserialize import get_datalake_service_properties
+from ._file_system_client import FileSystemClient
+from ._data_lake_directory_client import DataLakeDirectoryClient
+from ._data_lake_file_client import DataLakeFileClient
+from ._models import UserDelegationKey, FileSystemPropertiesPaged, LocationMode
+from ._serialize import convert_dfs_url_to_blob_url, get_api_version
+from ._generated import AzureDataLakeStorageRESTAPI
+
+if TYPE_CHECKING:
+    from azure.core.credentials import AzureNamedKeyCredential, AzureSasCredential, TokenCredential
+
+
+class DataLakeServiceClient(StorageAccountHostsMixin):
+    """A client to interact with the DataLake Service at the account level.
+
+    This client provides operations to retrieve and configure the account properties
+    as well as list, create and delete file systems within the account.
+    For operations relating to a specific file system, directory or file, clients for those entities
+    can also be retrieved using the `get_client` functions.
+
+    :ivar str url:
+        The full endpoint URL to the datalake service endpoint.
+    :ivar str primary_endpoint:
+        The full primary endpoint URL.
+    :ivar str primary_hostname:
+        The hostname of the primary endpoint.
+    :param str account_url:
+        The URL to the DataLake storage account. Any other entities included
+        in the URL path (e.g. file system or file) will be discarded. This URL can be optionally
+        authenticated with a SAS token.
+    :param credential:
+        The credentials with which to authenticate. This is optional if the
+        account URL already has a SAS token. The value can be a SAS token string,
+        an instance of an AzureSasCredential or AzureNamedKeyCredential from azure.core.credentials,
+        an account shared access key, or an instance of a TokenCredential class from azure.identity.
+        If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential
+        - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError.
+        If using an instance of AzureNamedKeyCredential, "name" should be the storage account name, and "key"
+        should be the storage account key.
+    :type credential:
+        ~azure.core.credentials.AzureNamedKeyCredential or
+        ~azure.core.credentials.AzureSasCredential or
+        ~azure.core.credentials.TokenCredential or
+        str or dict[str, str] or None
+    :keyword str api_version:
+        The Storage API version to use for requests. Default value is the most recent service version that is
+        compatible with the current SDK. Setting to an older version may result in reduced feature compatibility.
+    :keyword str audience: The audience to use when requesting tokens for Azure Active Directory
+        authentication. Only has an effect when credential is of type TokenCredential. The value could be
+        https://storage.azure.com/ (default) or https://<account>.blob.core.windows.net.
+
+
+    .. admonition:: Example:
+
+        .. literalinclude:: ../samples/datalake_samples_service.py
+            :start-after: [START create_datalake_service_client]
+            :end-before: [END create_datalake_service_client]
+            :language: python
+            :dedent: 8
+            :caption: Creating the DataLakeServiceClient from connection string.
+
+        .. literalinclude:: ../samples/datalake_samples_service.py
+            :start-after: [START create_datalake_service_client_oauth]
+            :end-before: [END create_datalake_service_client_oauth]
+            :language: python
+            :dedent: 8
+            :caption: Creating the DataLakeServiceClient with Azure Identity credentials.
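+
+    A minimal sketch of constructing the client directly (the account name below is
+    a hypothetical placeholder):
+
+    .. code-block:: python
+
+        from azure.identity import DefaultAzureCredential
+        from azure.storage.filedatalake import DataLakeServiceClient
+
+        service_client = DataLakeServiceClient(
+            account_url="https://myaccount.dfs.core.windows.net",
+            credential=DefaultAzureCredential(),
+        )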
+    """
+
+    def __init__(
+            self, account_url: str,
+            credential: Optional[Union[str, Dict[str, str], "AzureNamedKeyCredential", "AzureSasCredential", "TokenCredential"]] = None,  # pylint: disable=line-too-long
+            **kwargs: Any
+        ) -> None:
+        try:
+            if not account_url.lower().startswith('http'):
+                account_url = "https://" + account_url
+        except AttributeError as exc:
+            raise ValueError("Account URL must be a string.") from exc
+        parsed_url = urlparse(account_url.rstrip('/'))
+        if not parsed_url.netloc:
+            raise ValueError(f"Invalid URL: {account_url}")
+
+        blob_account_url = convert_dfs_url_to_blob_url(account_url)
+        self._blob_account_url = blob_account_url
+        self._blob_service_client = BlobServiceClient(blob_account_url, credential, **kwargs)
+        self._blob_service_client._hosts[LocationMode.SECONDARY] = ""
+
+        _, sas_token = parse_query(parsed_url.query)
+        self._query_str, self._raw_credential = self._format_query_string(sas_token, credential)
+
+        super(DataLakeServiceClient, self).__init__(parsed_url, service='dfs',
+                                                    credential=self._raw_credential, **kwargs)
+        # ADLS doesn't support secondary endpoint, make sure it's empty
+        self._hosts[LocationMode.SECONDARY] = ""
+
+        self._client = AzureDataLakeStorageRESTAPI(self.url, base_url=self.url, pipeline=self._pipeline)
+        self._client._config.version = get_api_version(kwargs)
+
+    def __enter__(self):
+        self._blob_service_client.__enter__()
+        return self
+
+    def __exit__(self, *args):
+        self._blob_service_client.close()
+        super(DataLakeServiceClient, self).__exit__(*args)
+
+    def close(self):
+        # type: () -> None
+        """ This method is to close the sockets opened by the client.
+        It need not be used when using with a context manager.
+        """
+        self.__exit__()
+
+    def _format_url(self, hostname):
+        """Format the endpoint URL according to hostname.
+
+        :param str hostname: The hostname for the endpoint URL.
+        :returns: The formatted URL
+        :rtype: str
+        """
+        formatted_url = f"{self.scheme}://{hostname}/{self._query_str}"
+        return formatted_url
+
+    @classmethod
+    def from_connection_string(
+            cls, conn_str: str,
+            credential: Optional[Union[str, Dict[str, str], "AzureNamedKeyCredential", "AzureSasCredential", "TokenCredential"]] = None,  # pylint: disable=line-too-long
+            **kwargs: Any
+        ) -> Self:
+        """
+        Create DataLakeServiceClient from a Connection String.
+
+        :param str conn_str:
+            A connection string to an Azure Storage account.
+        :param credential:
+            The credentials with which to authenticate. This is optional if the
+            account URL already has a SAS token, or the connection string already has shared
+            access key values. The value can be a SAS token string,
+            an instance of an AzureSasCredential from azure.core.credentials, an account shared access
+            key, or an instance of a TokenCredential class from azure.identity.
+            Credentials provided here will take precedence over those in the connection string.
+        :type credential:
+            ~azure.core.credentials.AzureNamedKeyCredential or
+            ~azure.core.credentials.AzureSasCredential or
+            ~azure.core.credentials.TokenCredential or
+            str or dict[str, str] or None
+        :keyword str audience: The audience to use when requesting tokens for Azure Active Directory
+            authentication. Only has an effect when credential is of type TokenCredential. The value could be
+            https://storage.azure.com/ (default) or https://<account>.blob.core.windows.net.
+        :returns: A DataLakeServiceClient.
+        :rtype: ~azure.storage.filedatalake.DataLakeServiceClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_file_system.py
+                :start-after: [START create_data_lake_service_client_from_conn_str]
+                :end-before: [END create_data_lake_service_client_from_conn_str]
+                :language: python
+                :dedent: 8
+                :caption: Creating the DataLakeServiceClient from a connection string.
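+
+        A minimal sketch (the environment variable name is an assumed convention):
+
+        .. code-block:: python
+
+            import os
+
+            from azure.storage.filedatalake import DataLakeServiceClient
+
+            conn_str = os.environ["AZURE_STORAGE_CONNECTION_STRING"]
+            service_client = DataLakeServiceClient.from_connection_string(conn_str)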
+        """
+        account_url, _, credential = parse_connection_str(conn_str, credential, 'dfs')
+        return cls(account_url, credential=credential, **kwargs)
+
+    @distributed_trace
+    def get_user_delegation_key(self, key_start_time,  # type: datetime
+                                key_expiry_time,  # type: datetime
+                                **kwargs  # type: Any
+                                ):
+        # type: (...) -> UserDelegationKey
+        """
+        Obtain a user delegation key for the purpose of signing SAS tokens.
+        A token credential must be present on the service object for this request to succeed.
+
+        :param ~datetime.datetime key_start_time:
+            A DateTime value. Indicates when the key becomes valid.
+        :param ~datetime.datetime key_expiry_time:
+            A DateTime value. Indicates when the key stops being valid.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :return: The user delegation key.
+        :rtype: ~azure.storage.filedatalake.UserDelegationKey
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_service.py
+                :start-after: [START get_user_delegation_key]
+                :end-before: [END get_user_delegation_key]
+                :language: python
+                :dedent: 8
+                :caption: Get user delegation key from datalake service client.
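+
+        A minimal sketch of requesting a key valid for one hour (``service_client``
+        must have been constructed with a token credential):
+
+        .. code-block:: python
+
+            from datetime import datetime, timedelta, timezone
+
+            start = datetime.now(timezone.utc)
+            key = service_client.get_user_delegation_key(start, start + timedelta(hours=1))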
+        """
+        delegation_key = self._blob_service_client.get_user_delegation_key(key_start_time=key_start_time,
+                                                                           key_expiry_time=key_expiry_time,
+                                                                           **kwargs)
+        return UserDelegationKey._from_generated(delegation_key)  # pylint: disable=protected-access
+
+    @distributed_trace
+    def list_file_systems(self, name_starts_with=None,  # type: Optional[str]
+                          include_metadata=None,  # type: Optional[bool]
+                          **kwargs):
+        # type: (...) -> ItemPaged[FileSystemProperties]
+        """Returns a generator to list the file systems under the specified account.
+
+        The generator will lazily follow the continuation tokens returned by
+        the service and stop when all file systems have been returned.
+
+        :param str name_starts_with:
+            Filters the results to return only file systems whose names
+            begin with the specified prefix.
+        :param bool include_metadata:
+            Specifies that file system metadata be returned in the response.
+            The default value is `False`.
+        :keyword int results_per_page:
+            The maximum number of file system names to retrieve per API
+            call. If the request does not specify a value, the server will return up to 5,000 items per page.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :keyword bool include_deleted:
+            Specifies that deleted file systems be returned in the response. This is only for
+            accounts with file system restore enabled. The default value is `False`.
+
+            .. versionadded:: 12.3.0
+        :keyword bool include_system:
+            Flag specifying that system filesystems should be included.
+
+            .. versionadded:: 12.6.0
+        :returns: An iterable (auto-paging) of FileSystemProperties.
+        :rtype: ~azure.core.paging.ItemPaged[~azure.storage.filedatalake.FileSystemProperties]
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_service.py
+                :start-after: [START list_file_systems]
+                :end-before: [END list_file_systems]
+                :language: python
+                :dedent: 8
+                :caption: Listing the file systems in the datalake service.
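+
+        A minimal sketch (``service_client`` is a hypothetical DataLakeServiceClient;
+        the name prefix is a placeholder):
+
+        .. code-block:: python
+
+            for fs in service_client.list_file_systems(name_starts_with="logs-"):
+                print(fs.name)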
+        """
+        item_paged = self._blob_service_client.list_containers(name_starts_with=name_starts_with,
+                                                               include_metadata=include_metadata,
+                                                               **kwargs)
+        item_paged._page_iterator_class = FileSystemPropertiesPaged  # pylint: disable=protected-access
+        return item_paged
+
+    @distributed_trace
+    def create_file_system(self, file_system,  # type: Union[FileSystemProperties, str]
+                           metadata=None,  # type: Optional[Dict[str, str]]
+                           public_access=None,  # type: Optional[PublicAccess]
+                           **kwargs):
+        # type: (...) -> FileSystemClient
+        """Creates a new file system under the specified account.
+
+        If the file system with the same name already exists, a ResourceExistsError will
+        be raised. This method returns a client with which to interact with the newly
+        created file system.
+
+        :param str file_system:
+            The name of the file system to create.
+        :param metadata:
+            A dict with name-value pairs to associate with the
+            file system as metadata. Example: `{'Category':'test'}`
+        :type metadata: dict(str, str)
+        :param public_access:
+            Possible values include: file system, file.
+        :type public_access: ~azure.storage.filedatalake.PublicAccess
+        :keyword encryption_scope_options:
+            Specifies the default encryption scope to set on the file system and use for
+            all future writes.
+
+            .. versionadded:: 12.9.0
+
+        :paramtype encryption_scope_options: dict or ~azure.storage.filedatalake.EncryptionScopeOptions
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :returns: A FileSystemClient with newly created file system.
+        :rtype: ~azure.storage.filedatalake.FileSystemClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_service.py
+                :start-after: [START create_file_system_from_service_client]
+                :end-before: [END create_file_system_from_service_client]
+                :language: python
+                :dedent: 8
+                :caption: Creating a file system in the datalake service.
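+
+        A minimal sketch (the file system name and metadata are placeholders):
+
+        .. code-block:: python
+
+            fs_client = service_client.create_file_system(
+                "my-filesystem", metadata={"Category": "test"})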
+        """
+        file_system_client = self.get_file_system_client(file_system)
+        file_system_client.create_file_system(metadata=metadata, public_access=public_access, **kwargs)
+        return file_system_client
+
+    def _rename_file_system(self, name, new_name, **kwargs):
+        # type: (str, str, **Any) -> FileSystemClient
+        """Renames a filesystem.
+
+        Operation is successful only if the source filesystem exists.
+
+        :param str name:
+            The name of the filesystem to rename.
+        :param str new_name:
+            The new filesystem name the user wants to rename to.
+        :keyword lease:
+            Specify this to perform only if the lease ID given
+            matches the active lease ID of the source filesystem.
+        :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :returns: A FileSystemClient with the specified file system renamed.
+        :rtype: ~azure.storage.filedatalake.FileSystemClient
+        """
+        self._blob_service_client._rename_container(name, new_name, **kwargs)   # pylint: disable=protected-access
+        renamed_file_system = self.get_file_system_client(new_name)
+        return renamed_file_system
+
+    @distributed_trace
+    def undelete_file_system(self, name, deleted_version, **kwargs):
+        # type: (str, str, **Any) -> FileSystemClient
+        """Restores soft-deleted filesystem.
+
+        Operation will only be successful if used within the specified number of days
+        set in the delete retention policy.
+
+        .. versionadded:: 12.3.0
+            This operation was introduced in API version '2019-12-12'.
+
+        :param str name:
+            Specifies the name of the deleted filesystem to restore.
+        :param str deleted_version:
+            Specifies the version of the deleted filesystem to restore.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :returns: A FileSystemClient for the restored soft-deleted file system.
+        :rtype: ~azure.storage.filedatalake.FileSystemClient
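+
+        .. admonition:: Example:
+
+            .. code-block:: python
+
+                # A minimal sketch; assumes `service_client` is an existing
+                # DataLakeServiceClient and that the deleted filesystem's name and
+                # version are already known (the version string is a placeholder).
+                restored_client = service_client.undelete_file_system(
+                    "myfilesystem", "01D60F8BB59A4652")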
+        """
+        new_name = kwargs.pop('new_name', None)
+        file_system = self.get_file_system_client(new_name or name)
+        self._blob_service_client.undelete_container(
+            name, deleted_version, new_name=new_name, **kwargs)
+        return file_system
+
+    @distributed_trace
+    def delete_file_system(self, file_system,  # type: Union[FileSystemProperties, str]
+                           **kwargs):
+        # type: (...) -> FileSystemClient
+        """Marks the specified file system for deletion.
+
+        The file system and any files contained within it are later deleted during garbage collection.
+        If the file system is not found, a ResourceNotFoundError will be raised.
+
+        :param file_system:
+            The file system to delete. This can either be the name of the file system,
+            or an instance of FileSystemProperties.
+        :type file_system: str or ~azure.storage.filedatalake.FileSystemProperties
+        :keyword lease:
+            If specified, delete_file_system only succeeds if the
+            file system's lease is active and matches this ID.
+            Required if the file system has an active lease.
+        :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :returns: A FileSystemClient for the deleted file system.
+        :rtype: ~azure.storage.filedatalake.FileSystemClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_service.py
+                :start-after: [START delete_file_system_from_service_client]
+                :end-before: [END delete_file_system_from_service_client]
+                :language: python
+                :dedent: 8
+                :caption: Deleting a file system in the datalake service.
+        """
+        file_system_client = self.get_file_system_client(file_system)
+        file_system_client.delete_file_system(**kwargs)
+        return file_system_client
+
+    def get_file_system_client(self, file_system  # type: Union[FileSystemProperties, str]
+                               ):
+        # type: (...) -> FileSystemClient
+        """Get a client to interact with the specified file system.
+
+        The file system need not already exist.
+
+        :param file_system:
+            The file system. This can either be the name of the file system,
+            or an instance of FileSystemProperties.
+        :type file_system: str or ~azure.storage.filedatalake.FileSystemProperties
+        :returns: A FileSystemClient.
+        :rtype: ~azure.storage.filedatalake.FileSystemClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_file_system.py
+                :start-after: [START create_file_system_client_from_service]
+                :end-before: [END create_file_system_client_from_service]
+                :language: python
+                :dedent: 8
+                :caption: Getting the file system client to interact with a specific file system.
+        """
+        try:
+            file_system_name = file_system.name
+        except AttributeError:
+            file_system_name = file_system
+
+        _pipeline = Pipeline(
+            transport=TransportWrapper(self._pipeline._transport), # pylint: disable = protected-access
+            policies=self._pipeline._impl_policies # pylint: disable = protected-access
+        )
+        return FileSystemClient(self.url, file_system_name, credential=self._raw_credential,
+                                api_version=self.api_version,
+                                _configuration=self._config,
+                                _pipeline=_pipeline, _hosts=self._hosts)
+
+    def get_directory_client(self, file_system,  # type: Union[FileSystemProperties, str]
+                             directory  # type: Union[DirectoryProperties, str]
+                             ):
+        # type: (...) -> DataLakeDirectoryClient
+        """Get a client to interact with the specified directory.
+
+        The directory need not already exist.
+
+        :param file_system:
+            The file system that the directory is in. This can either be the name of the file system,
+            or an instance of FileSystemProperties.
+        :type file_system: str or ~azure.storage.filedatalake.FileSystemProperties
+        :param directory:
+            The directory with which to interact. This can either be the name of the directory,
+            or an instance of DirectoryProperties.
+        :type directory: str or ~azure.storage.filedatalake.DirectoryProperties
+        :returns: A DataLakeDirectoryClient.
+        :rtype: ~azure.storage.filedatalake.DataLakeDirectoryClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_service.py
+                :start-after: [START get_directory_client_from_service_client]
+                :end-before: [END get_directory_client_from_service_client]
+                :language: python
+                :dedent: 8
+                :caption: Getting the directory client to interact with a specific directory.
+        """
+        try:
+            file_system_name = file_system.name
+        except AttributeError:
+            file_system_name = file_system
+        try:
+            directory_name = directory.name
+        except AttributeError:
+            directory_name = directory
+
+        _pipeline = Pipeline(
+            transport=TransportWrapper(self._pipeline._transport), # pylint: disable = protected-access
+            policies=self._pipeline._impl_policies # pylint: disable = protected-access
+        )
+        return DataLakeDirectoryClient(self.url, file_system_name, directory_name=directory_name,
+                                       credential=self._raw_credential,
+                                       api_version=self.api_version,
+                                       _configuration=self._config, _pipeline=_pipeline,
+                                       _hosts=self._hosts)
+
+    def get_file_client(self, file_system,  # type: Union[FileSystemProperties, str]
+                        file_path  # type: Union[FileProperties, str]
+                        ):
+        # type: (...) -> DataLakeFileClient
+        """Get a client to interact with the specified file.
+
+        The file need not already exist.
+
+        :param file_system:
+            The file system that the file is in. This can either be the name of the file system,
+            or an instance of FileSystemProperties.
+        :type file_system: str or ~azure.storage.filedatalake.FileSystemProperties
+        :param file_path:
+            The file with which to interact. This can either be the full path of the file (from the root
+            directory), e.g. directory/subdirectory/file, or an instance of FileProperties.
+        :type file_path: str or ~azure.storage.filedatalake.FileProperties
+        :returns: A DataLakeFileClient.
+        :rtype: ~azure.storage.filedatalake.DataLakeFileClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_service.py
+                :start-after: [START get_file_client_from_service_client]
+                :end-before: [END get_file_client_from_service_client]
+                :language: python
+                :dedent: 8
+                :caption: Getting the file client to interact with a specific file.
+        """
+        try:
+            file_system_name = file_system.name
+        except AttributeError:
+            file_system_name = file_system
+        try:
+            file_path = file_path.name
+        except AttributeError:
+            pass
+
+        _pipeline = Pipeline(
+            transport=TransportWrapper(self._pipeline._transport), # pylint: disable = protected-access
+            policies=self._pipeline._impl_policies # pylint: disable = protected-access
+        )
+        return DataLakeFileClient(
+            self.url, file_system_name, file_path=file_path, credential=self._raw_credential,
+            api_version=self.api_version,
+            _hosts=self._hosts, _configuration=self._config, _pipeline=_pipeline)
+
+    @distributed_trace
+    def set_service_properties(self, **kwargs):
+        # type: (**Any) -> None
+        """Sets the properties of a storage account's Datalake service, including
+        Azure Storage Analytics.
+
+        .. versionadded:: 12.4.0
+            This operation was introduced in API version '2020-06-12'.
+
+        If an element (e.g. analytics_logging) is left as None, the
+        existing settings on the service for that functionality are preserved.
+
+        :keyword analytics_logging:
+            Groups the Azure Analytics Logging settings.
+        :paramtype analytics_logging: ~azure.storage.filedatalake.AnalyticsLogging
+        :keyword hour_metrics:
+            The hour metrics settings provide a summary of request
+            statistics grouped by API in hourly aggregates.
+        :paramtype hour_metrics: ~azure.storage.filedatalake.Metrics
+        :keyword minute_metrics:
+            The minute metrics settings provide request statistics
+            for each minute.
+        :paramtype minute_metrics: ~azure.storage.filedatalake.Metrics
+        :keyword cors:
+            You can include up to five CorsRule elements in the
+            list. If an empty list is specified, all CORS rules will be deleted,
+            and CORS will be disabled for the service.
+        :paramtype cors: list[~azure.storage.filedatalake.CorsRule]
+        :keyword str target_version:
+            Indicates the default version to use for requests if an incoming
+            request's version is not specified.
+        :keyword delete_retention_policy:
+            The delete retention policy specifies whether to retain deleted files/directories.
+            It also specifies the number of days and versions of file/directory to keep.
+        :paramtype delete_retention_policy: ~azure.storage.filedatalake.RetentionPolicy
+        :keyword static_website:
+            Specifies whether the static website feature is enabled,
+            and if yes, indicates the index document and 404 error document to use.
+        :paramtype static_website: ~azure.storage.filedatalake.StaticWebsite
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :rtype: None
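+
+        .. admonition:: Example:
+
+            .. code-block:: python
+
+                # A minimal sketch; assumes `service_client` is an existing
+                # DataLakeServiceClient. Properties left unset keep their
+                # current values on the service.
+                from azure.storage.filedatalake import CorsRule, RetentionPolicy
+
+                cors_rule = CorsRule(['www.xyz.com'], ['GET'])
+                retention = RetentionPolicy(enabled=True, days=5)
+                service_client.set_service_properties(
+                    cors=[cors_rule], delete_retention_policy=retention)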
+        """
+        return self._blob_service_client.set_service_properties(**kwargs)
+
+    @distributed_trace
+    def get_service_properties(self, **kwargs):
+        # type: (**Any) -> Dict[str, Any]
+        """Gets the properties of a storage account's datalake service, including
+        Azure Storage Analytics.
+
+        .. versionadded:: 12.4.0
+            This operation was introduced in API version '2020-06-12'.
+
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :returns: An object containing datalake service properties such as
+            analytics logging, hour/minute metrics, cors rules, etc.
+        :rtype: dict[str, Any]
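+
+        .. admonition:: Example:
+
+            .. code-block:: python
+
+                # A minimal sketch; assumes `service_client` is an existing
+                # DataLakeServiceClient.
+                props = service_client.get_service_properties()
+                retention = props['delete_retention_policy']
+                print(retention.enabled, retention.days)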
+        """
+        props = self._blob_service_client.get_service_properties(**kwargs)
+        return get_datalake_service_properties(props)
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_deserialize.py b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_deserialize.py
new file mode 100644
index 00000000..9ebaa641
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_deserialize.py
@@ -0,0 +1,241 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+import logging
+from typing import NoReturn
+from xml.etree.ElementTree import Element
+
+from azure.core.pipeline.policies import ContentDecodePolicy
+from azure.core.exceptions import (
+    HttpResponseError,
+    DecodeError,
+    ResourceModifiedError,
+    ClientAuthenticationError,
+    ResourceNotFoundError,
+    ResourceExistsError
+)
+from ._models import (
+    FileProperties,
+    DirectoryProperties,
+    LeaseProperties,
+    DeletedPathProperties,
+    StaticWebsite,
+    RetentionPolicy,
+    Metrics,
+    AnalyticsLogging,
+    PathProperties
+)
+from ._shared.models import StorageErrorCode
+from ._shared.response_handlers import deserialize_metadata
+
+
+_LOGGER = logging.getLogger(__name__)
+
+
+def deserialize_dir_properties(response, obj, headers):
+    metadata = deserialize_metadata(response, obj, headers)
+    dir_properties = DirectoryProperties(
+        metadata=metadata,
+        owner=response.headers.get('x-ms-owner'),
+        group=response.headers.get('x-ms-group'),
+        permissions=response.headers.get('x-ms-permissions'),
+        acl=response.headers.get('x-ms-acl'),
+        **headers
+    )
+    return dir_properties
+
+
+def deserialize_file_properties(response, obj, headers):
+    metadata = deserialize_metadata(response, obj, headers)
+    # DataLake specific headers that are not deserialized in blob are pulled directly from the raw response header
+    file_properties = FileProperties(
+        metadata=metadata,
+        encryption_context=response.headers.get('x-ms-encryption-context'),
+        owner=response.headers.get('x-ms-owner'),
+        group=response.headers.get('x-ms-group'),
+        permissions=response.headers.get('x-ms-permissions'),
+        acl=response.headers.get('x-ms-acl'),
+        **headers
+    )
+    if 'Content-Range' in headers:
+        if 'x-ms-blob-content-md5' in headers:
+            file_properties.content_settings.content_md5 = headers['x-ms-blob-content-md5']
+        else:
+            file_properties.content_settings.content_md5 = None
+    return file_properties
+
+
+def deserialize_path_properties(path_list):
+    return [PathProperties._from_generated(path) for path in path_list] # pylint: disable=protected-access
+
+
+def return_headers_and_deserialized_path_list(response, deserialized, response_headers):  # pylint: disable=name-too-long, unused-argument
+    return deserialized.paths if deserialized.paths else {}, normalize_headers(response_headers)
+
+
+def get_deleted_path_properties_from_generated_code(generated):  # pylint: disable=name-too-long
+    deleted_path = DeletedPathProperties()
+    deleted_path.name = generated.name
+    deleted_path.deleted_time = generated.properties.deleted_time
+    deleted_path.remaining_retention_days = generated.properties.remaining_retention_days
+    deleted_path.deletion_id = generated.deletion_id
+    return deleted_path
+
+
+def is_file_path(_, __, headers):
+    return headers['x-ms-resource-type'] == "file"
+
+
+def get_datalake_service_properties(datalake_properties):
+    datalake_properties["analytics_logging"] = AnalyticsLogging._from_generated(    # pylint: disable=protected-access
+        datalake_properties["analytics_logging"])
+    datalake_properties["hour_metrics"] = Metrics._from_generated(datalake_properties["hour_metrics"])  # pylint: disable=protected-access
+    datalake_properties["minute_metrics"] = Metrics._from_generated(    # pylint: disable=protected-access
+        datalake_properties["minute_metrics"])
+    datalake_properties["delete_retention_policy"] = RetentionPolicy._from_generated(   # pylint: disable=protected-access
+        datalake_properties["delete_retention_policy"])
+    datalake_properties["static_website"] = StaticWebsite._from_generated(  # pylint: disable=protected-access
+        datalake_properties["static_website"])
+    return datalake_properties
+
+
+def from_blob_properties(blob_properties, **additional_args):
+    file_props = FileProperties()
+    file_props.name = blob_properties.name
+    file_props.etag = blob_properties.etag
+    file_props.deleted = blob_properties.deleted
+    file_props.metadata = blob_properties.metadata
+    file_props.lease = blob_properties.lease
+    file_props.lease.__class__ = LeaseProperties
+    file_props.last_modified = blob_properties.last_modified
+    file_props.creation_time = blob_properties.creation_time
+    file_props.size = blob_properties.size
+    file_props.deleted_time = blob_properties.deleted_time
+    file_props.remaining_retention_days = blob_properties.remaining_retention_days
+    file_props.content_settings = blob_properties.content_settings
+
+    # Parse additional Datalake-only properties
+    file_props.encryption_context = additional_args.pop('encryption_context', None)
+    file_props.owner = additional_args.pop('owner', None)
+    file_props.group = additional_args.pop('group', None)
+    file_props.permissions = additional_args.pop('permissions', None)
+    file_props.acl = additional_args.pop('acl', None)
+
+    return file_props
+
+
+def normalize_headers(headers):
+    normalized = {}
+    for key, value in headers.items():
+        if key.startswith('x-ms-'):
+            key = key[5:]
+        normalized[key.lower().replace('-', '_')] = value
+    return normalized
+
+
+def process_storage_error(storage_error) -> NoReturn:  # pylint:disable=too-many-statements
+    raise_error = HttpResponseError
+    serialized = False
+    if not storage_error.response:
+        raise storage_error
+    # If it is one of these three, it has already been serialized by the generated layer.
+    if isinstance(storage_error, (ResourceNotFoundError, ClientAuthenticationError, ResourceExistsError)):
+        serialized = True
+    error_code = storage_error.response.headers.get('x-ms-error-code')
+    error_message = storage_error.message
+    additional_data = {}
+    error_dict = {}
+    try:
+        error_body = ContentDecodePolicy.deserialize_from_http_generics(storage_error.response)
+        # If it is an XML response
+        if isinstance(error_body, Element):
+            error_dict = {
+                child.tag.lower(): child.text
+                for child in error_body
+            }
+        # If it is a JSON response
+        elif isinstance(error_body, dict):
+            error_dict = error_body.get('error', {})
+        elif not error_code:
+            _LOGGER.warning(
+                'Unexpected return type %s from ContentDecodePolicy.deserialize_from_http_generics.', type(error_body))
+            error_dict = {'message': str(error_body)}
+
+        # If we extracted from a Json or XML response
+        if error_dict:
+            error_code = error_dict.get('code')
+            error_message = error_dict.get('message')
+            additional_data = {k: v for k, v in error_dict.items() if k not in {'code', 'message'}}
+
+    except DecodeError:
+        pass
+
+    try:
+        # This check would be unnecessary if we have already serialized the error.
+        if error_code and not serialized:
+            error_code = StorageErrorCode(error_code)
+            if error_code in [StorageErrorCode.condition_not_met]:
+                raise_error = ResourceModifiedError
+            if error_code in [StorageErrorCode.invalid_authentication_info,
+                              StorageErrorCode.authentication_failed]:
+                raise_error = ClientAuthenticationError
+            if error_code in [StorageErrorCode.resource_not_found,
+                              StorageErrorCode.invalid_property_name,
+                              StorageErrorCode.invalid_source_uri,
+                              StorageErrorCode.source_path_not_found,
+                              StorageErrorCode.lease_name_mismatch,
+                              StorageErrorCode.file_system_not_found,
+                              StorageErrorCode.path_not_found,
+                              StorageErrorCode.parent_not_found,
+                              StorageErrorCode.invalid_destination_path,
+                              StorageErrorCode.invalid_rename_source_path,
+                              StorageErrorCode.lease_is_already_broken,
+                              StorageErrorCode.invalid_source_or_destination_resource_type,
+                              StorageErrorCode.rename_destination_parent_path_not_found]:
+                raise_error = ResourceNotFoundError
+            if error_code in [StorageErrorCode.account_already_exists,
+                              StorageErrorCode.account_being_created,
+                              StorageErrorCode.resource_already_exists,
+                              StorageErrorCode.resource_type_mismatch,
+                              StorageErrorCode.source_path_is_being_deleted,
+                              StorageErrorCode.path_already_exists,
+                              StorageErrorCode.destination_path_is_being_deleted,
+                              StorageErrorCode.file_system_already_exists,
+                              StorageErrorCode.file_system_being_deleted,
+                              StorageErrorCode.path_conflict]:
+                raise_error = ResourceExistsError
+    except ValueError:
+        # Got an unknown error code
+        pass
+
+    # Error message should include all the error properties
+    try:
+        error_message += f"\nErrorCode:{error_code.value}"
+    except AttributeError:
+        error_message += f"\nErrorCode:{error_code}"
+    for name, info in additional_data.items():
+        error_message += f"\n{name}:{info}"
+
+    # No need to create an instance if it has already been serialized by the generated layer
+    if serialized:
+        storage_error.message = error_message
+        error = storage_error
+    else:
+        error = raise_error(message=error_message, response=storage_error.response)
+    # Ensure these properties are stored in the error instance as well (not just the error message)
+    error.error_code = error_code
+    error.additional_info = additional_data
+    # error.args is what's surfaced on the traceback - show error message in all cases
+    error.args = (error.message,)
+
+    try:
+        # `from None` prevents us from double printing the exception (suppresses generated layer error context)
+        exec("raise error from None")   # pylint: disable=exec-used # nosec
+    except SyntaxError as exc:
+        raise error from exc
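+
+
+# A minimal, hypothetical demonstration of normalize_headers: the 'x-ms-'
+# prefix is stripped and the remaining key is lower-cased with '-' mapped
+# to '_'. Guarded so it only runs when this module is executed directly.
+if __name__ == "__main__":
+    _raw = {'x-ms-continuation': 'token123', 'Content-Length': '42'}
+    assert normalize_headers(_raw) == {'continuation': 'token123', 'content_length': '42'}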
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_download.py b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_download.py
new file mode 100644
index 00000000..da8c879a
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_download.py
@@ -0,0 +1,82 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+from typing import IO, Iterator, Optional
+
+from ._deserialize import from_blob_properties
+
+
+class StorageStreamDownloader(object):
+    """A streaming object to download from Azure Storage.
+
+    :ivar str name:
+        The name of the file being downloaded.
+    :ivar ~azure.storage.filedatalake.FileProperties properties:
+        The properties of the file being downloaded. If only a range of the data is being
+        downloaded, this will be reflected in the properties.
+    :ivar int size:
+        The size of the total data in the stream. This will be the byte range if specified,
+        otherwise the total size of the file.
+    """
+
+    def __init__(self, downloader):
+        self._downloader = downloader
+        self.name = self._downloader.name
+
+        # Parse additional Datalake-only properties
+        encryption_context = self._downloader._response.response.headers.get('x-ms-encryption-context')
+        acl = self._downloader._response.response.headers.get('x-ms-acl')
+
+        self.properties = from_blob_properties(
+            self._downloader.properties,
+            encryption_context=encryption_context,
+            acl=acl)
+        self.size = self._downloader.size
+
+    def __len__(self):
+        return self.size
+
+    def chunks(self) -> Iterator[bytes]:
+        """Iterate over chunks in the download stream.
+
+        :returns: An iterator containing the chunks in the download stream.
+        :rtype: Iterator[bytes]
+        """
+        return self._downloader.chunks()
+
+    def read(self, size: Optional[int] = -1) -> bytes:
+        """
+        Read up to size bytes from the stream and return them. If size
+        is unspecified or is -1, all bytes will be read.
+
+        :param int size:
+            The number of bytes to download from the stream. Leave unspecified
+            or set to -1 to download all bytes.
+        :returns:
+            The requested data as bytes. If the return value is empty, there is no more data to read.
+        :rtype: bytes
+        """
+        return self._downloader.read(size)
+
+    def readall(self) -> bytes:
+        """Download the contents of this file.
+
+        This operation is blocking until all data is downloaded.
+
+        :returns: The contents of the specified file.
+        :rtype: bytes
+        """
+        return self._downloader.readall()
+
+    def readinto(self, stream: IO[bytes]) -> int:
+        """Download the contents of this file to a stream.
+
+        :param IO[bytes] stream:
+            The stream to download to. This can be an open file-handle,
+            or any writable stream. The stream must be seekable if the download
+            uses more than one parallel connection.
+        :returns: The number of bytes read.
+        :rtype: int
+        """
+        return self._downloader.readinto(stream)
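+
+
+def _example_download_to_disk() -> None:
+    """A minimal, hypothetical usage sketch: DataLakeFileClient.download_file()
+    returns a StorageStreamDownloader like the one above. The account URL,
+    credential, and paths are placeholders."""
+    from azure.storage.filedatalake import DataLakeFileClient
+
+    file_client = DataLakeFileClient(
+        "https://myaccount.dfs.core.windows.net", "myfilesystem", "dir/data.csv",
+        credential="my-account-key")
+    with open("data.csv", "wb") as local_file:
+        downloader = file_client.download_file()
+        downloader.readinto(local_file)  # stream the contents to disk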
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_file_system_client.py b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_file_system_client.py
new file mode 100644
index 00000000..7017527f
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_file_system_client.py
@@ -0,0 +1,1074 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+# pylint: disable=too-many-lines, docstring-keyword-should-match-keyword-only
+
+import functools
+from typing import Any, Dict, Optional, Union, TYPE_CHECKING
+from urllib.parse import urlparse, quote, unquote
+
+from typing_extensions import Self
+
+from azure.core.pipeline import Pipeline
+from azure.core.exceptions import HttpResponseError
+from azure.core.paging import ItemPaged
+from azure.core.tracing.decorator import distributed_trace
+from azure.storage.blob import ContainerClient
+from ._shared.base_client import TransportWrapper, StorageAccountHostsMixin, parse_query, parse_connection_str
+from ._serialize import convert_dfs_url_to_blob_url, get_api_version
+from ._list_paths_helper import DeletedPathPropertiesPaged, PathPropertiesPaged
+from ._models import LocationMode, FileSystemProperties, PublicAccess, DeletedPathProperties, FileProperties, \
+    DirectoryProperties
+from ._data_lake_file_client import DataLakeFileClient
+from ._data_lake_directory_client import DataLakeDirectoryClient
+from ._data_lake_lease import DataLakeLeaseClient
+from ._generated import AzureDataLakeStorageRESTAPI
+from ._generated.models import ListBlobsIncludeItem
+from ._deserialize import process_storage_error, is_file_path
+
+if TYPE_CHECKING:
+    from azure.core.credentials import AzureNamedKeyCredential, AzureSasCredential, TokenCredential
+    from datetime import datetime
+    from ._models import PathProperties
+
+
+class FileSystemClient(StorageAccountHostsMixin):
+    """A client to interact with a specific file system, even if that file system
+    may not yet exist.
+
+    For operations relating to a specific directory or file within this file system, a directory client or file client
+    can be retrieved using the :func:`~get_directory_client` or :func:`~get_file_client` functions.
+
+    :ivar str url:
+        The full endpoint URL to the file system, including SAS token if used.
+    :ivar str primary_endpoint:
+        The full primary endpoint URL.
+    :ivar str primary_hostname:
+        The hostname of the primary endpoint.
+    :param str account_url:
+        The URI to the storage account.
+    :param file_system_name:
+        The file system for the directory or files.
+    :type file_system_name: str
+    :param credential:
+        The credentials with which to authenticate. This is optional if the
+        account URL already has a SAS token. The value can be a SAS token string,
+        an instance of an AzureSasCredential or AzureNamedKeyCredential from azure.core.credentials,
+        an account shared access key, or an instance of a TokenCredentials class from azure.identity.
+        If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential
+        - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError.
+        If using an instance of AzureNamedKeyCredential, "name" should be the storage account name, and "key"
+        should be the storage account key.
+    :type credential:
+        ~azure.core.credentials.AzureNamedKeyCredential or
+        ~azure.core.credentials.AzureSasCredential or
+        ~azure.core.credentials.TokenCredential or
+        str or dict[str, str] or None
+    :keyword str api_version:
+        The Storage API version to use for requests. Default value is the most recent service version that is
+        compatible with the current SDK. Setting to an older version may result in reduced feature compatibility.
+    :keyword str audience: The audience to use when requesting tokens for Azure Active Directory
+        authentication. Only has an effect when credential is of type TokenCredential. The value could be
+        https://storage.azure.com/ (default) or https://<account>.blob.core.windows.net.
+
+    .. admonition:: Example:
+
+        .. literalinclude:: ../samples/datalake_samples_file_system.py
+            :start-after: [START create_file_system_client_from_service]
+            :end-before: [END create_file_system_client_from_service]
+            :language: python
+            :dedent: 8
+            :caption: Get a FileSystemClient from an existing DataLakeServiceClient.
+    """
+    def __init__(
+        self, account_url: str,
+        file_system_name: str,
+        credential: Optional[Union[str, Dict[str, str], "AzureNamedKeyCredential", "AzureSasCredential", "TokenCredential"]] = None,  # pylint: disable=line-too-long
+        **kwargs: Any
+    ) -> None:
+        try:
+            if not account_url.lower().startswith('http'):
+                account_url = "https://" + account_url
+        except AttributeError as exc:
+            raise ValueError("account URL must be a string.") from exc
+        parsed_url = urlparse(account_url.rstrip('/'))
+        if not file_system_name:
+            raise ValueError("Please specify a file system name.")
+        if not parsed_url.netloc:
+            raise ValueError(f"Invalid URL: {account_url}")
+
+        blob_account_url = convert_dfs_url_to_blob_url(account_url)
+        # TODO: add self.account_url to base_client and remove _blob_account_url
+        self._blob_account_url = blob_account_url
+
+        datalake_hosts = kwargs.pop('_hosts', None)
+        blob_hosts = None
+        if datalake_hosts:
+            blob_primary_account_url = convert_dfs_url_to_blob_url(datalake_hosts[LocationMode.PRIMARY])
+            blob_hosts = {LocationMode.PRIMARY: blob_primary_account_url, LocationMode.SECONDARY: ""}
+        self._container_client = ContainerClient(blob_account_url, file_system_name,
+                                                 credential=credential, _hosts=blob_hosts, **kwargs)
+
+        _, sas_token = parse_query(parsed_url.query)
+        self.file_system_name = file_system_name
+        self._query_str, self._raw_credential = self._format_query_string(sas_token, credential)
+
+        super(FileSystemClient, self).__init__(parsed_url, service='dfs', credential=self._raw_credential,
+                                               _hosts=datalake_hosts, **kwargs)
+        # ADLS doesn't support secondary endpoint, make sure it's empty
+        self._hosts[LocationMode.SECONDARY] = ""
+        self._client = AzureDataLakeStorageRESTAPI(self.url, base_url=self.url,
+                                                   file_system=file_system_name, pipeline=self._pipeline)
+        api_version = get_api_version(kwargs)
+        self._client._config.version = api_version
+        self._datalake_client_for_blob_operation = AzureDataLakeStorageRESTAPI(self._container_client.url,
+                                                                               base_url=self._container_client.url,
+                                                                               file_system=file_system_name,
+                                                                               pipeline=self._pipeline)
+        self._datalake_client_for_blob_operation._config.version = api_version
+
+    def _format_url(self, hostname):
+        file_system_name = self.file_system_name
+        if isinstance(file_system_name, str):
+            file_system_name = file_system_name.encode('UTF-8')
+        return f"{self.scheme}://{hostname}/{quote(file_system_name)}{self._query_str}"
+
+    def __exit__(self, *args):
+        self._container_client.close()
+        self._datalake_client_for_blob_operation.close()
+        super(FileSystemClient, self).__exit__(*args)
+
+    def close(self):
+        # type: () -> None
+        """ This method is to close the sockets opened by the client.
+        It need not be used when using with a context manager.
+        """
+        self.__exit__()
+
+    @classmethod
+    def from_connection_string(
+            cls, conn_str: str,
+            file_system_name: str,
+            credential: Optional[Union[str, Dict[str, str], "AzureNamedKeyCredential", "AzureSasCredential", "TokenCredential"]] = None,  # pylint: disable=line-too-long
+            **kwargs: Any
+        ) -> Self:
+        """
+        Create FileSystemClient from a Connection String.
+
+        :param str conn_str:
+            A connection string to an Azure Storage account.
+        :param file_system_name: The name of file system to interact with.
+        :type file_system_name: str
+        :param credential:
+            The credentials with which to authenticate. This is optional if the
+            account URL already has a SAS token, or the connection string already has shared
+            access key values. The value can be a SAS token string,
+            an instance of an AzureSasCredential or AzureNamedKeyCredential from azure.core.credentials,
+            an account shared access key, or an instance of a TokenCredentials class from azure.identity.
+            Credentials provided here will take precedence over those in the connection string.
+            If using an instance of AzureNamedKeyCredential, "name" should be the storage account name, and "key"
+            should be the storage account key.
+        :type credential:
+            ~azure.core.credentials.AzureNamedKeyCredential or
+            ~azure.core.credentials.AzureSasCredential or
+            ~azure.core.credentials.TokenCredential or
+            str or dict[str, str] or None
+        :keyword str audience: The audience to use when requesting tokens for Azure Active Directory
+            authentication. Only has an effect when credential is of type TokenCredential. The value could be
+            https://storage.azure.com/ (default) or https://<account>.blob.core.windows.net.
+        :returns: A FileSystemClient.
+        :rtype: ~azure.storage.filedatalake.FileSystemClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_file_system.py
+                :start-after: [START create_file_system_client_from_connection_string]
+                :end-before: [END create_file_system_client_from_connection_string]
+                :language: python
+                :dedent: 8
+                :caption: Create FileSystemClient from connection string
+        """
+        account_url, _, credential = parse_connection_str(conn_str, credential, 'dfs')
+        return cls(
+            account_url, file_system_name=file_system_name, credential=credential, **kwargs)
+
+    @distributed_trace
+    def acquire_lease(
+        self, lease_duration=-1,  # type: int
+        lease_id=None,  # type: Optional[str]
+        **kwargs
+    ):
+        # type: (...) -> DataLakeLeaseClient
+        """
+        Requests a new lease. If the file system does not have an active lease,
+        the DataLake service creates a lease on the file system and returns a new
+        lease ID.
+
+        :param int lease_duration:
+            Specifies the duration of the lease, in seconds, or negative one
+            (-1) for a lease that never expires. A non-infinite lease can be
+            between 15 and 60 seconds. A lease duration cannot be changed
+            using renew or change. Default is -1 (infinite lease).
+        :param str lease_id:
+            Proposed lease ID, in a GUID string format. The DataLake service returns
+            400 (Invalid request) if the proposed lease ID is not in the correct format.
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :returns: A DataLakeLeaseClient object, that can be run in a context manager.
+        :rtype: ~azure.storage.filedatalake.DataLakeLeaseClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_file_system.py
+                :start-after: [START acquire_lease_on_file_system]
+                :end-before: [END acquire_lease_on_file_system]
+                :language: python
+                :dedent: 8
+                :caption: Acquiring a lease on the file system.
+        """
+        lease = DataLakeLeaseClient(self, lease_id=lease_id)
+        lease.acquire(lease_duration=lease_duration, **kwargs)
+        return lease
+
+    @distributed_trace
+    def create_file_system(self, metadata=None,  # type: Optional[Dict[str, str]]
+                           public_access=None,  # type: Optional[PublicAccess]
+                           **kwargs):
+        # type: (...) ->  Dict[str, Union[str, datetime]]
+        """Creates a new file system under the specified account.
+
+        If the file system with the same name already exists, a ResourceExistsError will
+        be raised. This method returns a client with which to interact with the newly
+        created file system.
+
+        :param metadata:
+            A dict with name-value pairs to associate with the
+            file system as metadata. Example: `{'Category':'test'}`
+        :type metadata: dict(str, str)
+        :param public_access:
+            To specify whether data in the file system may be accessed publicly and the level of access.
+        :type public_access: ~azure.storage.filedatalake.PublicAccess
+        :keyword encryption_scope_options:
+            Specifies the default encryption scope to set on the file system and use for
+            all future writes.
+
+            .. versionadded:: 12.9.0
+
+        :paramtype encryption_scope_options: dict or ~azure.storage.filedatalake.EncryptionScopeOptions
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :returns: A dictionary of response headers.
+        :rtype: dict[str, Union[str, datetime]]
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_file_system.py
+                :start-after: [START create_file_system]
+                :end-before: [END create_file_system]
+                :language: python
+                :dedent: 12
+                :caption: Creating a file system in the datalake service.
+        """
+        encryption_scope_options = kwargs.pop('encryption_scope_options', None)
+        return self._container_client.create_container(metadata=metadata,
+                                                       public_access=public_access,
+                                                       container_encryption_scope=encryption_scope_options,
+                                                       **kwargs)
+
+    @distributed_trace
+    def exists(self, **kwargs):
+        # type: (**Any) -> bool
+        """
+        Returns True if the file system exists, False otherwise.
+
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :returns: True if a file system exists, False otherwise.
+        :rtype: bool
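+
+        .. admonition:: Example:
+
+            .. code-block:: python
+
+                # A minimal sketch; assumes `file_system_client` was created as
+                # shown in the class-level example above.
+                if not file_system_client.exists():
+                    file_system_client.create_file_system()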
+        """
+        return self._container_client.exists(**kwargs)
+
+    def _rename_file_system(self, new_name, **kwargs):
+        # type: (str, **Any) -> FileSystemClient
+        """Renames a filesystem.
+
+        Operation is successful only if the source filesystem exists.
+
+        :param str new_name:
+            The new filesystem name the user wants to rename to.
+        :keyword lease:
+            Specify this to perform the operation only if the lease ID given
+            matches the active lease ID of the source filesystem.
+        :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :returns: A FileSystemClient for the renamed file system.
+        :rtype: ~azure.storage.filedatalake.FileSystemClient
+        """
+        self._container_client._rename_container(new_name, **kwargs)   # pylint: disable=protected-access
+        #TODO: self._raw_credential would not work with SAS tokens
+        renamed_file_system = FileSystemClient(
+                f"{self.scheme}://{self.primary_hostname}", file_system_name=new_name,
+                credential=self._raw_credential, api_version=self.api_version, _configuration=self._config,
+                _pipeline=self._pipeline, _location_mode=self._location_mode, _hosts=self._hosts)
+        return renamed_file_system
+
+    @distributed_trace
+    def delete_file_system(self, **kwargs):
+        # type: (Any) -> None
+        """Marks the specified file system for deletion.
+
+        The file system and any files contained within it are later deleted during garbage collection.
+        If the file system is not found, a ResourceNotFoundError will be raised.
+
+        :keyword str or ~azure.storage.filedatalake.DataLakeLeaseClient lease:
+            If specified, delete_file_system only succeeds if the
+            file system's lease is active and matches this ID.
+            Required if the file system has an active lease.
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :rtype: None
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_file_system.py
+                :start-after: [START delete_file_system]
+                :end-before: [END delete_file_system]
+                :language: python
+                :dedent: 12
+                :caption: Deleting a file system in the datalake service.
+        """
+        self._container_client.delete_container(**kwargs)
+
+    @distributed_trace
+    def get_file_system_properties(self, **kwargs):
+        # type: (Any) -> FileSystemProperties
+        """Returns all user-defined metadata and system properties for the specified
+        file system. The data returned does not include the file system's list of paths.
+
+        :keyword str or ~azure.storage.filedatalake.DataLakeLeaseClient lease:
+            If specified, get_file_system_properties only succeeds if the
+            file system's lease is active and matches this ID.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :return: Properties for the specified file system within a file system object.
+        :rtype: ~azure.storage.filedatalake.FileSystemProperties
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_file_system.py
+                :start-after: [START get_file_system_properties]
+                :end-before: [END get_file_system_properties]
+                :language: python
+                :dedent: 12
+                :caption: Getting properties on the file system.
+        """
+        container_properties = self._container_client.get_container_properties(**kwargs)
+        return FileSystemProperties._convert_from_container_props(container_properties)  # pylint: disable=protected-access
+
+    @distributed_trace
+    def set_file_system_metadata(  # type: ignore
+        self, metadata,  # type: Dict[str, str]
+        **kwargs
+    ):
+        # type: (...) -> Dict[str, Union[str, datetime]]
+        """Sets one or more user-defined name-value pairs for the specified
+        file system. Each call to this operation replaces all existing metadata
+        attached to the file system. To remove all metadata from the file system,
+        call this operation with no metadata dict.
+
+        :param metadata:
+            A dict containing name-value pairs to associate with the file system as
+            metadata. Example: {'category':'test'}
+        :type metadata: dict[str, str]
+        :keyword str or ~azure.storage.filedatalake.DataLakeLeaseClient lease:
+            If specified, set_file_system_metadata only succeeds if the
+            file system's lease is active and matches this ID.
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :returns: filesystem-updated property dict (Etag and last modified).
+        :rtype: dict[str, str] or dict[str, ~datetime.datetime]
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_file_system.py
+                :start-after: [START set_file_system_metadata]
+                :end-before: [END set_file_system_metadata]
+                :language: python
+                :dedent: 12
+                :caption: Setting metadata on the file system.
+        """
+        return self._container_client.set_container_metadata(metadata=metadata, **kwargs)
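+
+    # Editor's note: a hedged usage sketch reusing the hypothetical `fs_client`
+    # from the sketch above; each call replaces all existing metadata:
+    #
+    #   fs_client.set_file_system_metadata({'category': 'test', 'env': 'dev'})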
+
+    @distributed_trace
+    def set_file_system_access_policy(
+            self, signed_identifiers,  # type: Dict[str, AccessPolicy]
+            public_access=None,  # type: Optional[Union[str, PublicAccess]]
+            **kwargs
+    ):  # type: (...) -> Dict[str, Union[str, datetime]]
+        """Sets the permissions for the specified file system or stored access
+        policies that may be used with Shared Access Signatures. The permissions
+        indicate whether files in a file system may be accessed publicly.
+
+        :param signed_identifiers:
+            A dictionary of access policies to associate with the file system. The
+            dictionary may contain up to 5 elements. An empty dictionary
+            will clear the access policies set on the service.
+        :type signed_identifiers: dict[str, ~azure.storage.filedatalake.AccessPolicy]
+        :param ~azure.storage.filedatalake.PublicAccess public_access:
+            To specify whether data in the file system may be accessed publicly and the level of access.
+        :keyword lease:
+            Required if the file system has an active lease. Value can be a DataLakeLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A datetime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified date/time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A datetime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :returns: A dict of the updated file system properties (ETag and last modified).
+        :rtype: dict[str, str] or dict[str, ~datetime.datetime]
+        """
+        return self._container_client.set_container_access_policy(signed_identifiers,
+                                                                  public_access=public_access, **kwargs)
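+
+    # Editor's note: an illustrative sketch (the policy name is hypothetical),
+    # reusing the `fs_client` defined earlier:
+    #
+    #   from datetime import datetime, timedelta
+    #   from azure.storage.filedatalake import AccessPolicy, FileSystemSasPermissions
+    #   policy = AccessPolicy(
+    #       permission=FileSystemSasPermissions(read=True),
+    #       start=datetime.utcnow(),
+    #       expiry=datetime.utcnow() + timedelta(hours=1))
+    #   fs_client.set_file_system_access_policy({'read-policy': policy})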
+
+    @distributed_trace
+    def get_file_system_access_policy(self, **kwargs):
+        # type: (Any) -> Dict[str, Any]
+        """Gets the permissions for the specified file system.
+        The permissions indicate whether file system data may be accessed publicly.
+
+        :keyword lease:
+            If specified, the operation only succeeds if the
+            file system's lease is active and matches this ID.
+        :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :returns: Access policy information in a dict.
+        :rtype: dict[str, Any]
+        """
+        access_policy = self._container_client.get_container_access_policy(**kwargs)
+        return {
+            'public_access': PublicAccess._from_generated(access_policy['public_access']),  # pylint: disable=protected-access
+            'signed_identifiers': access_policy['signed_identifiers']
+        }
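+
+    # Editor's note: a short sketch with the hypothetical `fs_client` above:
+    #
+    #   info = fs_client.get_file_system_access_policy()
+    #   print(info['public_access'], info['signed_identifiers'])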
+
+    @distributed_trace
+    def get_paths(
+        self, path: Optional[str] = None,
+        recursive: Optional[bool] = True,
+        max_results: Optional[int] = None,
+        **kwargs: Any
+    ) -> ItemPaged["PathProperties"]:
+        """Returns a generator to list the paths(could be files or directories) under the specified file system.
+        The generator will lazily follow the continuation tokens returned by
+        the service.
+
+        :param Optional[str] path:
+            Filters the results to return only paths under the specified path.
+        :param Optional[bool] recursive: Optional. If True, paths are listed
+            recursively; if False, only the paths directly under `path` are listed.
+        :param Optional[int] max_results: An optional value that specifies the maximum
+            number of items to return per page. If omitted or greater than 5,000, the
+            response will include up to 5,000 items per page.
+        :keyword bool upn:
+            If True, the user identity values returned in the x-ms-owner, x-ms-group,
+            and x-ms-acl response headers will be transformed from Azure Active Directory Object IDs to User
+            Principal Names in the owner, group, and acl fields of
+            :class:`~azure.storage.filedatalake.PathProperties`. If False, the values will be returned
+            as Azure Active Directory Object IDs. The default value is False. Note that group and application
+            Object IDs are not translated because they do not have unique friendly names.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :returns: An iterable (auto-paging) response of PathProperties.
+        :rtype: ~azure.core.paging.ItemPaged[~azure.storage.filedatalake.PathProperties]
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_file_system.py
+                :start-after: [START get_paths_in_file_system]
+                :end-before: [END get_paths_in_file_system]
+                :language: python
+                :dedent: 8
+                :caption: List the paths in the file system.
+        """
+        timeout = kwargs.pop('timeout', None)
+        command = functools.partial(
+            self._client.file_system.list_paths,
+            path=path,
+            timeout=timeout,
+            **kwargs)
+        return ItemPaged(
+            command, recursive, path=path, max_results=max_results,
+            page_iterator_class=PathPropertiesPaged, **kwargs)
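+
+    # Editor's note: an illustrative sketch (the directory name is
+    # hypothetical); the pager transparently follows continuation tokens:
+    #
+    #   for p in fs_client.get_paths(path="my-directory", recursive=True):
+    #       print(p.name, "dir" if p.is_directory else "file")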
+
+    @distributed_trace
+    def create_directory(self, directory,  # type: Union[DirectoryProperties, str]
+                         metadata=None,  # type: Optional[Dict[str, str]]
+                         **kwargs):
+        # type: (...) -> DataLakeDirectoryClient
+        """
+        Create a new directory and return a client to interact with it.
+
+        :param directory:
+            The directory with which to interact. This can either be the name of the directory,
+            or an instance of DirectoryProperties.
+        :type directory: str or ~azure.storage.filedatalake.DirectoryProperties
+        :param metadata:
+            Name-value pairs associated with the file as metadata.
+        :type metadata: dict(str, str)
+        :keyword ~azure.storage.filedatalake.ContentSettings content_settings:
+            ContentSettings object used to set path properties.
+        :keyword lease:
+            Required if the file has an active lease. Value can be a DataLakeLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
+        :keyword str umask:
+            Optional and only valid if Hierarchical Namespace is enabled for the account.
+            When creating a file or directory and the parent folder does not have a default ACL,
+            the umask restricts the permissions of the file or directory to be created.
+            The resulting permission is given by p & ^u, where p is the permission and u is the umask.
+            For example, if p is 0777 and u is 0057, then the resulting permission is 0720.
+            The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027.
+            The umask must be specified in 4-digit octal notation (e.g. 0766).
+        :keyword str owner:
+            The owner of the file or directory.
+        :keyword str group:
+            The owning group of the file or directory.
+        :keyword str acl:
+            Sets POSIX access control rights on files and directories. The value is a
+            comma-separated list of access control entries. Each access control entry (ACE) consists of a
+            scope, a type, a user or group identifier, and permissions in the format
+            "[scope:][type]:[id]:[permissions]".
+        :keyword str lease_id:
+            Proposed lease ID, in a GUID string format. The DataLake service returns
+            400 (Invalid request) if the proposed lease ID is not in the correct format.
+        :keyword int lease_duration:
+            Specifies the duration of the lease, in seconds, or negative one
+            (-1) for a lease that never expires. A non-infinite lease can be
+            between 15 and 60 seconds. A lease duration cannot be changed
+            using renew or change.
+        :keyword str permissions:
+            Optional and only valid if Hierarchical Namespace
+            is enabled for the account. Sets POSIX access permissions for the file
+            owner, the file owning group, and others. Each class may be granted
+            read, write, or execute permission.  The sticky bit is also supported.
+            Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are
+            supported.
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :returns: A DataLakeDirectoryClient for the newly created directory.
+        :rtype: ~azure.storage.filedatalake.DataLakeDirectoryClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_file_system.py
+                :start-after: [START create_directory_from_file_system]
+                :end-before: [END create_directory_from_file_system]
+                :language: python
+                :dedent: 8
+                :caption: Create directory in the file system.
+        """
+        directory_client = self.get_directory_client(directory)
+        directory_client.create_directory(metadata=metadata, **kwargs)
+        return directory_client
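+
+    # Editor's note: a hedged sketch (hypothetical names) using the
+    # `fs_client` from the earlier sketch:
+    #
+    #   dir_client = fs_client.create_directory(
+    #       "my-directory", metadata={'owner': 'data-team'})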
+
+    @distributed_trace
+    def delete_directory(self, directory,  # type: Union[DirectoryProperties, str]
+                         **kwargs):
+        # type: (...) -> DataLakeDirectoryClient
+        """
+        Marks the specified path for deletion.
+
+        :param directory:
+            The directory with which to interact. This can either be the name of the directory,
+            or an instance of DirectoryProperties.
+        :type directory: str or ~azure.storage.filedatalake.DirectoryProperties
+        :keyword lease:
+            Required if the directory has an active lease. Value can be a DataLakeLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :returns: A DataLakeDirectoryClient for the deleted directory.
+        :rtype: ~azure.storage.filedatalake.DataLakeDirectoryClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_file_system.py
+                :start-after: [START delete_directory_from_file_system]
+                :end-before: [END delete_directory_from_file_system]
+                :language: python
+                :dedent: 8
+                :caption: Delete directory in the file system.
+        """
+        directory_client = self.get_directory_client(directory)
+        directory_client.delete_directory(**kwargs)
+        return directory_client
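+
+    # Editor's note: a one-line sketch with the hypothetical names above:
+    #
+    #   fs_client.delete_directory("my-directory")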
+
+    @distributed_trace
+    def create_file(self, file,  # type: Union[FileProperties, str]
+                    **kwargs):
+        # type: (...) -> DataLakeFileClient
+        """
+        Create a new file and return a client to interact with it.
+
+        :param file:
+            The file with which to interact. This can either be the name of the file,
+            or an instance of FileProperties.
+        :type file: str or ~azure.storage.filedatalake.FileProperties
+        :keyword ~azure.storage.filedatalake.ContentSettings content_settings:
+            ContentSettings object used to set path properties.
+        :keyword metadata:
+            Name-value pairs associated with the file as metadata.
+        :paramtype metadata: dict[str, str]
+        :keyword lease:
+            Required if the file has an active lease. Value can be a DataLakeLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
+        :keyword str umask:
+            Optional and only valid if Hierarchical Namespace is enabled for the account.
+            When creating a file or directory and the parent folder does not have a default ACL,
+            the umask restricts the permissions of the file or directory to be created.
+            The resulting permission is given by p & ^u, where p is the permission and u is the umask.
+            For example, if p is 0777 and u is 0057, then the resulting permission is 0720.
+            The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027.
+            The umask must be specified in 4-digit octal notation (e.g. 0766).
+        :keyword str owner:
+            The owner of the file or directory.
+        :keyword str group:
+            The owning group of the file or directory.
+        :keyword str acl:
+            Sets POSIX access control rights on files and directories. The value is a
+            comma-separated list of access control entries. Each access control entry (ACE) consists of a
+            scope, a type, a user or group identifier, and permissions in the format
+            "[scope:][type]:[id]:[permissions]".
+        :keyword str lease_id:
+            Proposed lease ID, in a GUID string format. The DataLake service returns
+            400 (Invalid request) if the proposed lease ID is not in the correct format.
+        :keyword int lease_duration:
+            Specifies the duration of the lease, in seconds, or negative one
+            (-1) for a lease that never expires. A non-infinite lease can be
+            between 15 and 60 seconds. A lease duration cannot be changed
+            using renew or change.
+        :keyword expires_on:
+            The time at which the file is set to expire.
+            If the type of expires_on is an int, expiration time will be set
+            as the number of milliseconds elapsed from creation time.
+            If the type of expires_on is datetime, expiration time will be set
+            absolute to the time provided. If no time zone info is provided, this
+            will be interpreted as UTC.
+        :paramtype expires_on: datetime or int
+        :keyword str permissions:
+            Optional and only valid if Hierarchical Namespace
+            is enabled for the account. Sets POSIX access permissions for the file
+            owner, the file owning group, and others. Each class may be granted
+            read, write, or execute permission.  The sticky bit is also supported.
+            Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are
+            supported.
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :returns: A DataLakeFileClient for the newly created file.
+        :rtype: ~azure.storage.filedatalake.DataLakeFileClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_file_system.py
+                :start-after: [START create_file_from_file_system]
+                :end-before: [END create_file_from_file_system]
+                :language: python
+                :dedent: 8
+                :caption: Create file in the file system.
+        """
+        file_client = self.get_file_client(file)
+        file_client.create_file(**kwargs)
+        return file_client
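+
+    # Editor's note: an illustrative sketch (hypothetical path and payload);
+    # the returned client can immediately append and flush data:
+    #
+    #   file_client = fs_client.create_file("my-directory/report.txt")
+    #   data = b"hello"
+    #   file_client.append_data(data, offset=0, length=len(data))
+    #   file_client.flush_data(len(data))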
+
+    @distributed_trace
+    def delete_file(self, file,  # type: Union[FileProperties, str]
+                    **kwargs):
+        # type: (...) -> DataLakeFileClient
+        """
+        Marks the specified file for deletion.
+
+        :param file:
+            The file with which to interact. This can either be the name of the file,
+            or an instance of FileProperties.
+        :type file: str or ~azure.storage.filedatalake.FileProperties
+        :keyword lease:
+            Required if the file has an active lease. Value can be a DataLakeLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :return: A DataLakeFileClient for the deleted file.
+        :rtype: ~azure.storage.filedatalake.DataLakeFileClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_file_system.py
+                :start-after: [START delete_file_from_file_system]
+                :end-before: [END delete_file_from_file_system]
+                :language: python
+                :dedent: 8
+                :caption: Delete file in the file system.
+        """
+        file_client = self.get_file_client(file)
+        file_client.delete_file(**kwargs)
+        return file_client
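+
+    # Editor's note: a one-line sketch with the hypothetical path above:
+    #
+    #   fs_client.delete_file("my-directory/report.txt")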
+
+    def _undelete_path_options(self, deleted_path_name, deletion_id):
+        quoted_path = quote(unquote(deleted_path_name.strip('/')))
+
+        url_and_token = self.url.replace('.dfs.', '.blob.').split('?')
+        try:
+            url = url_and_token[0] + '/' + quoted_path + '?' + url_and_token[1]  # re-attach the query string stripped by split('?')
+        except IndexError:
+            url = url_and_token[0] + '/' + quoted_path
+
+        undelete_source = quoted_path + f'?deletionid={deletion_id}' if deletion_id else None
+
+        return quoted_path, url, undelete_source
+
+    def _undelete_path(self, deleted_path_name, deletion_id, **kwargs):
+        # type: (str, str, **Any) -> Union[DataLakeDirectoryClient, DataLakeFileClient]
+        """Restores soft-deleted path.
+
+        Operation will only be successful if used within the specified number of days
+        set in the delete retention policy.
+
+        .. versionadded:: 12.4.0
+            This operation was introduced in API version '2020-06-12'.
+
+        :param str deleted_path_name:
+            Specifies the path (file or directory) to restore.
+        :param str deletion_id:
+            Specifies the version of the deleted path to restore.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :returns: The DataLake client for the restored soft-deleted path.
+        :rtype: ~azure.storage.filedatalake.DataLakeDirectoryClient or
+            ~azure.storage.filedatalake.DataLakeFileClient
+        """
+        _, url, undelete_source = self._undelete_path_options(deleted_path_name, deletion_id)
+
+        pipeline = Pipeline(
+            transport=TransportWrapper(self._pipeline._transport), # pylint: disable = protected-access
+            policies=self._pipeline._impl_policies # pylint: disable = protected-access
+        )
+        path_client = AzureDataLakeStorageRESTAPI(
+            url, filesystem=self.file_system_name, path=deleted_path_name, pipeline=pipeline)
+        try:
+            is_file = path_client.path.undelete(undelete_source=undelete_source, cls=is_file_path, **kwargs)
+            if is_file:
+                return self.get_file_client(deleted_path_name)
+            return self.get_directory_client(deleted_path_name)
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    def _get_root_directory_client(self):
+        # type: () -> DataLakeDirectoryClient
+        """Get a client to interact with the root directory.
+
+        :returns: A DataLakeDirectoryClient.
+        :rtype: ~azure.storage.filedatalake.DataLakeDirectoryClient
+        """
+        return self.get_directory_client('/')
+
+    def get_directory_client(self, directory  # type: Union[DirectoryProperties, str]
+                             ):
+        # type: (...) -> DataLakeDirectoryClient
+        """Get a client to interact with the specified directory.
+
+        The directory need not already exist.
+
+        :param directory:
+            The directory with which to interact. This can either be the name of the directory,
+            or an instance of DirectoryProperties.
+        :type directory: str or ~azure.storage.filedatalake.DirectoryProperties
+        :returns: A DataLakeDirectoryClient.
+        :rtype: ~azure.storage.filedatalake.DataLakeDirectoryClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_file_system.py
+                :start-after: [START get_directory_client_from_file_system]
+                :end-before: [END get_directory_client_from_file_system]
+                :language: python
+                :dedent: 8
+                :caption: Getting the directory client to interact with a specific directory.
+        """
+        try:
+            directory_name = directory.get('name')
+        except AttributeError:
+            directory_name = str(directory)
+        _pipeline = Pipeline(
+            transport=TransportWrapper(self._pipeline._transport), # pylint: disable = protected-access
+            policies=self._pipeline._impl_policies # pylint: disable = protected-access
+        )
+        return DataLakeDirectoryClient(self.url, self.file_system_name, directory_name=directory_name,
+                                       credential=self._raw_credential,
+                                       api_version=self.api_version,
+                                       _configuration=self._config, _pipeline=_pipeline,
+                                       _hosts=self._hosts)
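+
+    # Editor's note: a hedged sketch (hypothetical names); the directory does
+    # not need to exist before the client is created:
+    #
+    #   dir_client = fs_client.get_directory_client("my-directory")
+    #   sub_client = dir_client.create_sub_directory("nested")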
+
+    def get_file_client(self, file_path  # type: Union[FileProperties, str]
+                        ):
+        # type: (...) -> DataLakeFileClient
+        """Get a client to interact with the specified file.
+
+        The file need not already exist.
+
+        :param file_path:
+            The file with which to interact. This can either be the path of the file
+            relative to the root directory (e.g. directory/subdirectory/file),
+            or an instance of FileProperties.
+        :type file_path: str or ~azure.storage.filedatalake.FileProperties
+        :returns: A DataLakeFileClient.
+        :rtype: ~azure.storage.filedatalake.DataLakeFileClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_file_system.py
+                :start-after: [START get_file_client_from_file_system]
+                :end-before: [END get_file_client_from_file_system]
+                :language: python
+                :dedent: 8
+                :caption: Getting the file client to interact with a specific file.
+        """
+        try:
+            file_path = file_path.get('name')
+        except AttributeError:
+            file_path = str(file_path)
+        _pipeline = Pipeline(
+            transport=TransportWrapper(self._pipeline._transport), # pylint: disable = protected-access
+            policies=self._pipeline._impl_policies # pylint: disable = protected-access
+        )
+        return DataLakeFileClient(
+            self.url, self.file_system_name, file_path=file_path, credential=self._raw_credential,
+            api_version=self.api_version,
+            _hosts=self._hosts, _configuration=self._config, _pipeline=_pipeline)
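+
+    # Editor's note: an illustrative sketch (hypothetical path) showing a
+    # download through the returned client:
+    #
+    #   file_client = fs_client.get_file_client("my-directory/report.txt")
+    #   content = file_client.download_file().readall()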
+
+    @distributed_trace
+    def list_deleted_paths(self, **kwargs):
+        # type: (Any) -> ItemPaged[DeletedPathProperties]
+        """Returns a generator to list the deleted (file or directory) paths under the specified file system.
+        The generator will lazily follow the continuation tokens returned by
+        the service.
+
+        .. versionadded:: 12.4.0
+            This operation was introduced in API version '2020-06-12'.
+
+        :keyword str path_prefix:
+            Filters the results to return only paths under the specified path.
+        :keyword int results_per_page:
+            An optional value that specifies the maximum number of items to return per page.
+            If omitted or greater than 5,000, the response will include up to 5,000 items per page.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :returns: An iterable (auto-paging) response of DeletedPathProperties.
+        :rtype:
+            ~azure.core.paging.ItemPaged[~azure.storage.filedatalake.DeletedPathProperties]
+        """
+        path_prefix = kwargs.pop('path_prefix', None)
+        timeout = kwargs.pop('timeout', None)
+        results_per_page = kwargs.pop('results_per_page', None)
+        command = functools.partial(
+            self._datalake_client_for_blob_operation.file_system.list_blob_hierarchy_segment,
+            showonly=ListBlobsIncludeItem.deleted,
+            timeout=timeout,
+            **kwargs)
+        return ItemPaged(
+            command, prefix=path_prefix, page_iterator_class=DeletedPathPropertiesPaged,
+            results_per_page=results_per_page, **kwargs)
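+
+    # Editor's note: a hedged sketch (hypothetical prefix) reusing `fs_client`;
+    # this requires a delete retention policy on the account:
+    #
+    #   for deleted in fs_client.list_deleted_paths(path_prefix="my-directory/"):
+    #       print(deleted.name, deleted.deletion_id)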
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/__init__.py b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/__init__.py
new file mode 100644
index 00000000..8a9f7149
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/__init__.py
@@ -0,0 +1,29 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+# pylint: disable=wrong-import-position
+
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from ._patch import *  # pylint: disable=unused-wildcard-import
+
+from ._azure_data_lake_storage_restapi import AzureDataLakeStorageRESTAPI  # type: ignore
+
+try:
+    from ._patch import __all__ as _patch_all
+    from ._patch import *
+except ImportError:
+    _patch_all = []
+from ._patch import patch_sdk as _patch_sdk
+
+__all__ = [
+    "AzureDataLakeStorageRESTAPI",
+]
+__all__.extend([p for p in _patch_all if p not in __all__])  # pyright: ignore
+
+_patch_sdk()
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/_azure_data_lake_storage_restapi.py b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/_azure_data_lake_storage_restapi.py
new file mode 100644
index 00000000..ae1c9c2d
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/_azure_data_lake_storage_restapi.py
@@ -0,0 +1,112 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+from copy import deepcopy
+from typing import Any, Optional
+from typing_extensions import Self
+
+from azure.core import PipelineClient
+from azure.core.pipeline import policies
+from azure.core.rest import HttpRequest, HttpResponse
+
+from . import models as _models
+from ._configuration import AzureDataLakeStorageRESTAPIConfiguration
+from ._serialization import Deserializer, Serializer
+from .operations import FileSystemOperations, PathOperations, ServiceOperations
+
+
+class AzureDataLakeStorageRESTAPI:  # pylint: disable=client-accepts-api-version-keyword
+    """Azure Data Lake Storage provides storage for Hadoop and other big data workloads.
+
+    :ivar service: ServiceOperations operations
+    :vartype service: azure.storage.filedatalake.operations.ServiceOperations
+    :ivar file_system: FileSystemOperations operations
+    :vartype file_system: azure.storage.filedatalake.operations.FileSystemOperations
+    :ivar path: PathOperations operations
+    :vartype path: azure.storage.filedatalake.operations.PathOperations
+    :param url: The URL of the service account, container, or blob that is the target of the
+     desired operation. Required.
+    :type url: str
+    :param base_url: Service URL. Required. Default value is "".
+    :type base_url: str
+    :param x_ms_lease_duration: The lease duration is required to acquire a lease, and specifies
+     the duration of the lease in seconds.  The lease duration must be between 15 and 60 seconds or
+     -1 for infinite lease. Default value is None.
+    :type x_ms_lease_duration: int
+    :keyword resource: The value must be "filesystem" for all filesystem operations. Default value
+     is "filesystem". Note that overriding this default value may result in unsupported behavior.
+    :paramtype resource: str
+    :keyword version: Specifies the version of the operation to use for this request. Default value
+     is "2025-01-05". Note that overriding this default value may result in unsupported behavior.
+    :paramtype version: str
+    """
+
+    def __init__(  # pylint: disable=missing-client-constructor-parameter-credential
+        self, url: str, base_url: str = "", x_ms_lease_duration: Optional[int] = None, **kwargs: Any
+    ) -> None:
+        self._config = AzureDataLakeStorageRESTAPIConfiguration(
+            url=url, x_ms_lease_duration=x_ms_lease_duration, **kwargs
+        )
+        _policies = kwargs.pop("policies", None)
+        if _policies is None:
+            _policies = [
+                policies.RequestIdPolicy(**kwargs),
+                self._config.headers_policy,
+                self._config.user_agent_policy,
+                self._config.proxy_policy,
+                policies.ContentDecodePolicy(**kwargs),
+                self._config.redirect_policy,
+                self._config.retry_policy,
+                self._config.authentication_policy,
+                self._config.custom_hook_policy,
+                self._config.logging_policy,
+                policies.DistributedTracingPolicy(**kwargs),
+                policies.SensitiveHeaderCleanupPolicy(**kwargs) if self._config.redirect_policy else None,
+                self._config.http_logging_policy,
+            ]
+        self._client: PipelineClient = PipelineClient(base_url=base_url, policies=_policies, **kwargs)
+
+        client_models = {k: v for k, v in _models.__dict__.items() if isinstance(v, type)}
+        self._serialize = Serializer(client_models)
+        self._deserialize = Deserializer(client_models)
+        self._serialize.client_side_validation = False
+        self.service = ServiceOperations(self._client, self._config, self._serialize, self._deserialize)
+        self.file_system = FileSystemOperations(self._client, self._config, self._serialize, self._deserialize)
+        self.path = PathOperations(self._client, self._config, self._serialize, self._deserialize)
+
+    def _send_request(self, request: HttpRequest, *, stream: bool = False, **kwargs: Any) -> HttpResponse:
+        """Runs the network request through the client's chained policies.
+
+        >>> from azure.core.rest import HttpRequest
+        >>> request = HttpRequest("GET", "https://www.example.org/")
+        >>> request
+        <HttpRequest [GET], url: 'https://www.example.org/'>
+        >>> response = client._send_request(request)
+        <HttpResponse: 200 OK>
+
+        For more information on this code flow, see https://aka.ms/azsdk/dpcodegen/python/send_request
+
+        :param request: The network request you want to make. Required.
+        :type request: ~azure.core.rest.HttpRequest
+        :keyword bool stream: Whether the response payload will be streamed. Defaults to False.
+        :return: The response of your network call. Does not do error handling on your response.
+        :rtype: ~azure.core.rest.HttpResponse
+        """
+
+        request_copy = deepcopy(request)
+        request_copy.url = self._client.format_url(request_copy.url)
+        return self._client.send_request(request_copy, stream=stream, **kwargs)  # type: ignore
+
+    def close(self) -> None:
+        self._client.close()
+
+    def __enter__(self) -> Self:
+        self._client.__enter__()
+        return self
+
+    def __exit__(self, *exc_details: Any) -> None:
+        self._client.__exit__(*exc_details)
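+
+# Editor's note: a minimal, hypothetical sketch of how this generated client is
+# constructed and used as a context manager (the URLs are placeholders):
+#
+#   with AzureDataLakeStorageRESTAPI(
+#           url="https://myaccount.dfs.core.windows.net/myfs",
+#           base_url="https://myaccount.dfs.core.windows.net") as client:
+#       paths = client.file_system.list_paths(recursive=True)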
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/_configuration.py b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/_configuration.py
new file mode 100644
index 00000000..ce7d9c28
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/_configuration.py
@@ -0,0 +1,61 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+from typing import Any, Literal, Optional
+
+from azure.core.pipeline import policies
+
+VERSION = "unknown"
+
+
+class AzureDataLakeStorageRESTAPIConfiguration:  # pylint: disable=too-many-instance-attributes
+    """Configuration for AzureDataLakeStorageRESTAPI.
+
+    Note that all parameters used to create this instance are saved as instance
+    attributes.
+
+    :param url: The URL of the service account, container, or blob that is the target of the
+     desired operation. Required.
+    :type url: str
+    :param x_ms_lease_duration: The lease duration is required to acquire a lease, and specifies
+     the duration of the lease in seconds.  The lease duration must be between 15 and 60 seconds or
+     -1 for infinite lease. Default value is None.
+    :type x_ms_lease_duration: int
+    :keyword resource: The value must be "filesystem" for all filesystem operations. Default value
+     is "filesystem". Note that overriding this default value may result in unsupported behavior.
+    :paramtype resource: str
+    :keyword version: Specifies the version of the operation to use for this request. Default value
+     is "2025-01-05". Note that overriding this default value may result in unsupported behavior.
+    :paramtype version: str
+    """
+
+    def __init__(self, url: str, x_ms_lease_duration: Optional[int] = None, **kwargs: Any) -> None:
+        resource: Literal["filesystem"] = kwargs.pop("resource", "filesystem")
+        version: Literal["2025-01-05"] = kwargs.pop("version", "2025-01-05")
+
+        if url is None:
+            raise ValueError("Parameter 'url' must not be None.")
+
+        self.url = url
+        self.x_ms_lease_duration = x_ms_lease_duration
+        self.resource = resource
+        self.version = version
+        kwargs.setdefault("sdk_moniker", "azuredatalakestoragerestapi/{}".format(VERSION))
+        self.polling_interval = kwargs.get("polling_interval", 30)
+        self._configure(**kwargs)
+
+    def _configure(self, **kwargs: Any) -> None:
+        self.user_agent_policy = kwargs.get("user_agent_policy") or policies.UserAgentPolicy(**kwargs)
+        self.headers_policy = kwargs.get("headers_policy") or policies.HeadersPolicy(**kwargs)
+        self.proxy_policy = kwargs.get("proxy_policy") or policies.ProxyPolicy(**kwargs)
+        self.logging_policy = kwargs.get("logging_policy") or policies.NetworkTraceLoggingPolicy(**kwargs)
+        self.http_logging_policy = kwargs.get("http_logging_policy") or policies.HttpLoggingPolicy(**kwargs)
+        self.custom_hook_policy = kwargs.get("custom_hook_policy") or policies.CustomHookPolicy(**kwargs)
+        self.redirect_policy = kwargs.get("redirect_policy") or policies.RedirectPolicy(**kwargs)
+        self.retry_policy = kwargs.get("retry_policy") or policies.RetryPolicy(**kwargs)
+        self.authentication_policy = kwargs.get("authentication_policy")
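+
+# Editor's note: a hedged sketch; any of the pipeline policies above can be
+# overridden through kwargs at construction time (the values are hypothetical):
+#
+#   config = AzureDataLakeStorageRESTAPIConfiguration(
+#       url="https://myaccount.dfs.core.windows.net/myfs",
+#       retry_policy=policies.RetryPolicy(retry_total=3))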
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/_patch.py b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/_patch.py
new file mode 100644
index 00000000..f7dd3251
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/_patch.py
@@ -0,0 +1,20 @@
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------
+"""Customize generated code here.
+
+Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize
+"""
+from typing import List
+
+__all__: List[str] = []  # Add all objects you want publicly available to users at this package level
+
+
+def patch_sdk():
+    """Do not remove from this file.
+
+    `patch_sdk` is a last resort escape hatch that allows you to do customizations
+    you can't accomplish using the techniques described in
+    https://aka.ms/azsdk/python/dpcodegen/python/customize
+    """
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/_serialization.py b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/_serialization.py
new file mode 100644
index 00000000..a066e16a
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/_serialization.py
@@ -0,0 +1,2050 @@
+# pylint: disable=too-many-lines
+# --------------------------------------------------------------------------
+#
+# Copyright (c) Microsoft Corporation. All rights reserved.
+#
+# The MIT License (MIT)
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the ""Software""), to
+# deal in the Software without restriction, including without limitation the
+# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+# sell copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+# --------------------------------------------------------------------------
+
+# pyright: reportUnnecessaryTypeIgnoreComment=false
+
+from base64 import b64decode, b64encode
+import calendar
+import datetime
+import decimal
+import email
+from enum import Enum
+import json
+import logging
+import re
+import sys
+import codecs
+from typing import (
+    Dict,
+    Any,
+    cast,
+    Optional,
+    Union,
+    AnyStr,
+    IO,
+    Mapping,
+    Callable,
+    MutableMapping,
+    List,
+)
+
+try:
+    from urllib import quote  # type: ignore
+except ImportError:
+    from urllib.parse import quote
+import xml.etree.ElementTree as ET
+
+import isodate  # type: ignore
+from typing_extensions import Self
+
+from azure.core.exceptions import DeserializationError, SerializationError
+from azure.core.serialization import NULL as CoreNull
+
+_BOM = codecs.BOM_UTF8.decode(encoding="utf-8")
+
+JSON = MutableMapping[str, Any]
+
+
+class RawDeserializer:
+
+    # Accept "text" because we're open minded people...
+    JSON_REGEXP = re.compile(r"^(application|text)/([a-z+.]+\+)?json$")
+
+    # Name used in context
+    CONTEXT_NAME = "deserialized_data"
+
+    @classmethod
+    def deserialize_from_text(cls, data: Optional[Union[AnyStr, IO]], content_type: Optional[str] = None) -> Any:
+        """Decode data according to content-type.
+
+        Accepts a stream of data as well, but it will be loaded into memory at once for now.
+
+        If no content-type is given, the string version is returned (not bytes, not a stream).
+
+        :param data: Input, could be bytes or stream (will be decoded with UTF8) or text
+        :type data: str or bytes or IO
+        :param str content_type: The content type.
+        :return: The deserialized data.
+        :rtype: object
+        """
+        if hasattr(data, "read"):
+            # Assume a stream
+            data = cast(IO, data).read()
+
+        if isinstance(data, bytes):
+            data_as_str = data.decode(encoding="utf-8-sig")
+        else:
+            # Explain to mypy the correct type.
+            data_as_str = cast(str, data)
+
+            # Remove Byte Order Mark if present in string
+            data_as_str = data_as_str.lstrip(_BOM)
+
+        if content_type is None:
+            return data
+
+        if cls.JSON_REGEXP.match(content_type):
+            try:
+                return json.loads(data_as_str)
+            except ValueError as err:
+                raise DeserializationError("JSON is invalid: {}".format(err), err) from err
+        elif "xml" in (content_type or []):
+            try:
+
+                try:
+                    if isinstance(data, unicode):  # type: ignore
+                        # If I'm Python 2.7 and unicode XML will scream if I try a "fromstring" on unicode string
+                        data_as_str = data_as_str.encode(encoding="utf-8")  # type: ignore
+                except NameError:
+                    pass
+
+                return ET.fromstring(data_as_str)  # nosec
+            except ET.ParseError as err:
+                # It might be because the server has an issue, and returned JSON with
+                # content-type XML....
+                # So let's try a JSON load, and if it's still broken
+                # let's flow the initial exception
+                def _json_attempt(data):
+                    try:
+                        return True, json.loads(data)
+                    except ValueError:
+                        return False, None  # Don't care about this one
+
+                success, json_result = _json_attempt(data)
+                if success:
+                    return json_result
+                # If I'm here, it's not JSON, it's not XML, let's scream
+                # and raise the last context in this block (the XML exception).
+                # The function hack is because Py2.7 messes up with exception
+                # context otherwise.
+                _LOGGER.critical("Wasn't XML nor JSON, failing")
+                raise DeserializationError("XML is invalid") from err
+        elif content_type.startswith("text/"):
+            return data_as_str
+        raise DeserializationError("Cannot deserialize content-type: {}".format(content_type))
+
+    @classmethod
+    def deserialize_from_http_generics(cls, body_bytes: Optional[Union[AnyStr, IO]], headers: Mapping) -> Any:
+        """Deserialize from HTTP response.
+
+        Use bytes and headers to NOT use any requests/aiohttp or whatever
+        specific implementation.
+        Headers will be tested for "content-type".
+
+        :param bytes body_bytes: The body of the response.
+        :param dict headers: The headers of the response.
+        :returns: The deserialized data.
+        :rtype: object
+        """
+        # Try to use content-type from headers if available
+        content_type = None
+        if "content-type" in headers:
+            content_type = headers["content-type"].split(";")[0].strip().lower()
+        # Ouch, this server did not declare what it sent...
+        # Let's guess it's JSON...
+        # Also, since Autorest was considering that an empty body was a valid JSON,
+        # need that test as well....
+        else:
+            content_type = "application/json"
+
+        if body_bytes:
+            return cls.deserialize_from_text(body_bytes, content_type)
+        return None
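+
+    # Editor's note: an illustrative sketch of the deserializer helpers above
+    # (the inputs are hypothetical):
+    #
+    #   RawDeserializer.deserialize_from_text('{"a": 1}', "application/json")
+    #   # -> {'a': 1}
+    #   RawDeserializer.deserialize_from_http_generics(
+    #       b'{"a": 1}', {"content-type": "application/json"})
+    #   # -> {'a': 1}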
+
+
+_LOGGER = logging.getLogger(__name__)
+
+try:
+    _long_type = long  # type: ignore
+except NameError:
+    _long_type = int
+
+TZ_UTC = datetime.timezone.utc
+
+_FLATTEN = re.compile(r"(?<!\\)\.")
+
+
+def attribute_transformer(key, attr_desc, value):  # pylint: disable=unused-argument
+    """A key transformer that returns the Python attribute.
+
+    :param str key: The attribute name
+    :param dict attr_desc: The attribute metadata
+    :param object value: The value
+    :returns: A key using attribute name
+    :rtype: str
+    """
+    return (key, value)
+
+
+def full_restapi_key_transformer(key, attr_desc, value):  # pylint: disable=unused-argument
+    """A key transformer that returns the full RestAPI key path.
+
+    :param str key: The attribute name
+    :param dict attr_desc: The attribute metadata
+    :param object value: The value
+    :returns: A list of keys using RestAPI syntax.
+    :rtype: list
+    """
+    keys = _FLATTEN.split(attr_desc["key"])
+    return ([_decode_attribute_map_key(k) for k in keys], value)
+
+
+def last_restapi_key_transformer(key, attr_desc, value):
+    """A key transformer that returns the last RestAPI key.
+
+    :param str key: The attribute name
+    :param dict attr_desc: The attribute metadata
+    :param object value: The value
+    :returns: The last RestAPI key.
+    :rtype: str
+    """
+    key, value = full_restapi_key_transformer(key, attr_desc, value)
+    return (key[-1], value)
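+
+# Editor's note: a small worked example of the transformers above (the
+# attribute metadata is hypothetical):
+#
+#   attr_desc = {"key": "properties.name", "type": "str"}
+#   full_restapi_key_transformer("name", attr_desc, "val")
+#   # -> (["properties", "name"], "val")
+#   last_restapi_key_transformer("name", attr_desc, "val")
+#   # -> ("name", "val")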
+
+
+def _create_xml_node(tag, prefix=None, ns=None):
+    """Create a XML node.
+
+    :param str tag: The tag name
+    :param str prefix: The prefix
+    :param str ns: The namespace
+    :return: The XML node
+    :rtype: xml.etree.ElementTree.Element
+    """
+    if prefix and ns:
+        ET.register_namespace(prefix, ns)
+    if ns:
+        return ET.Element("{" + ns + "}" + tag)
+    return ET.Element(tag)
+
+
+class Model:
+    """Mixin for all client request body/response body models to support
+    serialization and deserialization.
+    """
+
+    _subtype_map: Dict[str, Dict[str, Any]] = {}
+    _attribute_map: Dict[str, Dict[str, Any]] = {}
+    _validation: Dict[str, Dict[str, Any]] = {}
+
+    def __init__(self, **kwargs: Any) -> None:
+        self.additional_properties: Optional[Dict[str, Any]] = {}
+        for k in kwargs:  # pylint: disable=consider-using-dict-items
+            if k not in self._attribute_map:
+                _LOGGER.warning("%s is not a known attribute of class %s and will be ignored", k, self.__class__)
+            elif k in self._validation and self._validation[k].get("readonly", False):
+                _LOGGER.warning("Readonly attribute %s will be ignored in class %s", k, self.__class__)
+            else:
+                setattr(self, k, kwargs[k])
+
+    def __eq__(self, other: Any) -> bool:
+        """Compare objects by comparing all attributes.
+
+        :param object other: The object to compare
+        :returns: True if objects are equal
+        :rtype: bool
+        """
+        if isinstance(other, self.__class__):
+            return self.__dict__ == other.__dict__
+        return False
+
+    def __ne__(self, other: Any) -> bool:
+        """Compare objects by comparing all attributes.
+
+        :param object other: The object to compare
+        :returns: True if objects are not equal
+        :rtype: bool
+        """
+        return not self.__eq__(other)
+
+    def __str__(self) -> str:
+        return str(self.__dict__)
+
+    @classmethod
+    def enable_additional_properties_sending(cls) -> None:
+        cls._attribute_map["additional_properties"] = {"key": "", "type": "{object}"}
+
+    @classmethod
+    def is_xml_model(cls) -> bool:
+        try:
+            cls._xml_map  # type: ignore
+        except AttributeError:
+            return False
+        return True
+
+    @classmethod
+    def _create_xml_node(cls):
+        """Create XML node.
+
+        :returns: The XML node
+        :rtype: xml.etree.ElementTree.Element
+        """
+        try:
+            xml_map = cls._xml_map  # type: ignore
+        except AttributeError:
+            xml_map = {}
+
+        return _create_xml_node(xml_map.get("name", cls.__name__), xml_map.get("prefix", None), xml_map.get("ns", None))
+
+    def serialize(self, keep_readonly: bool = False, **kwargs: Any) -> JSON:
+        """Return the JSON that would be sent to server from this model.
+
+        This is an alias to `as_dict(full_restapi_key_transformer, keep_readonly=False)`.
+
+        If you want XML serialization, you can pass the kwargs is_xml=True.
+
+        :param bool keep_readonly: If you want to serialize the readonly attributes
+        :returns: A dict JSON compatible object
+        :rtype: dict
+        """
+        serializer = Serializer(self._infer_class_models())
+        return serializer._serialize(  # type: ignore # pylint: disable=protected-access
+            self, keep_readonly=keep_readonly, **kwargs
+        )
+
+    def as_dict(
+        self,
+        keep_readonly: bool = True,
+        key_transformer: Callable[[str, Dict[str, Any], Any], Any] = attribute_transformer,
+        **kwargs: Any
+    ) -> JSON:
+        """Return a dict that can be serialized using json.dump.
+
+        Advanced usage might optionally use a callback as parameter:
+
+        .. code:: python
+
+            def my_key_transformer(key, attr_desc, value):
+                return key
+
+        Key is the attribute name used in Python. Attr_desc
+        is a dict of metadata. Currently it contains 'type' with the
+        msrest type and 'key' with the RestAPI encoded key.
+        Value is the current value in this object.
+
+        The string returned will be used to serialize the key.
+        If the return type is a list, it is considered a hierarchical
+        result dict.
+
+        See the three examples in this file:
+
+        - attribute_transformer
+        - full_restapi_key_transformer
+        - last_restapi_key_transformer
+
+        A usage sketch also follows the Model class definition.
+
+        If you want XML serialization, you can pass the kwarg is_xml=True.
+
+        :param bool keep_readonly: If you want to serialize the readonly attributes
+        :param function key_transformer: A key transformer function.
+        :returns: A dict JSON compatible object
+        :rtype: dict
+        """
+        serializer = Serializer(self._infer_class_models())
+        return serializer._serialize(  # type: ignore # pylint: disable=protected-access
+            self, key_transformer=key_transformer, keep_readonly=keep_readonly, **kwargs
+        )
+
+    @classmethod
+    def _infer_class_models(cls):
+        try:
+            str_models = cls.__module__.rsplit(".", 1)[0]
+            models = sys.modules[str_models]
+            client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
+            if cls.__name__ not in client_models:
+                raise ValueError("Not Autorest generated code")
+        except Exception:  # pylint: disable=broad-exception-caught
+            # Assume it's not Autorest generated (tests?). Add ourselves as dependencies.
+            client_models = {cls.__name__: cls}
+        return client_models
+
+    @classmethod
+    def deserialize(cls, data: Any, content_type: Optional[str] = None) -> Self:
+        """Parse a str using the RestAPI syntax and return a model.
+
+        :param str data: A str using RestAPI structure. JSON by default.
+        :param str content_type: JSON by default, set application/xml if XML.
+        :returns: An instance of this model
+        :raises DeserializationError: if something went wrong
+        :rtype: Self
+        """
+        deserializer = Deserializer(cls._infer_class_models())
+        return deserializer(cls.__name__, data, content_type=content_type)  # type: ignore
+
+    @classmethod
+    def from_dict(
+        cls,
+        data: Any,
+        key_extractors: Optional[Callable[[str, Dict[str, Any], Any], Any]] = None,
+        content_type: Optional[str] = None,
+    ) -> Self:
+        """Parse a dict using given key extractor return a model.
+
+        By default consider key
+        extractors (rest_key_case_insensitive_extractor, attribute_key_case_insensitive_extractor
+        and last_rest_key_case_insensitive_extractor)
+
+        :param dict data: A dict using RestAPI structure
+        :param function key_extractors: A key extractor function.
+        :param str content_type: JSON by default, set application/xml if XML.
+        :returns: An instance of this model
+        :raises DeserializationError: if something went wrong
+        :rtype: Self
+        """
+        deserializer = Deserializer(cls._infer_class_models())
+        deserializer.key_extractors = (  # type: ignore
+            [  # type: ignore
+                attribute_key_case_insensitive_extractor,
+                rest_key_case_insensitive_extractor,
+                last_rest_key_case_insensitive_extractor,
+            ]
+            if key_extractors is None
+            else key_extractors
+        )
+        return deserializer(cls.__name__, data, content_type=content_type)  # type: ignore
+
+    @classmethod
+    def _flatten_subtype(cls, key, objects):
+        if "_subtype_map" not in cls.__dict__:
+            return {}
+        result = dict(cls._subtype_map[key])
+        for valuetype in cls._subtype_map[key].values():
+            result.update(objects[valuetype]._flatten_subtype(key, objects))  # pylint: disable=protected-access
+        return result
+
+    @classmethod
+    def _classify(cls, response, objects):
+        """Check the class _subtype_map for any child classes.
+        We want to ignore any inherited _subtype_maps.
+
+        :param dict response: The initial data
+        :param dict objects: The class objects
+        :returns: The class to be used
+        :rtype: class
+        """
+        for subtype_key in cls.__dict__.get("_subtype_map", {}).keys():
+            subtype_value = None
+
+            if not isinstance(response, ET.Element):
+                rest_api_response_key = cls._get_rest_key_parts(subtype_key)[-1]
+                subtype_value = response.get(rest_api_response_key, None) or response.get(subtype_key, None)
+            else:
+                subtype_value = xml_key_extractor(subtype_key, cls._attribute_map[subtype_key], response)
+            if subtype_value:
+                # Try to match base class. Can be class name only
+                # (bug to fix in Autorest to support x-ms-discriminator-name)
+                if cls.__name__ == subtype_value:
+                    return cls
+                flatten_mapping_type = cls._flatten_subtype(subtype_key, objects)
+                try:
+                    return objects[flatten_mapping_type[subtype_value]]  # type: ignore
+                except KeyError:
+                    _LOGGER.warning(
+                        "Subtype value %s has no mapping, use base class %s.",
+                        subtype_value,
+                        cls.__name__,
+                    )
+                    break
+            else:
+                _LOGGER.warning("Discriminator %s is absent or null, use base class %s.", subtype_key, cls.__name__)
+                break
+        return cls
+
+    @classmethod
+    def _get_rest_key_parts(cls, attr_key):
+        """Get the RestAPI key of this attr, split it and decode part
+        :param str attr_key: Attribute key must be in attribute_map.
+        :returns: A list of RestAPI part
+        :rtype: list
+        """
+        rest_split_key = _FLATTEN.split(cls._attribute_map[attr_key]["key"])
+        return [_decode_attribute_map_key(key_part) for key_part in rest_split_key]
+
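+# A round-trip sketch with a toy model (Widget is illustrative, not part of
+# the generated client):
+def _example_model_round_trip():
+    class Widget(Model):
+        _attribute_map = {"name": {"key": "properties.name", "type": "str"}}
+
+    widget = Widget(name="w1")
+    # serialize() re-creates the flattened RestAPI hierarchy:
+    assert widget.serialize() == {"properties": {"name": "w1"}}
+    # as_dict() defaults to Python attribute names:
+    assert widget.as_dict() == {"name": "w1"}
+    # from_dict() rebuilds a model instance from RestAPI-shaped data:
+    restored = Widget.from_dict({"properties": {"name": "w1"}})
+    assert restored.name == "w1"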
+
+def _decode_attribute_map_key(key):
+    """This decode a key in an _attribute_map to the actual key we want to look at
+    inside the received data.
+
+    :param str key: A key string from the generated code
+    :returns: The decoded key
+    :rtype: str
+    """
+    return key.replace("\\.", ".")
+
+
+class Serializer:  # pylint: disable=too-many-public-methods
+    """Request object model serializer."""
+
+    basic_types = {str: "str", int: "int", bool: "bool", float: "float"}
+
+    _xml_basic_types_serializers = {"bool": lambda x: str(x).lower()}
+    days = {0: "Mon", 1: "Tue", 2: "Wed", 3: "Thu", 4: "Fri", 5: "Sat", 6: "Sun"}
+    months = {
+        1: "Jan",
+        2: "Feb",
+        3: "Mar",
+        4: "Apr",
+        5: "May",
+        6: "Jun",
+        7: "Jul",
+        8: "Aug",
+        9: "Sep",
+        10: "Oct",
+        11: "Nov",
+        12: "Dec",
+    }
+    # Each validator below returns True when the value VIOLATES the constraint.
+    validation = {
+        "min_length": lambda x, y: len(x) < y,
+        "max_length": lambda x, y: len(x) > y,
+        "minimum": lambda x, y: x < y,
+        "maximum": lambda x, y: x > y,
+        "minimum_ex": lambda x, y: x <= y,
+        "maximum_ex": lambda x, y: x >= y,
+        "min_items": lambda x, y: len(x) < y,
+        "max_items": lambda x, y: len(x) > y,
+        "pattern": lambda x, y: not re.match(y, x, re.UNICODE),
+        "unique": lambda x, y: len(x) != len(set(x)),
+        "multiple": lambda x, y: x % y != 0,
+    }
+
+    def __init__(self, classes: Optional[Mapping[str, type]] = None) -> None:
+        self.serialize_type = {
+            "iso-8601": Serializer.serialize_iso,
+            "rfc-1123": Serializer.serialize_rfc,
+            "unix-time": Serializer.serialize_unix,
+            "duration": Serializer.serialize_duration,
+            "date": Serializer.serialize_date,
+            "time": Serializer.serialize_time,
+            "decimal": Serializer.serialize_decimal,
+            "long": Serializer.serialize_long,
+            "bytearray": Serializer.serialize_bytearray,
+            "base64": Serializer.serialize_base64,
+            "object": self.serialize_object,
+            "[]": self.serialize_iter,
+            "{}": self.serialize_dict,
+        }
+        self.dependencies: Dict[str, type] = dict(classes) if classes else {}
+        self.key_transformer = full_restapi_key_transformer
+        self.client_side_validation = True
+
+    def _serialize(  # pylint: disable=too-many-nested-blocks, too-many-branches, too-many-statements, too-many-locals
+        self, target_obj, data_type=None, **kwargs
+    ):
+        """Serialize data into a string according to type.
+
+        :param object target_obj: The data to be serialized.
+        :param str data_type: The type to be serialized from.
+        :rtype: str, dict
+        :raises SerializationError: if serialization fails.
+        :returns: The serialized data.
+        """
+        key_transformer = kwargs.get("key_transformer", self.key_transformer)
+        keep_readonly = kwargs.get("keep_readonly", False)
+        if target_obj is None:
+            return None
+
+        attr_name = None
+        class_name = target_obj.__class__.__name__
+
+        if data_type:
+            return self.serialize_data(target_obj, data_type, **kwargs)
+
+        if not hasattr(target_obj, "_attribute_map"):
+            data_type = type(target_obj).__name__
+            if data_type in self.basic_types.values():
+                return self.serialize_data(target_obj, data_type, **kwargs)
+
+        # Force "is_xml" kwargs if we detect a XML model
+        try:
+            is_xml_model_serialization = kwargs["is_xml"]
+        except KeyError:
+            is_xml_model_serialization = kwargs.setdefault("is_xml", target_obj.is_xml_model())
+
+        serialized = {}
+        if is_xml_model_serialization:
+            serialized = target_obj._create_xml_node()  # pylint: disable=protected-access
+        try:
+            attributes = target_obj._attribute_map  # pylint: disable=protected-access
+            for attr, attr_desc in attributes.items():
+                attr_name = attr
+                if not keep_readonly and target_obj._validation.get(  # pylint: disable=protected-access
+                    attr_name, {}
+                ).get("readonly", False):
+                    continue
+
+                if attr_name == "additional_properties" and attr_desc["key"] == "":
+                    if target_obj.additional_properties is not None:
+                        serialized.update(target_obj.additional_properties)
+                    continue
+                try:
+                    orig_attr = getattr(target_obj, attr)
+                    if is_xml_model_serialization:
+                        pass  # Don't provide "transformer" for XML for now. Keep "orig_attr"
+                    else:  # JSON
+                        keys, orig_attr = key_transformer(attr, attr_desc.copy(), orig_attr)
+                        keys = keys if isinstance(keys, list) else [keys]
+
+                    kwargs["serialization_ctxt"] = attr_desc
+                    new_attr = self.serialize_data(orig_attr, attr_desc["type"], **kwargs)
+
+                    if is_xml_model_serialization:
+                        xml_desc = attr_desc.get("xml", {})
+                        xml_name = xml_desc.get("name", attr_desc["key"])
+                        xml_prefix = xml_desc.get("prefix", None)
+                        xml_ns = xml_desc.get("ns", None)
+                        if xml_desc.get("attr", False):
+                            if xml_ns:
+                                ET.register_namespace(xml_prefix, xml_ns)
+                                xml_name = "{{{}}}{}".format(xml_ns, xml_name)
+                            serialized.set(xml_name, new_attr)  # type: ignore
+                            continue
+                        if xml_desc.get("text", False):
+                            serialized.text = new_attr  # type: ignore
+                            continue
+                        if isinstance(new_attr, list):
+                            serialized.extend(new_attr)  # type: ignore
+                        elif isinstance(new_attr, ET.Element):
+                            # If the child XML has no XML/Name,
+                            # we MUST replace the tag with the local tag, keeping the namespace.
+                            if "name" not in getattr(orig_attr, "_xml_map", {}):
+                                splitted_tag = new_attr.tag.split("}")
+                                if len(splitted_tag) == 2:  # Namespace
+                                    new_attr.tag = "}".join([splitted_tag[0], xml_name])
+                                else:
+                                    new_attr.tag = xml_name
+                            serialized.append(new_attr)  # type: ignore
+                        else:  # That's a basic type
+                            # Integrate namespace if necessary
+                            local_node = _create_xml_node(xml_name, xml_prefix, xml_ns)
+                            local_node.text = str(new_attr)
+                            serialized.append(local_node)  # type: ignore
+                    else:  # JSON
+                        for k in reversed(keys):  # type: ignore
+                            new_attr = {k: new_attr}
+
+                        _new_attr = new_attr
+                        _serialized = serialized
+                        for k in keys:  # type: ignore
+                            if k not in _serialized:
+                                _serialized.update(_new_attr)  # type: ignore
+                            _new_attr = _new_attr[k]  # type: ignore
+                            _serialized = _serialized[k]
+                except ValueError as err:
+                    if isinstance(err, SerializationError):
+                        raise
+
+        except (AttributeError, KeyError, TypeError) as err:
+            msg = "Attribute {} in object {} cannot be serialized.\n{}".format(attr_name, class_name, str(target_obj))
+            raise SerializationError(msg) from err
+        return serialized
+
+    def body(self, data, data_type, **kwargs):
+        """Serialize data intended for a request body.
+
+        :param object data: The data to be serialized.
+        :param str data_type: The type to be serialized from.
+        :rtype: dict
+        :raises SerializationError: if serialization fails.
+        :raises ValueError: if data is None
+        :returns: The serialized request body
+        """
+
+        # Just in case this is a dict
+        internal_data_type_str = data_type.strip("[]{}")
+        internal_data_type = self.dependencies.get(internal_data_type_str, None)
+        try:
+            is_xml_model_serialization = kwargs["is_xml"]
+        except KeyError:
+            if internal_data_type and issubclass(internal_data_type, Model):
+                is_xml_model_serialization = kwargs.setdefault("is_xml", internal_data_type.is_xml_model())
+            else:
+                is_xml_model_serialization = False
+        if internal_data_type and not isinstance(internal_data_type, Enum):
+            try:
+                deserializer = Deserializer(self.dependencies)
+                # Since this is serialization, the format is almost surely not JSON REST.
+                # We're not able to deal with additional properties for now.
+                deserializer.additional_properties_detection = False
+                if is_xml_model_serialization:
+                    deserializer.key_extractors = [  # type: ignore
+                        attribute_key_case_insensitive_extractor,
+                    ]
+                else:
+                    deserializer.key_extractors = [
+                        rest_key_case_insensitive_extractor,
+                        attribute_key_case_insensitive_extractor,
+                        last_rest_key_case_insensitive_extractor,
+                    ]
+                data = deserializer._deserialize(data_type, data)  # pylint: disable=protected-access
+            except DeserializationError as err:
+                raise SerializationError("Unable to build a model: " + str(err)) from err
+
+        return self._serialize(data, data_type, **kwargs)
+
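+    # body() sketch (Widget is an illustrative model whose "name" attribute
+    # maps to the flattened key "properties.name"): a plain dict is first
+    # rebuilt into a model instance via Deserializer, then serialized:
+    #
+    #     serializer = Serializer({"Widget": Widget})
+    #     serializer.body({"name": "w1"}, "Widget")
+    #     # -> {"properties": {"name": "w1"}}
+    #
+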
+    def url(self, name, data, data_type, **kwargs):
+        """Serialize data intended for a URL path.
+
+        :param str name: The name of the URL path parameter.
+        :param object data: The data to be serialized.
+        :param str data_type: The type to be serialized from.
+        :rtype: str
+        :returns: The serialized URL path
+        :raises TypeError: if serialization fails.
+        :raises ValueError: if data is None
+        """
+        try:
+            output = self.serialize_data(data, data_type, **kwargs)
+            if data_type == "bool":
+                output = json.dumps(output)
+
+            if kwargs.get("skip_quote") is True:
+                output = str(output)
+                output = output.replace("{", quote("{")).replace("}", quote("}"))
+            else:
+                output = quote(str(output), safe="")
+        except SerializationError as exc:
+            raise TypeError("{} must be type {}.".format(name, data_type)) from exc
+        return output
+
+    def query(self, name, data, data_type, **kwargs):
+        """Serialize data intended for a URL query.
+
+        :param str name: The name of the query parameter.
+        :param object data: The data to be serialized.
+        :param str data_type: The type to be serialized from.
+        :rtype: str, list
+        :raises TypeError: if serialization fails.
+        :raises ValueError: if data is None
+        :returns: The serialized query parameter
+        """
+        try:
+            # Treat the list aside, since we don't want to encode the div separator
+            if data_type.startswith("["):
+                internal_data_type = data_type[1:-1]
+                do_quote = not kwargs.get("skip_quote", False)
+                return self.serialize_iter(data, internal_data_type, do_quote=do_quote, **kwargs)
+
+            # Not a list, regular serialization
+            output = self.serialize_data(data, data_type, **kwargs)
+            if data_type == "bool":
+                output = json.dumps(output)
+            if kwargs.get("skip_quote") is True:
+                output = str(output)
+            else:
+                output = quote(str(output), safe="")
+        except SerializationError as exc:
+            raise TypeError("{} must be type {}.".format(name, data_type)) from exc
+        return str(output)
+
+    def header(self, name, data, data_type, **kwargs):
+        """Serialize data intended for a request header.
+
+        :param str name: The name of the header.
+        :param object data: The data to be serialized.
+        :param str data_type: The type to be serialized from.
+        :rtype: str
+        :raises TypeError: if serialization fails.
+        :raises ValueError: if data is None
+        :returns: The serialized header
+        """
+        try:
+            if data_type in ["[str]"]:
+                data = ["" if d is None else d for d in data]
+
+            output = self.serialize_data(data, data_type, **kwargs)
+            if data_type == "bool":
+                output = json.dumps(output)
+        except SerializationError as exc:
+            raise TypeError("{} must be type {}.".format(name, data_type)) from exc
+        return str(output)
+
+    def serialize_data(self, data, data_type, **kwargs):
+        """Serialize generic data according to supplied data type.
+
+        :param object data: The data to be serialized.
+        :param str data_type: The type to be serialized from.
+        :raises AttributeError: if required data is None.
+        :raises ValueError: if data is None
+        :raises SerializationError: if serialization fails.
+        :returns: The serialized data.
+        :rtype: str, int, float, bool, dict, list
+        """
+        if data is None:
+            raise ValueError("No value for given attribute")
+
+        try:
+            if data is CoreNull:
+                return None
+            if data_type in self.basic_types.values():
+                return self.serialize_basic(data, data_type, **kwargs)
+
+            if data_type in self.serialize_type:
+                return self.serialize_type[data_type](data, **kwargs)
+
+            # If dependencies is empty, try with current data class
+            # It has to be a subclass of Enum anyway
+            enum_type = self.dependencies.get(data_type, data.__class__)
+            if issubclass(enum_type, Enum):
+                return Serializer.serialize_enum(data, enum_obj=enum_type)
+
+            iter_type = data_type[0] + data_type[-1]
+            if iter_type in self.serialize_type:
+                return self.serialize_type[iter_type](data, data_type[1:-1], **kwargs)
+
+        except (ValueError, TypeError) as err:
+            msg = "Unable to serialize value: {!r} as type: {!r}."
+            raise SerializationError(msg.format(data, data_type)) from err
+        return self._serialize(data, **kwargs)
+
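+    # Dispatch sketch for serialize_data: iterable types are routed via a key
+    # built from the first and last characters of the type string:
+    #
+    #     serializer.serialize_data(["a", "b"], "[str]")  # "[]" -> serialize_iter
+    #     serializer.serialize_data({"k": 1}, "{int}")    # "{}" -> serialize_dict
+    #
+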
+    @classmethod
+    def _get_custom_serializers(cls, data_type, **kwargs):  # pylint: disable=inconsistent-return-statements
+        custom_serializer = kwargs.get("basic_types_serializers", {}).get(data_type)
+        if custom_serializer:
+            return custom_serializer
+        if kwargs.get("is_xml", False):
+            return cls._xml_basic_types_serializers.get(data_type)
+
+    @classmethod
+    def serialize_basic(cls, data, data_type, **kwargs):
+        """Serialize basic builting data type.
+        Serializes objects to str, int, float or bool.
+
+        Possible kwargs:
+        - basic_types_serializers dict[str, callable] : If set, use the callable as serializer
+        - is_xml bool : If set, use xml_basic_types_serializers
+
+        :param obj data: Object to be serialized.
+        :param str data_type: Type of the object.
+        :rtype: str, int, float, bool
+        :return: serialized object
+        """
+        custom_serializer = cls._get_custom_serializers(data_type, **kwargs)
+        if custom_serializer:
+            return custom_serializer(data)
+        if data_type == "str":
+            return cls.serialize_unicode(data)
+        return eval(data_type)(data)  # nosec # pylint: disable=eval-used
+
+    @classmethod
+    def serialize_unicode(cls, data):
+        """Special handling for serializing unicode strings in Py2.
+        Return unicode unchanged (JSON and ElementTree serialize it natively), otherwise coerce to str.
+
+        :param str data: Object to be serialized.
+        :rtype: str
+        :return: serialized object
+        """
+        try:  # If I received an enum, return its value
+            return data.value
+        except AttributeError:
+            pass
+
+        try:
+            if isinstance(data, unicode):  # type: ignore
+                # Leave it alone: JSON and XML ElementTree are able
+                # to serialize u'' strings correctly
+                return data
+        except NameError:
+            return str(data)
+        return str(data)
+
+    def serialize_iter(self, data, iter_type, div=None, **kwargs):
+        """Serialize iterable.
+
+        Supported kwargs:
+        - serialization_ctxt dict : The current entry of _attribute_map, or same format.
+          serialization_ctxt['type'] should be same as data_type.
+        - is_xml bool : If set, serialize as XML
+
+        :param list data: Object to be serialized.
+        :param str iter_type: Type of object in the iterable.
+        :param str div: If set, this str will be used to join the elements
+         in the iterable into a combined string. Default is 'None'.
+        :rtype: list, str
+        :return: serialized iterable
+        """
+        if isinstance(data, str):
+            raise SerializationError("Refuse str type as a valid iter type.")
+
+        serialization_ctxt = kwargs.get("serialization_ctxt", {})
+        is_xml = kwargs.get("is_xml", False)
+
+        serialized = []
+        for d in data:
+            try:
+                serialized.append(self.serialize_data(d, iter_type, **kwargs))
+            except ValueError as err:
+                if isinstance(err, SerializationError):
+                    raise
+                serialized.append(None)
+
+        if kwargs.get("do_quote", False):
+            serialized = ["" if s is None else quote(str(s), safe="") for s in serialized]
+
+        if div:
+            serialized = ["" if s is None else str(s) for s in serialized]
+            serialized = div.join(serialized)
+
+        if "xml" in serialization_ctxt or is_xml:
+            # XML serialization is more complicated
+            xml_desc = serialization_ctxt.get("xml", {})
+            xml_name = xml_desc.get("name")
+            if not xml_name:
+                xml_name = serialization_ctxt["key"]
+
+            # Create a wrap node if necessary (use the fact that Element and list have "append")
+            is_wrapped = xml_desc.get("wrapped", False)
+            node_name = xml_desc.get("itemsName", xml_name)
+            if is_wrapped:
+                final_result = _create_xml_node(xml_name, xml_desc.get("prefix", None), xml_desc.get("ns", None))
+            else:
+                final_result = []
+            # All list elements to "local_node"
+            for el in serialized:
+                if isinstance(el, ET.Element):
+                    el_node = el
+                else:
+                    el_node = _create_xml_node(node_name, xml_desc.get("prefix", None), xml_desc.get("ns", None))
+                        if el is not None:  # Otherwise it would write the literal string "None"
+                        el_node.text = str(el)
+                final_result.append(el_node)
+            return final_result
+        return serialized
+
+    def serialize_dict(self, attr, dict_type, **kwargs):
+        """Serialize a dictionary of objects.
+
+        :param dict attr: Object to be serialized.
+        :param str dict_type: Type of object in the dictionary.
+        :rtype: dict
+        :return: serialized dictionary
+        """
+        serialization_ctxt = kwargs.get("serialization_ctxt", {})
+        serialized = {}
+        for key, value in attr.items():
+            try:
+                serialized[self.serialize_unicode(key)] = self.serialize_data(value, dict_type, **kwargs)
+            except ValueError as err:
+                if isinstance(err, SerializationError):
+                    raise
+                serialized[self.serialize_unicode(key)] = None
+
+        if "xml" in serialization_ctxt:
+            # XML serialization is more complicated
+            xml_desc = serialization_ctxt["xml"]
+            xml_name = xml_desc["name"]
+
+            final_result = _create_xml_node(xml_name, xml_desc.get("prefix", None), xml_desc.get("ns", None))
+            for key, value in serialized.items():
+                ET.SubElement(final_result, key).text = value
+            return final_result
+
+        return serialized
+
+    def serialize_object(self, attr, **kwargs):  # pylint: disable=too-many-return-statements
+        """Serialize a generic object.
+        This will be handled as a dictionary. If object passed in is not
+        a basic type (str, int, float, dict, list) it will simply be
+        cast to str.
+
+        :param dict attr: Object to be serialized.
+        :rtype: dict or str
+        :return: serialized object
+        """
+        if attr is None:
+            return None
+        if isinstance(attr, ET.Element):
+            return attr
+        obj_type = type(attr)
+        if obj_type in self.basic_types:
+            return self.serialize_basic(attr, self.basic_types[obj_type], **kwargs)
+        if obj_type is _long_type:
+            return self.serialize_long(attr)
+        if obj_type is str:
+            return self.serialize_unicode(attr)
+        if obj_type is datetime.datetime:
+            return self.serialize_iso(attr)
+        if obj_type is datetime.date:
+            return self.serialize_date(attr)
+        if obj_type is datetime.time:
+            return self.serialize_time(attr)
+        if obj_type is datetime.timedelta:
+            return self.serialize_duration(attr)
+        if obj_type is decimal.Decimal:
+            return self.serialize_decimal(attr)
+
+        # If it's a model or I know this dependency, serialize as a Model
+        if obj_type in self.dependencies.values() or isinstance(attr, Model):
+            return self._serialize(attr)
+
+        if obj_type == dict:
+            serialized = {}
+            for key, value in attr.items():
+                try:
+                    serialized[self.serialize_unicode(key)] = self.serialize_object(value, **kwargs)
+                except ValueError:
+                    serialized[self.serialize_unicode(key)] = None
+            return serialized
+
+        if obj_type == list:
+            serialized = []
+            for obj in attr:
+                try:
+                    serialized.append(self.serialize_object(obj, **kwargs))
+                except ValueError:
+                    pass
+            return serialized
+        return str(attr)
+
+    @staticmethod
+    def serialize_enum(attr, enum_obj=None):
+        try:
+            result = attr.value
+        except AttributeError:
+            result = attr
+        try:
+            enum_obj(result)  # type: ignore
+            return result
+        except ValueError as exc:
+            for enum_value in enum_obj:  # type: ignore
+                if enum_value.value.lower() == str(attr).lower():
+                    return enum_value.value
+            error = "{!r} is not valid value for enum {!r}"
+            raise SerializationError(error.format(attr, enum_obj)) from exc
+
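+    # serialize_enum sketch: values are matched case-insensitively against the
+    # enum's values (Color is illustrative):
+    #
+    #     class Color(Enum):
+    #         RED = "Red"
+    #
+    #     Serializer.serialize_enum("red", enum_obj=Color)  # -> "Red"
+    #
+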
+    @staticmethod
+    def serialize_bytearray(attr, **kwargs):  # pylint: disable=unused-argument
+        """Serialize bytearray into base-64 string.
+
+        :param str attr: Object to be serialized.
+        :rtype: str
+        :return: serialized base64
+        """
+        return b64encode(attr).decode()
+
+    @staticmethod
+    def serialize_base64(attr, **kwargs):  # pylint: disable=unused-argument
+        """Serialize str into base-64 string.
+
+        :param str attr: Object to be serialized.
+        :rtype: str
+        :return: serialized base64
+        """
+        encoded = b64encode(attr).decode("ascii")
+        return encoded.strip("=").replace("+", "-").replace("/", "_")
+
+    @staticmethod
+    def serialize_decimal(attr, **kwargs):  # pylint: disable=unused-argument
+        """Serialize Decimal object to float.
+
+        :param decimal attr: Object to be serialized.
+        :rtype: float
+        :return: serialized decimal
+        """
+        return float(attr)
+
+    @staticmethod
+    def serialize_long(attr, **kwargs):  # pylint: disable=unused-argument
+        """Serialize long (Py2) or int (Py3).
+
+        :param int attr: Object to be serialized.
+        :rtype: int/long
+        :return: serialized long
+        """
+        return _long_type(attr)
+
+    @staticmethod
+    def serialize_date(attr, **kwargs):  # pylint: disable=unused-argument
+        """Serialize Date object into ISO-8601 formatted string.
+
+        :param Date attr: Object to be serialized.
+        :rtype: str
+        :return: serialized date
+        """
+        if isinstance(attr, str):
+            attr = isodate.parse_date(attr)
+        t = "{:04}-{:02}-{:02}".format(attr.year, attr.month, attr.day)
+        return t
+
+    @staticmethod
+    def serialize_time(attr, **kwargs):  # pylint: disable=unused-argument
+        """Serialize Time object into ISO-8601 formatted string.
+
+        :param datetime.time attr: Object to be serialized.
+        :rtype: str
+        :return: serialized time
+        """
+        if isinstance(attr, str):
+            attr = isodate.parse_time(attr)
+        t = "{:02}:{:02}:{:02}".format(attr.hour, attr.minute, attr.second)
+        if attr.microsecond:
+            # Zero-pad to six digits so e.g. 5000us serializes as ".005000", not ".5000".
+            t += ".{:06}".format(attr.microsecond)
+        return t
+
+    @staticmethod
+    def serialize_duration(attr, **kwargs):  # pylint: disable=unused-argument
+        """Serialize TimeDelta object into ISO-8601 formatted string.
+
+        :param TimeDelta attr: Object to be serialized.
+        :rtype: str
+        :return: serialized duration
+        """
+        if isinstance(attr, str):
+            attr = isodate.parse_duration(attr)
+        return isodate.duration_isoformat(attr)
+
+    @staticmethod
+    def serialize_rfc(attr, **kwargs):  # pylint: disable=unused-argument
+        """Serialize Datetime object into RFC-1123 formatted string.
+
+        :param Datetime attr: Object to be serialized.
+        :rtype: str
+        :raises TypeError: if format invalid.
+        :return: serialized rfc
+        """
+        try:
+            if not attr.tzinfo:
+                _LOGGER.warning("Datetime with no tzinfo will be considered UTC.")
+            utc = attr.utctimetuple()
+        except AttributeError as exc:
+            raise TypeError("RFC1123 object must be valid Datetime object.") from exc
+
+        return "{}, {:02} {} {:04} {:02}:{:02}:{:02} GMT".format(
+            Serializer.days[utc.tm_wday],
+            utc.tm_mday,
+            Serializer.months[utc.tm_mon],
+            utc.tm_year,
+            utc.tm_hour,
+            utc.tm_min,
+            utc.tm_sec,
+        )
+
+    @staticmethod
+    def serialize_iso(attr, **kwargs):  # pylint: disable=unused-argument
+        """Serialize Datetime object into ISO-8601 formatted string.
+
+        :param Datetime attr: Object to be serialized.
+        :rtype: str
+        :raises SerializationError: if format invalid.
+        :return: serialized iso
+        """
+        if isinstance(attr, str):
+            attr = isodate.parse_datetime(attr)
+        try:
+            if not attr.tzinfo:
+                _LOGGER.warning("Datetime with no tzinfo will be considered UTC.")
+            utc = attr.utctimetuple()
+            if utc.tm_year > 9999 or utc.tm_year < 1:
+                raise OverflowError("Hit max or min date")
+
+            microseconds = str(attr.microsecond).rjust(6, "0").rstrip("0").ljust(3, "0")
+            if microseconds:
+                microseconds = "." + microseconds
+            date = "{:04}-{:02}-{:02}T{:02}:{:02}:{:02}".format(
+                utc.tm_year, utc.tm_mon, utc.tm_mday, utc.tm_hour, utc.tm_min, utc.tm_sec
+            )
+            return date + microseconds + "Z"
+        except (ValueError, OverflowError) as err:
+            msg = "Unable to serialize datetime object."
+            raise SerializationError(msg) from err
+        except AttributeError as err:
+            msg = "ISO-8601 object must be valid Datetime object."
+            raise TypeError(msg) from err
+
+    @staticmethod
+    def serialize_unix(attr, **kwargs):  # pylint: disable=unused-argument
+        """Serialize Datetime object into IntTime format.
+        This is represented as seconds.
+
+        :param Datetime attr: Object to be serialized.
+        :rtype: int
+        :raises SerializationError: if format invalid
+        :return: serialized unix
+        """
+        if isinstance(attr, int):
+            return attr
+        try:
+            if not attr.tzinfo:
+                _LOGGER.warning("Datetime with no tzinfo will be considered UTC.")
+            return int(calendar.timegm(attr.utctimetuple()))
+        except AttributeError as exc:
+            raise TypeError("Unix time object must be valid Datetime object.") from exc
+
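+# A short sketch of the Serializer helpers above (dates chosen arbitrarily):
+def _example_serializer_helpers():
+    serializer = Serializer()
+    dt = datetime.datetime(2024, 1, 2, 3, 4, 5, tzinfo=TZ_UTC)
+    assert Serializer.serialize_rfc(dt) == "Tue, 02 Jan 2024 03:04:05 GMT"
+    assert Serializer.serialize_iso(dt) == "2024-01-02T03:04:05.000Z"
+    # URL path values are percent-encoded unless skip_quote is passed:
+    assert serializer.url("container", "my container", "str") == "my%20container"
+    # Query lists are serialized element-wise and quoted by default:
+    assert serializer.query("letters", ["a b", "c"], "[str]") == ["a%20b", "c"]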
+
+def rest_key_extractor(attr, attr_desc, data):  # pylint: disable=unused-argument
+    key = attr_desc["key"]
+    working_data = data
+
+    while "." in key:
+        # Need the cast, as for some reasons "split" is typed as list[str | Any]
+        dict_keys = cast(List[str], _FLATTEN.split(key))
+        if len(dict_keys) == 1:
+            key = _decode_attribute_map_key(dict_keys[0])
+            break
+        working_key = _decode_attribute_map_key(dict_keys[0])
+        working_data = working_data.get(working_key, data)
+        if working_data is None:
+            # If at any point while following the flattened JSON path we see None,
+            # it means all properties underneath are None as well
+            return None
+        key = ".".join(dict_keys[1:])
+
+    return working_data.get(key)
+
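+# Sketch of flattened-key extraction (data shape is illustrative):
+def _example_rest_key_extractor():
+    attr_desc = {"key": "properties.name", "type": "str"}
+    assert rest_key_extractor("name", attr_desc, {"properties": {"name": "w1"}}) == "w1"
+    # A None intermediate node short-circuits: everything below it is None too.
+    assert rest_key_extractor("name", attr_desc, {"properties": None}) is None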
+
+def rest_key_case_insensitive_extractor(  # pylint: disable=unused-argument, inconsistent-return-statements
+    attr, attr_desc, data
+):
+    key = attr_desc["key"]
+    working_data = data
+
+    while "." in key:
+        dict_keys = _FLATTEN.split(key)
+        if len(dict_keys) == 1:
+            key = _decode_attribute_map_key(dict_keys[0])
+            break
+        working_key = _decode_attribute_map_key(dict_keys[0])
+        working_data = attribute_key_case_insensitive_extractor(working_key, None, working_data)
+        if working_data is None:
+            # If at any point while following the flattened JSON path we see None,
+            # it means all properties underneath are None as well
+            return None
+        key = ".".join(dict_keys[1:])
+
+    if working_data:
+        return attribute_key_case_insensitive_extractor(key, None, working_data)
+
+
+def last_rest_key_extractor(attr, attr_desc, data):  # pylint: disable=unused-argument
+    """Extract the attribute in "data" based on the last part of the JSON path key.
+
+    :param str attr: The attribute to extract
+    :param dict attr_desc: The attribute description
+    :param dict data: The data to extract from
+    :rtype: object
+    :returns: The extracted attribute
+    """
+    key = attr_desc["key"]
+    dict_keys = _FLATTEN.split(key)
+    return attribute_key_extractor(dict_keys[-1], None, data)
+
+
+def last_rest_key_case_insensitive_extractor(attr, attr_desc, data):  # pylint: disable=unused-argument
+    """Extract the attribute in "data" based on the last part of the JSON path key.
+
+    This is the case insensitive version of "last_rest_key_extractor"
+    :param str attr: The attribute to extract
+    :param dict attr_desc: The attribute description
+    :param dict data: The data to extract from
+    :rtype: object
+    :returns: The extracted attribute
+    """
+    key = attr_desc["key"]
+    dict_keys = _FLATTEN.split(key)
+    return attribute_key_case_insensitive_extractor(dict_keys[-1], None, data)
+
+
+def attribute_key_extractor(attr, _, data):
+    return data.get(attr)
+
+
+def attribute_key_case_insensitive_extractor(attr, _, data):
+    found_key = None
+    lower_attr = attr.lower()
+    for key in data:
+        if lower_attr == key.lower():
+            found_key = key
+            break
+
+    return data.get(found_key)
+
+
+def _extract_name_from_internal_type(internal_type):
+    """Given an internal type XML description, extract correct XML name with namespace.
+
+    :param type internal_type: A model type
+    :rtype: str
+    :returns: The XML name, namespace-qualified if a namespace is declared
+    """
+    internal_type_xml_map = getattr(internal_type, "_xml_map", {})
+    xml_name = internal_type_xml_map.get("name", internal_type.__name__)
+    xml_ns = internal_type_xml_map.get("ns", None)
+    if xml_ns:
+        xml_name = "{{{}}}{}".format(xml_ns, xml_name)
+    return xml_name
+
+
+def xml_key_extractor(attr, attr_desc, data):  # pylint: disable=unused-argument,too-many-return-statements
+    if isinstance(data, dict):
+        return None
+
+    # Test if this model is XML ready first
+    if not isinstance(data, ET.Element):
+        return None
+
+    xml_desc = attr_desc.get("xml", {})
+    xml_name = xml_desc.get("name", attr_desc["key"])
+
+    # Look for children
+    is_iter_type = attr_desc["type"].startswith("[")
+    is_wrapped = xml_desc.get("wrapped", False)
+    internal_type = attr_desc.get("internalType", None)
+    internal_type_xml_map = getattr(internal_type, "_xml_map", {})
+
+    # Integrate namespace if necessary
+    xml_ns = xml_desc.get("ns", internal_type_xml_map.get("ns", None))
+    if xml_ns:
+        xml_name = "{{{}}}{}".format(xml_ns, xml_name)
+
+    # If it's an attribute, that's simple
+    if xml_desc.get("attr", False):
+        return data.get(xml_name)
+
+    # If it's x-ms-text, that's simple too
+    if xml_desc.get("text", False):
+        return data.text
+
+    # Scenario where I take the local name:
+    # - Wrapped node
+    # - Internal type is an enum (considered basic types)
+    # - Internal type has no XML/Name node
+    if is_wrapped or (internal_type and (issubclass(internal_type, Enum) or "name" not in internal_type_xml_map)):
+        children = data.findall(xml_name)
+    # If internal type has a local name and it's not a list, I use that name
+    elif not is_iter_type and internal_type and "name" in internal_type_xml_map:
+        xml_name = _extract_name_from_internal_type(internal_type)
+        children = data.findall(xml_name)
+    # That's an array
+    else:
+        if internal_type:  # Complex type, ignore itemsName and use the complex type name
+            items_name = _extract_name_from_internal_type(internal_type)
+        else:
+            items_name = xml_desc.get("itemsName", xml_name)
+        children = data.findall(items_name)
+
+    if len(children) == 0:
+        if is_iter_type:
+            if is_wrapped:
+                return None  # wrapped, but no node found: we want None
+            return []  # not wrapped, assume empty list
+        return None  # Assume it's not there, maybe an optional node.
+
+    # If is_iter_type and not wrapped, return all found children
+    if is_iter_type:
+        if not is_wrapped:
+            return children
+        # Iter and wrapped, should have found one node only (the wrap one)
+        if len(children) != 1:
+            raise DeserializationError(
+                "Tried to deserialize an array not wrapped, and found several nodes '{}'. Maybe you should declare this array as wrapped?".format(  # pylint: disable=line-too-long
+                    xml_name
+                )
+            )
+        return list(children[0])  # Might be empty list and that's ok.
+
+    # Here it's not an iter type; we should have found one element only or empty
+    if len(children) > 1:
+        raise DeserializationError("Find several XML '{}' where it was not expected".format(xml_name))
+    return children[0]
+
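+# Sketch of XML extraction for a plain (non-wrapped, non-attribute) element:
+def _example_xml_key_extractor():
+    root = ET.fromstring("<Container><Name>images</Name></Container>")
+    attr_desc = {"key": "Name", "type": "str", "xml": {}}
+    # The matching child node is returned; deserialize_data later reads .text.
+    node = xml_key_extractor("name", attr_desc, root)
+    assert node.text == "images"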
+
+class Deserializer:
+    """Response object model deserializer.
+
+    :param dict classes: Class type dictionary for deserializing complex types.
+    :ivar list key_extractors: Ordered list of extractors to be used by this deserializer.
+    """
+
+    basic_types = {str: "str", int: "int", bool: "bool", float: "float"}
+
+    valid_date = re.compile(r"\d{4}[-]\d{2}[-]\d{2}T\d{2}:\d{2}:\d{2}\.?\d*Z?[-+]?[\d{2}]?:?[\d{2}]?")
+
+    def __init__(self, classes: Optional[Mapping[str, type]] = None) -> None:
+        self.deserialize_type = {
+            "iso-8601": Deserializer.deserialize_iso,
+            "rfc-1123": Deserializer.deserialize_rfc,
+            "unix-time": Deserializer.deserialize_unix,
+            "duration": Deserializer.deserialize_duration,
+            "date": Deserializer.deserialize_date,
+            "time": Deserializer.deserialize_time,
+            "decimal": Deserializer.deserialize_decimal,
+            "long": Deserializer.deserialize_long,
+            "bytearray": Deserializer.deserialize_bytearray,
+            "base64": Deserializer.deserialize_base64,
+            "object": self.deserialize_object,
+            "[]": self.deserialize_iter,
+            "{}": self.deserialize_dict,
+        }
+        self.deserialize_expected_types = {
+            "duration": (isodate.Duration, datetime.timedelta),
+            "iso-8601": (datetime.datetime),
+        }
+        self.dependencies: Dict[str, type] = dict(classes) if classes else {}
+        self.key_extractors = [rest_key_extractor, xml_key_extractor]
+        # Additional properties detection only works if "rest_key_extractor" is
+        # used to extract the keys. Making it work with any key extractor is too
+        # complicated, with no real scenario for now.
+        # Hence this flag to disable additional properties detection. It should be
+        # set if you expect the input NOT to follow JSON REST syntax; otherwise
+        # results are unexpected.
+        self.additional_properties_detection = True
+
+    def __call__(self, target_obj, response_data, content_type=None):
+        """Call the deserializer to process a REST response.
+
+        :param str target_obj: Target data type to deserialize to.
+        :param requests.Response response_data: REST response object.
+        :param str content_type: Swagger "produces" if available.
+        :raises DeserializationError: if deserialization fails.
+        :return: Deserialized object.
+        :rtype: object
+        """
+        data = self._unpack_content(response_data, content_type)
+        return self._deserialize(target_obj, data)
+
+    def _deserialize(self, target_obj, data):  # pylint: disable=inconsistent-return-statements
+        """Call the deserializer on a model.
+
+        Data needs to be already deserialized as JSON or XML ElementTree
+
+        :param str target_obj: Target data type to deserialize to.
+        :param object data: Object to deserialize.
+        :raises DeserializationError: if deserialization fails.
+        :return: Deserialized object.
+        :rtype: object
+        """
+        # This is already a model, go recursive just in case
+        if hasattr(data, "_attribute_map"):
+            constants = [name for name, config in getattr(data, "_validation", {}).items() if config.get("constant")]
+            try:
+                for attr, mapconfig in data._attribute_map.items():  # pylint: disable=protected-access
+                    if attr in constants:
+                        continue
+                    value = getattr(data, attr)
+                    if value is None:
+                        continue
+                    local_type = mapconfig["type"]
+                    internal_data_type = local_type.strip("[]{}")
+                    if internal_data_type not in self.dependencies or isinstance(internal_data_type, Enum):
+                        continue
+                    setattr(data, attr, self._deserialize(local_type, value))
+                return data
+            except AttributeError:
+                return
+
+        response, class_name = self._classify_target(target_obj, data)
+
+        if isinstance(response, str):
+            return self.deserialize_data(data, response)
+        if isinstance(response, type) and issubclass(response, Enum):
+            return self.deserialize_enum(data, response)
+
+        if data is None or data is CoreNull:
+            return data
+        try:
+            attributes = response._attribute_map  # type: ignore # pylint: disable=protected-access
+            d_attrs = {}
+            for attr, attr_desc in attributes.items():
+                # Check empty string. If it's not empty, someone has a real "additionalProperties"...
+                if attr == "additional_properties" and attr_desc["key"] == "":
+                    continue
+                raw_value = None
+                # Enhance attr_desc with some dynamic data
+                attr_desc = attr_desc.copy()  # Do a copy, do not change the real one
+                internal_data_type = attr_desc["type"].strip("[]{}")
+                if internal_data_type in self.dependencies:
+                    attr_desc["internalType"] = self.dependencies[internal_data_type]
+
+                for key_extractor in self.key_extractors:
+                    found_value = key_extractor(attr, attr_desc, data)
+                    if found_value is not None:
+                        if raw_value is not None and raw_value != found_value:
+                            msg = (
+                                "Ignoring extracted value '%s' from %s for key '%s'"
+                                " (duplicate extraction, follow extractors order)"
+                            )
+                            _LOGGER.warning(msg, found_value, key_extractor, attr)
+                            continue
+                        raw_value = found_value
+
+                value = self.deserialize_data(raw_value, attr_desc["type"])
+                d_attrs[attr] = value
+        except (AttributeError, TypeError, KeyError) as err:
+            msg = "Unable to deserialize to object: " + class_name  # type: ignore
+            raise DeserializationError(msg) from err
+        additional_properties = self._build_additional_properties(attributes, data)
+        return self._instantiate_model(response, d_attrs, additional_properties)
+
+    def _build_additional_properties(self, attribute_map, data):
+        if not self.additional_properties_detection:
+            return None
+        if "additional_properties" in attribute_map and attribute_map.get("additional_properties", {}).get("key") != "":
+            # Check empty string. If it's not empty, someone has a real "additionalProperties"
+            return None
+        if isinstance(data, ET.Element):
+            data = {el.tag: el.text for el in data}
+
+        known_keys = {
+            _decode_attribute_map_key(_FLATTEN.split(desc["key"])[0])
+            for desc in attribute_map.values()
+            if desc["key"] != ""
+        }
+        present_keys = set(data.keys())
+        missing_keys = present_keys - known_keys
+        return {key: data[key] for key in missing_keys}
+
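+    # Sketch: with an attribute map knowing only "name", the payload
+    # {"name": "w1", "color": "red"} yields additional properties
+    # {"color": "red"} (keys are compared on the first flattened segment).
+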
+    def _classify_target(self, target, data):
+        """Check to see whether the deserialization target object can
+        be classified into a subclass.
+        Once classification has been determined, initialize object.
+
+        :param str target: The target object type to deserialize to.
+        :param str/dict data: The response data to deserialize.
+        :return: The classified target object and its class name.
+        :rtype: tuple
+        """
+        if target is None:
+            return None, None
+
+        if isinstance(target, str):
+            try:
+                target = self.dependencies[target]
+            except KeyError:
+                return target, target
+
+        try:
+            target = target._classify(data, self.dependencies)  # type: ignore # pylint: disable=protected-access
+        except AttributeError:
+            pass  # Target is not a Model, no classify
+        return target, target.__class__.__name__  # type: ignore
+
+    def failsafe_deserialize(self, target_obj, data, content_type=None):
+        """Ignores any errors encountered in deserialization,
+        and falls back to not deserializing the object. Recommended
+        for use in error deserialization, as we want to return the
+        HttpResponseError to users, and not have them deal with
+        a deserialization error.
+
+        :param str target_obj: The target object type to deserialize to.
+        :param str/dict data: The response data to deserialize.
+        :param str content_type: Swagger "produces" if available.
+        :return: Deserialized object.
+        :rtype: object
+        """
+        try:
+            return self(target_obj, data, content_type=content_type)
+        except:  # pylint: disable=bare-except
+            _LOGGER.debug(
+                "Ran into a deserialization error. Ignoring since this is failsafe deserialization", exc_info=True
+            )
+            return None
+
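+    # failsafe_deserialize sketch: parse errors are swallowed and logged, and
+    # None is returned instead (useful when deserializing error bodies;
+    # "Widget" is an illustrative registered model):
+    #
+    #     deserializer = Deserializer({"Widget": Widget})
+    #     deserializer.failsafe_deserialize("Widget", b"not json")  # -> None
+    #
+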
+    @staticmethod
+    def _unpack_content(raw_data, content_type=None):
+        """Extract the correct structure for deserialization.
+
+        If raw_data is a PipelineResponse, try to extract the result of RawDeserializer.
+        if we can't, raise. Your Pipeline should have a RawDeserializer.
+
+        If not a pipeline response and raw_data is bytes or string, use content-type
+        to decode it. If no content-type, try JSON.
+
+        If raw_data is something else, bypass all logic and return it directly.
+
+        :param obj raw_data: Data to be processed.
+        :param str content_type: How to parse if raw_data is a string/bytes.
+        :raises JSONDecodeError: If JSON is requested and parsing is impossible.
+        :raises UnicodeDecodeError: If bytes is not UTF8
+        :rtype: object
+        :return: Unpacked content.
+        """
+        # Assume this is enough to detect a Pipeline Response without importing it
+        context = getattr(raw_data, "context", {})
+        if context:
+            if RawDeserializer.CONTEXT_NAME in context:
+                return context[RawDeserializer.CONTEXT_NAME]
+            raise ValueError("This pipeline didn't have the RawDeserializer policy; can't deserialize")
+
+        # Assume this is enough to recognize universal_http.ClientResponse without importing it
+        if hasattr(raw_data, "body"):
+            return RawDeserializer.deserialize_from_http_generics(raw_data.text(), raw_data.headers)
+
+        # Assume this enough to recognize requests.Response without importing it.
+        if hasattr(raw_data, "_content_consumed"):
+            return RawDeserializer.deserialize_from_http_generics(raw_data.text, raw_data.headers)
+
+        if isinstance(raw_data, (str, bytes)) or hasattr(raw_data, "read"):
+            return RawDeserializer.deserialize_from_text(raw_data, content_type)  # type: ignore
+        return raw_data
+
+    def _instantiate_model(self, response, attrs, additional_properties=None):
+        """Instantiate a response model passing in deserialized args.
+
+        :param Response response: The response model class.
+        :param dict attrs: The deserialized response attributes.
+        :param dict additional_properties: Additional properties to be set.
+        :rtype: Response
+        :return: The instantiated response model.
+        """
+        if callable(response):
+            subtype = getattr(response, "_subtype_map", {})
+            try:
+                readonly = [
+                    k
+                    for k, v in response._validation.items()  # pylint: disable=protected-access  # type: ignore
+                    if v.get("readonly")
+                ]
+                const = [
+                    k
+                    for k, v in response._validation.items()  # pylint: disable=protected-access  # type: ignore
+                    if v.get("constant")
+                ]
+                kwargs = {k: v for k, v in attrs.items() if k not in subtype and k not in readonly + const}
+                response_obj = response(**kwargs)
+                for attr in readonly:
+                    setattr(response_obj, attr, attrs.get(attr))
+                if additional_properties:
+                    response_obj.additional_properties = additional_properties  # type: ignore
+                return response_obj
+            except TypeError as err:
+                msg = "Unable to deserialize {} into model {}. ".format(kwargs, response)  # type: ignore
+                raise DeserializationError(msg + str(err)) from err
+        else:
+            try:
+                for attr, value in attrs.items():
+                    setattr(response, attr, value)
+                return response
+            except Exception as exp:
+                msg = "Unable to populate response model. "
+                msg += "Type: {}, Error: {}".format(type(response), exp)
+                raise DeserializationError(msg) from exp
+
+    def deserialize_data(self, data, data_type):  # pylint: disable=too-many-return-statements
+        """Process data for deserialization according to data type.
+
+        :param str data: The response string to be deserialized.
+        :param str data_type: The type to deserialize to.
+        :raises DeserializationError: if deserialization fails.
+        :return: Deserialized object.
+        :rtype: object
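+
+        Example (illustrative; ``deserializer`` is a ``Deserializer`` instance):
+
+        >>> deserializer.deserialize_data("42", "int")
+        42
+        >>> deserializer.deserialize_data(["1", "2"], "[int]")
+        [1, 2]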
+        """
+        if data is None:
+            return data
+
+        try:
+            if not data_type:
+                return data
+            if data_type in self.basic_types.values():
+                return self.deserialize_basic(data, data_type)
+            if data_type in self.deserialize_type:
+                if isinstance(data, self.deserialize_expected_types.get(data_type, tuple())):
+                    return data
+
+                is_a_text_parsing_type = lambda x: x not in [  # pylint: disable=unnecessary-lambda-assignment
+                    "object",
+                    "[]",
+                    r"{}",
+                ]
+                if isinstance(data, ET.Element) and is_a_text_parsing_type(data_type) and not data.text:
+                    return None
+                data_val = self.deserialize_type[data_type](data)
+                return data_val
+
+            iter_type = data_type[0] + data_type[-1]
+            if iter_type in self.deserialize_type:
+                return self.deserialize_type[iter_type](data, data_type[1:-1])
+
+            obj_type = self.dependencies[data_type]
+            if issubclass(obj_type, Enum):
+                if isinstance(data, ET.Element):
+                    data = data.text
+                return self.deserialize_enum(data, obj_type)
+
+        except (ValueError, TypeError, AttributeError) as err:
+            msg = "Unable to deserialize response data."
+            msg += " Data: {}, {}".format(data, data_type)
+            raise DeserializationError(msg) from err
+        return self._deserialize(obj_type, data)
+
+    def deserialize_iter(self, attr, iter_type):
+        """Deserialize an iterable.
+
+        :param list attr: Iterable to be deserialized.
+        :param str iter_type: The type of object in the iterable.
+        :return: Deserialized iterable.
+        :rtype: list
+        """
+        if attr is None:
+            return None
+        if isinstance(attr, ET.Element):  # If I receive an element here, get the children
+            attr = list(attr)
+        if not isinstance(attr, (list, set)):
+            raise DeserializationError("Cannot deserialize as [{}] an object of type {}".format(iter_type, type(attr)))
+        return [self.deserialize_data(a, iter_type) for a in attr]
+
+    def deserialize_dict(self, attr, dict_type):
+        """Deserialize a dictionary.
+
+        :param dict/list attr: Dictionary to be deserialized. Also accepts
+         a list of key, value pairs.
+        :param str dict_type: The object type of the items in the dictionary.
+        :return: Deserialized dictionary.
+        :rtype: dict
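+
+        Example (illustrative; ``deserializer`` is a ``Deserializer`` instance):
+
+        >>> deserializer.deserialize_dict([{"key": "a", "value": "1"}], "int")
+        {'a': 1}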
+        """
+        if isinstance(attr, list):
+            return {x["key"]: self.deserialize_data(x["value"], dict_type) for x in attr}
+
+        if isinstance(attr, ET.Element):
+            # Transform <Key>value</Key> into {"Key": "value"}
+            attr = {el.tag: el.text for el in attr}
+        return {k: self.deserialize_data(v, dict_type) for k, v in attr.items()}
+
+    def deserialize_object(self, attr, **kwargs):  # pylint: disable=too-many-return-statements
+        """Deserialize a generic object.
+        This will be handled as a dictionary.
+
+        :param dict attr: Dictionary to be deserialized.
+        :return: Deserialized object.
+        :rtype: dict
+        :raises TypeError: if non-builtin datatype encountered.
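+
+        Example (illustrative; ``deserializer`` is a ``Deserializer`` instance):
+
+        >>> deserializer.deserialize_object({"a": [1, "b"]})
+        {'a': [1, 'b']}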
+        """
+        if attr is None:
+            return None
+        if isinstance(attr, ET.Element):
+            # Do no recurse on XML, just return the tree as-is
+            return attr
+        if isinstance(attr, str):
+            return self.deserialize_basic(attr, "str")
+        obj_type = type(attr)
+        if obj_type in self.basic_types:
+            return self.deserialize_basic(attr, self.basic_types[obj_type])
+        if obj_type is _long_type:
+            return self.deserialize_long(attr)
+
+        if obj_type == dict:
+            deserialized = {}
+            for key, value in attr.items():
+                try:
+                    deserialized[key] = self.deserialize_object(value, **kwargs)
+                except ValueError:
+                    deserialized[key] = None
+            return deserialized
+
+        if obj_type == list:
+            deserialized = []
+            for obj in attr:
+                try:
+                    deserialized.append(self.deserialize_object(obj, **kwargs))
+                except ValueError:
+                    pass
+            return deserialized
+
+        error = "Cannot deserialize generic object with type: "
+        raise TypeError(error + str(obj_type))
+
+    def deserialize_basic(self, attr, data_type):  # pylint: disable=too-many-return-statements
+        """Deserialize basic builtin data type from string.
+        Will attempt to convert to str, int, float and bool.
+        This function will also accept '1', '0', 'true' and 'false' as
+        valid bool values.
+
+        :param str attr: response string to be deserialized.
+        :param str data_type: deserialization data type.
+        :return: Deserialized basic type.
+        :rtype: str, int, float or bool
+        :raises TypeError: if string format is not valid.
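+
+        Example (illustrative; ``deserializer`` is a ``Deserializer`` instance):
+
+        >>> deserializer.deserialize_basic("true", "bool")
+        True
+        >>> deserializer.deserialize_basic("1.5", "float")
+        1.5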
+        """
+        # If we're here, data is supposed to be a basic type.
+        # If it's still an XML node, take the text
+        if isinstance(attr, ET.Element):
+            attr = attr.text
+            if not attr:
+                if data_type == "str":
+                    # None or '', node <a/> is empty string.
+                    return ""
+                # None or '', node <a/> with a strong type is None.
+                # Don't try to model "empty bool" or "empty int"
+                return None
+
+        if data_type == "bool":
+            if attr in [True, False, 1, 0]:
+                return bool(attr)
+            if isinstance(attr, str):
+                if attr.lower() in ["true", "1"]:
+                    return True
+                if attr.lower() in ["false", "0"]:
+                    return False
+            raise TypeError("Invalid boolean value: {}".format(attr))
+
+        if data_type == "str":
+            return self.deserialize_unicode(attr)
+        return eval(data_type)(attr)  # nosec # pylint: disable=eval-used
+
+    @staticmethod
+    def deserialize_unicode(data):
+        """Preserve unicode objects in Python 2, otherwise return data
+        as a string.
+
+        :param str data: response string to be deserialized.
+        :return: Deserialized string.
+        :rtype: str or unicode
+        """
+        # We might be here because we have an enum modeled as string,
+        # and we try to deserialize a partial dict with enum inside
+        if isinstance(data, Enum):
+            return data
+
+        # Consider this is real string
+        try:
+            if isinstance(data, unicode):  # type: ignore
+                return data
+        except NameError:
+            return str(data)
+        return str(data)
+
+    @staticmethod
+    def deserialize_enum(data, enum_obj):
+        """Deserialize string into enum object.
+
+        If the string is not a valid enum value it will be returned as-is
+        and a warning will be logged.
+
+        :param str data: Response string to be deserialized. If this value is
+         None or invalid it will be returned as-is.
+        :param Enum enum_obj: Enum object to deserialize to.
+        :return: Deserialized enum object.
+        :rtype: Enum
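+
+        Example (illustrative, using a hypothetical enum; note the
+        case-insensitive fallback match):
+
+        >>> from enum import Enum
+        >>> class Color(Enum):
+        ...     RED = "red"
+        >>> Deserializer.deserialize_enum("RED", Color)
+        <Color.RED: 'red'>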
+        """
+        if isinstance(data, enum_obj) or data is None:
+            return data
+        if isinstance(data, Enum):
+            data = data.value
+        if isinstance(data, int):
+            # Workaround. We might consider removing it in the future.
+            try:
+                return list(enum_obj.__members__.values())[data]
+            except IndexError as exc:
+                error = "{!r} is not a valid index for enum {!r}"
+                raise DeserializationError(error.format(data, enum_obj)) from exc
+        try:
+            return enum_obj(str(data))
+        except ValueError:
+            for enum_value in enum_obj:
+                if enum_value.value.lower() == str(data).lower():
+                    return enum_value
+            # We don't fail anymore for unknown value, we deserialize as a string
+            _LOGGER.warning("Deserializer is not able to find %s as valid enum in %s", data, enum_obj)
+            return Deserializer.deserialize_unicode(data)
+
+    @staticmethod
+    def deserialize_bytearray(attr):
+        """Deserialize string into bytearray.
+
+        :param str attr: response string to be deserialized.
+        :return: Deserialized bytearray
+        :rtype: bytearray
+        :raises TypeError: if string format invalid.
+        """
+        if isinstance(attr, ET.Element):
+            attr = attr.text
+        return bytearray(b64decode(attr))  # type: ignore
+
+    @staticmethod
+    def deserialize_base64(attr):
+        """Deserialize base64 encoded string into string.
+
+        :param str attr: response string to be deserialized.
+        :return: Deserialized base64 string
+        :rtype: bytearray
+        :raises TypeError: if string format invalid.
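+
+        Example (illustrative; the URL-safe alphabet and missing padding are handled):
+
+        >>> Deserializer.deserialize_base64("aGVsbG8")
+        b'hello'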
+        """
+        if isinstance(attr, ET.Element):
+            attr = attr.text
+        padding = "=" * (3 - (len(attr) + 3) % 4)  # type: ignore
+        attr = attr + padding  # type: ignore
+        encoded = attr.replace("-", "+").replace("_", "/")
+        return b64decode(encoded)
+
+    @staticmethod
+    def deserialize_decimal(attr):
+        """Deserialize string into Decimal object.
+
+        :param str attr: response string to be deserialized.
+        :return: Deserialized decimal
+        :raises DeserializationError: if string format invalid.
+        :rtype: decimal
+        """
+        if isinstance(attr, ET.Element):
+            attr = attr.text
+        try:
+            return decimal.Decimal(str(attr))  # type: ignore
+        except decimal.DecimalException as err:
+            msg = "Invalid decimal {}".format(attr)
+            raise DeserializationError(msg) from err
+
+    @staticmethod
+    def deserialize_long(attr):
+        """Deserialize string into long (Py2) or int (Py3).
+
+        :param str attr: response string to be deserialized.
+        :return: Deserialized int
+        :rtype: long or int
+        :raises ValueError: if string format invalid.
+        """
+        if isinstance(attr, ET.Element):
+            attr = attr.text
+        return _long_type(attr)  # type: ignore
+
+    @staticmethod
+    def deserialize_duration(attr):
+        """Deserialize ISO-8601 formatted string into TimeDelta object.
+
+        :param str attr: response string to be deserialized.
+        :return: Deserialized duration
+        :rtype: TimeDelta
+        :raises DeserializationError: if string format invalid.
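+
+        Example (illustrative):
+
+        >>> Deserializer.deserialize_duration("PT1H30M")
+        datetime.timedelta(seconds=5400)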
+        """
+        if isinstance(attr, ET.Element):
+            attr = attr.text
+        try:
+            duration = isodate.parse_duration(attr)
+        except (ValueError, OverflowError, AttributeError) as err:
+            msg = "Cannot deserialize duration object."
+            raise DeserializationError(msg) from err
+        return duration
+
+    @staticmethod
+    def deserialize_date(attr):
+        """Deserialize ISO-8601 formatted string into Date object.
+
+        :param str attr: response string to be deserialized.
+        :return: Deserialized date
+        :rtype: Date
+        :raises DeserializationError: if string format invalid.
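+
+        Example (illustrative):
+
+        >>> Deserializer.deserialize_date("2025-01-01")
+        datetime.date(2025, 1, 1)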
+        """
+        if isinstance(attr, ET.Element):
+            attr = attr.text
+        if re.search(r"[^\W\d_]", attr, re.I + re.U):  # type: ignore
+            raise DeserializationError("Date must have only digits and -. Received: %s" % attr)
+        # This must NOT use defaultmonth/defaultday. Using 0 ensures incomplete dates raise an exception.
+        return isodate.parse_date(attr, defaultmonth=0, defaultday=0)
+
+    @staticmethod
+    def deserialize_time(attr):
+        """Deserialize ISO-8601 formatted string into time object.
+
+        :param str attr: response string to be deserialized.
+        :return: Deserialized time
+        :rtype: datetime.time
+        :raises DeserializationError: if string format invalid.
+        """
+        if isinstance(attr, ET.Element):
+            attr = attr.text
+        if re.search(r"[^\W\d_]", attr, re.I + re.U):  # type: ignore
+            raise DeserializationError("Date must have only digits and -. Received: %s" % attr)
+        return isodate.parse_time(attr)
+
+    @staticmethod
+    def deserialize_rfc(attr):
+        """Deserialize RFC-1123 formatted string into Datetime object.
+
+        :param str attr: response string to be deserialized.
+        :return: Deserialized RFC datetime
+        :rtype: Datetime
+        :raises DeserializationError: if string format invalid.
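+
+        Example (illustrative):
+
+        >>> Deserializer.deserialize_rfc("Wed, 01 Jan 2025 00:00:00 GMT")
+        datetime.datetime(2025, 1, 1, 0, 0, tzinfo=datetime.timezone.utc)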
+        """
+        if isinstance(attr, ET.Element):
+            attr = attr.text
+        try:
+            parsed_date = email.utils.parsedate_tz(attr)  # type: ignore
+            date_obj = datetime.datetime(
+                *parsed_date[:6], tzinfo=datetime.timezone(datetime.timedelta(minutes=(parsed_date[9] or 0) / 60))
+            )
+            if not date_obj.tzinfo:
+                date_obj = date_obj.astimezone(tz=TZ_UTC)
+        except ValueError as err:
+            msg = "Cannot deserialize to rfc datetime object."
+            raise DeserializationError(msg) from err
+        return date_obj
+
+    @staticmethod
+    def deserialize_iso(attr):
+        """Deserialize ISO-8601 formatted string into Datetime object.
+
+        :param str attr: response string to be deserialized.
+        :return: Deserialized ISO datetime
+        :rtype: Datetime
+        :raises DeserializationError: if string format invalid.
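+
+        Example (illustrative; fractional seconds beyond microseconds are truncated):
+
+        >>> Deserializer.deserialize_iso("2025-01-01T12:30:00Z").isoformat()
+        '2025-01-01T12:30:00+00:00'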
+        """
+        if isinstance(attr, ET.Element):
+            attr = attr.text
+        try:
+            attr = attr.upper()  # type: ignore
+            match = Deserializer.valid_date.match(attr)
+            if not match:
+                raise ValueError("Invalid datetime string: " + attr)
+
+            check_decimal = attr.split(".")
+            if len(check_decimal) > 1:
+                decimal_str = ""
+                for digit in check_decimal[1]:
+                    if digit.isdigit():
+                        decimal_str += digit
+                    else:
+                        break
+                if len(decimal_str) > 6:
+                    attr = attr.replace(decimal_str, decimal_str[0:6])
+
+            date_obj = isodate.parse_datetime(attr)
+            test_utc = date_obj.utctimetuple()
+            if test_utc.tm_year > 9999 or test_utc.tm_year < 1:
+                raise OverflowError("Hit max or min date")
+        except (ValueError, OverflowError, AttributeError) as err:
+            msg = "Cannot deserialize datetime object."
+            raise DeserializationError(msg) from err
+        return date_obj
+
+    @staticmethod
+    def deserialize_unix(attr):
+        """Serialize Datetime object into IntTime format.
+        This is represented as seconds.
+
+        :param int attr: Object to be serialized.
+        :return: Deserialized datetime
+        :rtype: Datetime
+        :raises DeserializationError: if format invalid
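+
+        Example (illustrative):
+
+        >>> Deserializer.deserialize_unix(1735689600)
+        datetime.datetime(2025, 1, 1, 0, 0, tzinfo=datetime.timezone.utc)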
+        """
+        if isinstance(attr, ET.Element):
+            attr = int(attr.text)  # type: ignore
+        try:
+            attr = int(attr)
+            date_obj = datetime.datetime.fromtimestamp(attr, TZ_UTC)
+        except ValueError as err:
+            msg = "Cannot deserialize to unix datetime object."
+            raise DeserializationError(msg) from err
+        return date_obj
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/aio/__init__.py b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/aio/__init__.py
new file mode 100644
index 00000000..8a9f7149
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/aio/__init__.py
@@ -0,0 +1,29 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+# pylint: disable=wrong-import-position
+
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from ._patch import *  # pylint: disable=unused-wildcard-import
+
+from ._azure_data_lake_storage_restapi import AzureDataLakeStorageRESTAPI  # type: ignore
+
+try:
+    from ._patch import __all__ as _patch_all
+    from ._patch import *
+except ImportError:
+    _patch_all = []
+from ._patch import patch_sdk as _patch_sdk
+
+__all__ = [
+    "AzureDataLakeStorageRESTAPI",
+]
+__all__.extend([p for p in _patch_all if p not in __all__])  # pyright: ignore
+
+_patch_sdk()
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/aio/_azure_data_lake_storage_restapi.py b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/aio/_azure_data_lake_storage_restapi.py
new file mode 100644
index 00000000..ecfcec9b
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/aio/_azure_data_lake_storage_restapi.py
@@ -0,0 +1,114 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+from copy import deepcopy
+from typing import Any, Awaitable, Optional
+from typing_extensions import Self
+
+from azure.core import AsyncPipelineClient
+from azure.core.pipeline import policies
+from azure.core.rest import AsyncHttpResponse, HttpRequest
+
+from .. import models as _models
+from .._serialization import Deserializer, Serializer
+from ._configuration import AzureDataLakeStorageRESTAPIConfiguration
+from .operations import FileSystemOperations, PathOperations, ServiceOperations
+
+
+class AzureDataLakeStorageRESTAPI:  # pylint: disable=client-accepts-api-version-keyword
+    """Azure Data Lake Storage provides storage for Hadoop and other big data workloads.
+
+    :ivar service: ServiceOperations operations
+    :vartype service: azure.storage.filedatalake.aio.operations.ServiceOperations
+    :ivar file_system: FileSystemOperations operations
+    :vartype file_system: azure.storage.filedatalake.aio.operations.FileSystemOperations
+    :ivar path: PathOperations operations
+    :vartype path: azure.storage.filedatalake.aio.operations.PathOperations
+    :param url: The URL of the service account, container, or blob that is the target of the
+     desired operation. Required.
+    :type url: str
+    :param base_url: Service URL. Required. Default value is "".
+    :type base_url: str
+    :param x_ms_lease_duration: The lease duration is required to acquire a lease, and specifies
+     the duration of the lease in seconds.  The lease duration must be between 15 and 60 seconds or
+     -1 for infinite lease. Default value is None.
+    :type x_ms_lease_duration: int
+    :keyword resource: The value must be "filesystem" for all filesystem operations. Default value
+     is "filesystem". Note that overriding this default value may result in unsupported behavior.
+    :paramtype resource: str
+    :keyword version: Specifies the version of the operation to use for this request. Default value
+     is "2025-01-05". Note that overriding this default value may result in unsupported behavior.
+    :paramtype version: str
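+
+    Example (illustrative sketch; the account URL is an assumption):
+
+    >>> client = AzureDataLakeStorageRESTAPI(
+    ...     url="https://myaccount.dfs.core.windows.net/myfilesystem"
+    ... )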
+    """
+
+    def __init__(  # pylint: disable=missing-client-constructor-parameter-credential
+        self, url: str, base_url: str = "", x_ms_lease_duration: Optional[int] = None, **kwargs: Any
+    ) -> None:
+        self._config = AzureDataLakeStorageRESTAPIConfiguration(
+            url=url, x_ms_lease_duration=x_ms_lease_duration, **kwargs
+        )
+        _policies = kwargs.pop("policies", None)
+        if _policies is None:
+            _policies = [
+                policies.RequestIdPolicy(**kwargs),
+                self._config.headers_policy,
+                self._config.user_agent_policy,
+                self._config.proxy_policy,
+                policies.ContentDecodePolicy(**kwargs),
+                self._config.redirect_policy,
+                self._config.retry_policy,
+                self._config.authentication_policy,
+                self._config.custom_hook_policy,
+                self._config.logging_policy,
+                policies.DistributedTracingPolicy(**kwargs),
+                policies.SensitiveHeaderCleanupPolicy(**kwargs) if self._config.redirect_policy else None,
+                self._config.http_logging_policy,
+            ]
+        self._client: AsyncPipelineClient = AsyncPipelineClient(base_url=base_url, policies=_policies, **kwargs)
+
+        client_models = {k: v for k, v in _models.__dict__.items() if isinstance(v, type)}
+        self._serialize = Serializer(client_models)
+        self._deserialize = Deserializer(client_models)
+        self._serialize.client_side_validation = False
+        self.service = ServiceOperations(self._client, self._config, self._serialize, self._deserialize)
+        self.file_system = FileSystemOperations(self._client, self._config, self._serialize, self._deserialize)
+        self.path = PathOperations(self._client, self._config, self._serialize, self._deserialize)
+
+    def _send_request(
+        self, request: HttpRequest, *, stream: bool = False, **kwargs: Any
+    ) -> Awaitable[AsyncHttpResponse]:
+        """Runs the network request through the client's chained policies.
+
+        >>> from azure.core.rest import HttpRequest
+        >>> request = HttpRequest("GET", "https://www.example.org/")
+        <HttpRequest [GET], url: 'https://www.example.org/'>
+        >>> response = await client._send_request(request)
+        <AsyncHttpResponse: 200 OK>
+
+        For more information on this code flow, see https://aka.ms/azsdk/dpcodegen/python/send_request
+
+        :param request: The network request you want to make. Required.
+        :type request: ~azure.core.rest.HttpRequest
+        :keyword bool stream: Whether the response payload will be streamed. Defaults to False.
+        :return: The response of your network call. Does not do error handling on your response.
+        :rtype: ~azure.core.rest.AsyncHttpResponse
+        """
+
+        request_copy = deepcopy(request)
+        request_copy.url = self._client.format_url(request_copy.url)
+        return self._client.send_request(request_copy, stream=stream, **kwargs)  # type: ignore
+
+    async def close(self) -> None:
+        await self._client.close()
+
+    async def __aenter__(self) -> Self:
+        await self._client.__aenter__()
+        return self
+
+    async def __aexit__(self, *exc_details: Any) -> None:
+        await self._client.__aexit__(*exc_details)
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/aio/_configuration.py b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/aio/_configuration.py
new file mode 100644
index 00000000..57b28d3b
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/aio/_configuration.py
@@ -0,0 +1,61 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+from typing import Any, Literal, Optional
+
+from azure.core.pipeline import policies
+
+VERSION = "unknown"
+
+
+class AzureDataLakeStorageRESTAPIConfiguration:  # pylint: disable=too-many-instance-attributes
+    """Configuration for AzureDataLakeStorageRESTAPI.
+
+    Note that all parameters used to create this instance are saved as instance
+    attributes.
+
+    :param url: The URL of the service account, container, or blob that is the target of the
+     desired operation. Required.
+    :type url: str
+    :param x_ms_lease_duration: The lease duration is required to acquire a lease, and specifies
+     the duration of the lease in seconds.  The lease duration must be between 15 and 60 seconds or
+     -1 for infinite lease. Default value is None.
+    :type x_ms_lease_duration: int
+    :keyword resource: The value must be "filesystem" for all filesystem operations. Default value
+     is "filesystem". Note that overriding this default value may result in unsupported behavior.
+    :paramtype resource: str
+    :keyword version: Specifies the version of the operation to use for this request. Default value
+     is "2025-01-05". Note that overriding this default value may result in unsupported behavior.
+    :paramtype version: str
+    """
+
+    def __init__(self, url: str, x_ms_lease_duration: Optional[int] = None, **kwargs: Any) -> None:
+        resource: Literal["filesystem"] = kwargs.pop("resource", "filesystem")
+        version: Literal["2025-01-05"] = kwargs.pop("version", "2025-01-05")
+
+        if url is None:
+            raise ValueError("Parameter 'url' must not be None.")
+
+        self.url = url
+        self.x_ms_lease_duration = x_ms_lease_duration
+        self.resource = resource
+        self.version = version
+        kwargs.setdefault("sdk_moniker", "azuredatalakestoragerestapi/{}".format(VERSION))
+        self.polling_interval = kwargs.get("polling_interval", 30)
+        self._configure(**kwargs)
+
+    def _configure(self, **kwargs: Any) -> None:
+        self.user_agent_policy = kwargs.get("user_agent_policy") or policies.UserAgentPolicy(**kwargs)
+        self.headers_policy = kwargs.get("headers_policy") or policies.HeadersPolicy(**kwargs)
+        self.proxy_policy = kwargs.get("proxy_policy") or policies.ProxyPolicy(**kwargs)
+        self.logging_policy = kwargs.get("logging_policy") or policies.NetworkTraceLoggingPolicy(**kwargs)
+        self.http_logging_policy = kwargs.get("http_logging_policy") or policies.HttpLoggingPolicy(**kwargs)
+        self.custom_hook_policy = kwargs.get("custom_hook_policy") or policies.CustomHookPolicy(**kwargs)
+        self.redirect_policy = kwargs.get("redirect_policy") or policies.AsyncRedirectPolicy(**kwargs)
+        self.retry_policy = kwargs.get("retry_policy") or policies.AsyncRetryPolicy(**kwargs)
+        self.authentication_policy = kwargs.get("authentication_policy")
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/aio/_patch.py b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/aio/_patch.py
new file mode 100644
index 00000000..f7dd3251
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/aio/_patch.py
@@ -0,0 +1,20 @@
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------
+"""Customize generated code here.
+
+Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize
+"""
+from typing import List
+
+__all__: List[str] = []  # Add all objects you want publicly available to users at this package level
+
+
+def patch_sdk():
+    """Do not remove from this file.
+
+    `patch_sdk` is a last resort escape hatch that allows you to do customizations
+    you can't accomplish using the techniques described in
+    https://aka.ms/azsdk/python/dpcodegen/python/customize
+    """
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/aio/operations/__init__.py b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/aio/operations/__init__.py
new file mode 100644
index 00000000..56a7ece3
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/aio/operations/__init__.py
@@ -0,0 +1,29 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+# pylint: disable=wrong-import-position
+
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from ._patch import *  # pylint: disable=unused-wildcard-import
+
+from ._service_operations import ServiceOperations  # type: ignore
+from ._file_system_operations import FileSystemOperations  # type: ignore
+from ._path_operations import PathOperations  # type: ignore
+
+from ._patch import __all__ as _patch_all
+from ._patch import *
+from ._patch import patch_sdk as _patch_sdk
+
+__all__ = [
+    "ServiceOperations",
+    "FileSystemOperations",
+    "PathOperations",
+]
+__all__.extend([p for p in _patch_all if p not in __all__])  # pyright: ignore
+_patch_sdk()
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/aio/operations/_file_system_operations.py b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/aio/operations/_file_system_operations.py
new file mode 100644
index 00000000..ee562931
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/aio/operations/_file_system_operations.py
@@ -0,0 +1,628 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+import sys
+from typing import Any, Callable, Dict, List, Literal, Optional, TypeVar, Union
+
+from azure.core import AsyncPipelineClient
+from azure.core.exceptions import (
+    ClientAuthenticationError,
+    HttpResponseError,
+    ResourceExistsError,
+    ResourceNotFoundError,
+    ResourceNotModifiedError,
+    map_error,
+)
+from azure.core.pipeline import PipelineResponse
+from azure.core.rest import AsyncHttpResponse, HttpRequest
+from azure.core.tracing.decorator_async import distributed_trace_async
+from azure.core.utils import case_insensitive_dict
+
+from ... import models as _models
+from ..._serialization import Deserializer, Serializer
+from ...operations._file_system_operations import (
+    build_create_request,
+    build_delete_request,
+    build_get_properties_request,
+    build_list_blob_hierarchy_segment_request,
+    build_list_paths_request,
+    build_set_properties_request,
+)
+from .._configuration import AzureDataLakeStorageRESTAPIConfiguration
+
+if sys.version_info >= (3, 9):
+    from collections.abc import MutableMapping
+else:
+    from typing import MutableMapping  # type: ignore
+T = TypeVar("T")
+ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
+
+
+class FileSystemOperations:
+    """
+    .. warning::
+        **DO NOT** instantiate this class directly.
+
+        Instead, you should access the following operations through
+        :class:`~azure.storage.filedatalake.aio.AzureDataLakeStorageRESTAPI`'s
+        :attr:`file_system` attribute.
+    """
+
+    models = _models
+
+    def __init__(self, *args, **kwargs) -> None:
+        input_args = list(args)
+        self._client: AsyncPipelineClient = input_args.pop(0) if input_args else kwargs.pop("client")
+        self._config: AzureDataLakeStorageRESTAPIConfiguration = (
+            input_args.pop(0) if input_args else kwargs.pop("config")
+        )
+        self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer")
+        self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer")
+
+    @distributed_trace_async
+    async def create(
+        self,
+        request_id_parameter: Optional[str] = None,
+        timeout: Optional[int] = None,
+        properties: Optional[str] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """Create FileSystem.
+
+        Create a FileSystem rooted at the specified location. If the FileSystem already exists, the
+        operation fails.  This operation does not support conditional HTTP requests.
+
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param properties: Optional. User-defined properties to be stored with the filesystem, in the
+         format of a comma-separated list of name and value pairs "n1=v1, n2=v2, ...", where each value
+         is a base64 encoded string. Note that the string may only contain ASCII characters in the
+         ISO-8859-1 character set.  If the filesystem exists, any properties not included in the list
+         will be removed.  All properties are removed if the header is omitted.  To merge new and
+         existing properties, first get all existing properties and the current E-Tag, then make a
+         conditional request with the E-Tag and include values for all properties. Default value is
+         None.
+        :type properties: str
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
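+
+        Example (illustrative; ``client`` is assumed to be an authenticated
+        ``AzureDataLakeStorageRESTAPI`` instance):
+
+        >>> await client.file_system.create(timeout=30)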
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = kwargs.pop("params", {}) or {}
+
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _request = build_create_request(
+            url=self._config.url,
+            request_id_parameter=request_id_parameter,
+            timeout=timeout,
+            properties=properties,
+            resource=self._config.resource,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [201]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["x-ms-namespace-enabled"] = self._deserialize(
+            "str", response.headers.get("x-ms-namespace-enabled")
+        )
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
+    @distributed_trace_async
+    async def set_properties(
+        self,
+        request_id_parameter: Optional[str] = None,
+        timeout: Optional[int] = None,
+        properties: Optional[str] = None,
+        modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """Set FileSystem Properties.
+
+        Set properties for the FileSystem.  This operation supports conditional HTTP requests.  For
+        more information, see `Specifying Conditional Headers for Blob Service Operations
+        <https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations>`_.
+
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param properties: Optional. User-defined properties to be stored with the filesystem, in the
+         format of a comma-separated list of name and value pairs "n1=v1, n2=v2, ...", where each value
+         is a base64 encoded string. Note that the string may only contain ASCII characters in the
+         ISO-8859-1 character set.  If the filesystem exists, any properties not included in the list
+         will be removed.  All properties are removed if the header is omitted.  To merge new and
+         existing properties, first get all existing properties and the current E-Tag, then make a
+         conditional request with the E-Tag and include values for all properties. Default value is
+         None.
+        :type properties: str
+        :param modified_access_conditions: Parameter group. Default value is None.
+        :type modified_access_conditions: ~azure.storage.filedatalake.models.ModifiedAccessConditions
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = kwargs.pop("params", {}) or {}
+
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _if_modified_since = None
+        _if_unmodified_since = None
+        if modified_access_conditions is not None:
+            _if_modified_since = modified_access_conditions.if_modified_since
+            _if_unmodified_since = modified_access_conditions.if_unmodified_since
+
+        _request = build_set_properties_request(
+            url=self._config.url,
+            request_id_parameter=request_id_parameter,
+            timeout=timeout,
+            properties=properties,
+            if_modified_since=_if_modified_since,
+            if_unmodified_since=_if_unmodified_since,
+            resource=self._config.resource,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
+    @distributed_trace_async
+    async def get_properties(
+        self, request_id_parameter: Optional[str] = None, timeout: Optional[int] = None, **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """Get FileSystem Properties.
+
+        All system and user-defined filesystem properties are specified in the response headers.
+
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = kwargs.pop("params", {}) or {}
+
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _request = build_get_properties_request(
+            url=self._config.url,
+            request_id_parameter=request_id_parameter,
+            timeout=timeout,
+            resource=self._config.resource,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["x-ms-properties"] = self._deserialize("str", response.headers.get("x-ms-properties"))
+        response_headers["x-ms-namespace-enabled"] = self._deserialize(
+            "str", response.headers.get("x-ms-namespace-enabled")
+        )
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
+    @distributed_trace_async
+    async def delete(
+        self,
+        request_id_parameter: Optional[str] = None,
+        timeout: Optional[int] = None,
+        modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """Delete FileSystem.
+
+        Marks the FileSystem for deletion.  When a FileSystem is deleted, a FileSystem with the same
+        identifier cannot be created for at least 30 seconds. While the filesystem is being deleted,
+        attempts to create a filesystem with the same identifier will fail with status code 409
+        (Conflict), with the service returning additional error information indicating that the
+        filesystem is being deleted. All other operations, including operations on any files or
+        directories within the filesystem, will fail with status code 404 (Not Found) while the
+        filesystem is being deleted. This operation supports conditional HTTP requests.  For more
+        information, see `Specifying Conditional Headers for Blob Service Operations
+        <https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations>`_.
+
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param modified_access_conditions: Parameter group. Default value is None.
+        :type modified_access_conditions: ~azure.storage.filedatalake.models.ModifiedAccessConditions
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = kwargs.pop("params", {}) or {}
+
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _if_modified_since = None
+        _if_unmodified_since = None
+        if modified_access_conditions is not None:
+            _if_modified_since = modified_access_conditions.if_modified_since
+            _if_unmodified_since = modified_access_conditions.if_unmodified_since
+
+        _request = build_delete_request(
+            url=self._config.url,
+            request_id_parameter=request_id_parameter,
+            timeout=timeout,
+            if_modified_since=_if_modified_since,
+            if_unmodified_since=_if_unmodified_since,
+            resource=self._config.resource,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [202]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
+    @distributed_trace_async
+    async def list_paths(
+        self,
+        recursive: bool,
+        request_id_parameter: Optional[str] = None,
+        timeout: Optional[int] = None,
+        continuation: Optional[str] = None,
+        path: Optional[str] = None,
+        max_results: Optional[int] = None,
+        upn: Optional[bool] = None,
+        **kwargs: Any
+    ) -> _models.PathList:
+        # pylint: disable=line-too-long
+        """List Paths.
+
+        List FileSystem paths and their properties.
+
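+        Example (illustrative; ``client`` is assumed to be an authenticated
+        ``AzureDataLakeStorageRESTAPI`` instance, and ``paths`` the generated
+        ``PathList`` model's path collection):
+
+        >>> path_list = await client.file_system.list_paths(recursive=True)
+        >>> [p.name for p in path_list.paths]
+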
+        :param recursive: If "true", all paths are listed; otherwise, only paths at the root of the
+         filesystem (or of the directory specified by ``path``) are listed. Required.
+        :type recursive: bool
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param continuation: Optional.  When deleting a directory, the number of paths that are deleted
+         with each invocation is limited.  If the number of paths to be deleted exceeds this limit, a
+         continuation token is returned in this response header.  When a continuation token is returned
+         in the response, it must be specified in a subsequent invocation of the delete operation to
+         continue deleting the directory. Default value is None.
+        :type continuation: str
+        :param path: Optional.  Filters results to paths within the specified directory. An error
+         occurs if the directory does not exist. Default value is None.
+        :type path: str
+        :param max_results: An optional value that specifies the maximum number of items to return. If
+         omitted or greater than 5,000, the response will include up to 5,000 items. Default value is
+         None.
+        :type max_results: int
+        :param upn: Optional. Valid only when Hierarchical Namespace is enabled for the account. If
+         "true", the user identity values returned in the x-ms-owner, x-ms-group, and x-ms-acl response
+         headers will be transformed from Azure Active Directory Object IDs to User Principal Names.  If
+         "false", the values will be returned as Azure Active Directory Object IDs. The default value is
+         false. Note that group and application Object IDs are not translated because they do not have
+         unique friendly names. Default value is None.
+        :type upn: bool
+        :return: PathList or the result of cls(response)
+        :rtype: ~azure.storage.filedatalake.models.PathList
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = kwargs.pop("params", {}) or {}
+
+        cls: ClsType[_models.PathList] = kwargs.pop("cls", None)
+
+        _request = build_list_paths_request(
+            url=self._config.url,
+            recursive=recursive,
+            request_id_parameter=request_id_parameter,
+            timeout=timeout,
+            continuation=continuation,
+            path=path,
+            max_results=max_results,
+            upn=upn,
+            resource=self._config.resource,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["x-ms-continuation"] = self._deserialize("str", response.headers.get("x-ms-continuation"))
+
+        deserialized = self._deserialize("PathList", pipeline_response.http_response)
+
+        if cls:
+            return cls(pipeline_response, deserialized, response_headers)  # type: ignore
+
+        return deserialized  # type: ignore
+
+    @distributed_trace_async
+    async def list_blob_hierarchy_segment(
+        self,
+        prefix: Optional[str] = None,
+        delimiter: Optional[str] = None,
+        marker: Optional[str] = None,
+        max_results: Optional[int] = None,
+        include: Optional[List[Union[str, _models.ListBlobsIncludeItem]]] = None,
+        showonly: Literal["deleted"] = "deleted",
+        timeout: Optional[int] = None,
+        request_id_parameter: Optional[str] = None,
+        **kwargs: Any
+    ) -> _models.ListBlobsHierarchySegmentResponse:
+        # pylint: disable=line-too-long
+        """The List Blobs operation returns a list of the blobs under the specified container.
+
+        :param prefix: Filters results to blobs whose names begin with the specified prefix. Default
+         value is None.
+        :type prefix: str
+        :param delimiter: When the request includes this parameter, the operation returns a BlobPrefix
+         element in the response body that acts as a placeholder for all blobs whose names begin with
+         the same substring up to the appearance of the delimiter character. The delimiter may be a
+         single character or a string. Default value is None.
+        :type delimiter: str
+        :param marker: A string value that identifies the portion of the list of blobs to be returned
+         with the next listing operation. The operation returns the NextMarker value within the
+         response body if the listing operation did not return all blobs remaining to be listed with
+         the current page. The NextMarker value can be used as the value for the marker parameter
+         in a subsequent call to request the next page of list items. The marker value is opaque to the
+         client. Default value is None.
+        :type marker: str
+        :param max_results: An optional value that specifies the maximum number of items to return. If
+         omitted or greater than 5,000, the response will include up to 5,000 items. Default value is
+         None.
+        :type max_results: int
+        :param include: Include this parameter to specify one or more datasets to include in the
+         response. Default value is None.
+        :type include: list[str or ~azure.storage.filedatalake.models.ListBlobsIncludeItem]
+        :param showonly: Include this parameter to filter the response to show only soft-deleted
+         blobs. The only known value is "deleted". Default value is "deleted".
+        :type showonly: str
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :return: ListBlobsHierarchySegmentResponse or the result of cls(response)
+        :rtype: ~azure.storage.filedatalake.models.ListBlobsHierarchySegmentResponse
+        :raises ~azure.core.exceptions.HttpResponseError:
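+
+        A minimal usage sketch (an assumption, not shipped sample code): group
+        blobs by virtual directory using "/" as the delimiter. Attribute names on
+        the response model are assumptions::
+
+            segment = await client.file_system.list_blob_hierarchy_segment(
+                prefix="logs/", delimiter="/", max_results=100
+            )
+            for blob_prefix in segment.segment.blob_prefixes or []:
+                print(blob_prefix.name)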
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        restype: Literal["container"] = kwargs.pop("restype", _params.pop("restype", "container"))
+        comp: Literal["list"] = kwargs.pop("comp", _params.pop("comp", "list"))
+        cls: ClsType[_models.ListBlobsHierarchySegmentResponse] = kwargs.pop("cls", None)
+
+        _request = build_list_blob_hierarchy_segment_request(
+            url=self._config.url,
+            prefix=prefix,
+            delimiter=delimiter,
+            marker=marker,
+            max_results=max_results,
+            include=include,
+            showonly=showonly,
+            timeout=timeout,
+            request_id_parameter=request_id_parameter,
+            restype=restype,
+            comp=comp,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["Content-Type"] = self._deserialize("str", response.headers.get("Content-Type"))
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+
+        deserialized = self._deserialize("ListBlobsHierarchySegmentResponse", pipeline_response.http_response)
+
+        if cls:
+            return cls(pipeline_response, deserialized, response_headers)  # type: ignore
+
+        return deserialized  # type: ignore
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/aio/operations/_patch.py b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/aio/operations/_patch.py
new file mode 100644
index 00000000..f7dd3251
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/aio/operations/_patch.py
@@ -0,0 +1,20 @@
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------
+"""Customize generated code here.
+
+Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize
+"""
+from typing import List
+
+__all__: List[str] = []  # Add all objects you want publicly available to users at this package level
+
+
+def patch_sdk():
+    """Do not remove from this file.
+
+    `patch_sdk` is a last resort escape hatch that allows you to do customizations
+    you can't accomplish using the techniques described in
+    https://aka.ms/azsdk/python/dpcodegen/python/customize
+    """
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/aio/operations/_path_operations.py b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/aio/operations/_path_operations.py
new file mode 100644
index 00000000..d3ed5c3c
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/aio/operations/_path_operations.py
@@ -0,0 +1,1968 @@
+# pylint: disable=too-many-lines
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+import sys
+from typing import Any, AsyncIterator, Callable, Dict, IO, Literal, Optional, TypeVar, Union
+
+from azure.core import AsyncPipelineClient
+from azure.core.exceptions import (
+    ClientAuthenticationError,
+    HttpResponseError,
+    ResourceExistsError,
+    ResourceNotFoundError,
+    ResourceNotModifiedError,
+    StreamClosedError,
+    StreamConsumedError,
+    map_error,
+)
+from azure.core.pipeline import PipelineResponse
+from azure.core.rest import AsyncHttpResponse, HttpRequest
+from azure.core.tracing.decorator_async import distributed_trace_async
+from azure.core.utils import case_insensitive_dict
+
+from ... import models as _models
+from ..._serialization import Deserializer, Serializer
+from ...operations._path_operations import (
+    build_append_data_request,
+    build_create_request,
+    build_delete_request,
+    build_flush_data_request,
+    build_get_properties_request,
+    build_lease_request,
+    build_read_request,
+    build_set_access_control_recursive_request,
+    build_set_access_control_request,
+    build_set_expiry_request,
+    build_undelete_request,
+    build_update_request,
+)
+from .._configuration import AzureDataLakeStorageRESTAPIConfiguration
+
+if sys.version_info >= (3, 9):
+    from collections.abc import MutableMapping
+else:
+    from typing import MutableMapping  # type: ignore
+T = TypeVar("T")
+ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
+
+
+class PathOperations:
+    """
+    .. warning::
+        **DO NOT** instantiate this class directly.
+
+        Instead, you should access the following operations through
+        :class:`~azure.storage.filedatalake.aio.AzureDataLakeStorageRESTAPI`'s
+        :attr:`path` attribute.
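+
+    A minimal access sketch (constructor arguments here are assumptions; only
+    the ``path`` attribute is documented above)::
+
+        client = AzureDataLakeStorageRESTAPI(url=account_url)
+        props = await client.path.get_properties()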
+    """
+
+    models = _models
+
+    def __init__(self, *args, **kwargs) -> None:
+        input_args = list(args)
+        self._client: AsyncPipelineClient = input_args.pop(0) if input_args else kwargs.pop("client")
+        self._config: AzureDataLakeStorageRESTAPIConfiguration = (
+            input_args.pop(0) if input_args else kwargs.pop("config")
+        )
+        self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer")
+        self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer")
+
+    @distributed_trace_async
+    async def create(
+        self,
+        request_id_parameter: Optional[str] = None,
+        timeout: Optional[int] = None,
+        resource: Optional[Union[str, _models.PathResourceType]] = None,
+        continuation: Optional[str] = None,
+        mode: Optional[Union[str, _models.PathRenameMode]] = None,
+        rename_source: Optional[str] = None,
+        source_lease_id: Optional[str] = None,
+        properties: Optional[str] = None,
+        permissions: Optional[str] = None,
+        umask: Optional[str] = None,
+        owner: Optional[str] = None,
+        group: Optional[str] = None,
+        acl: Optional[str] = None,
+        proposed_lease_id: Optional[str] = None,
+        lease_duration: Optional[int] = None,
+        expiry_options: Optional[Union[str, _models.PathExpiryOptions]] = None,
+        expires_on: Optional[str] = None,
+        encryption_context: Optional[str] = None,
+        path_http_headers: Optional[_models.PathHTTPHeaders] = None,
+        lease_access_conditions: Optional[_models.LeaseAccessConditions] = None,
+        modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None,
+        source_modified_access_conditions: Optional[_models.SourceModifiedAccessConditions] = None,
+        cpk_info: Optional[_models.CpkInfo] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """Create File | Create Directory | Rename File | Rename Directory.
+
+        Create or rename a file or directory.  By default, the destination is overwritten; if the
+        destination already exists and has a lease, the lease is broken.  This operation supports
+        conditional HTTP requests.  For more information, see `Specifying Conditional Headers for Blob
+        Service Operations
+        <https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations>`_.
+        To fail if the destination already exists, use a conditional request with If-None-Match: "*".
+
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param resource: Required only for Create File and Create Directory. The value must be "file"
+         or "directory". Known values are: "directory" and "file". Default value is None.
+        :type resource: str or ~azure.storage.filedatalake.models.PathResourceType
+        :param continuation: Optional.  When deleting a directory, the number of paths that are deleted
+         with each invocation is limited.  If the number of paths to be deleted exceeds this limit, a
+         continuation token is returned in this response header.  When a continuation token is returned
+         in the response, it must be specified in a subsequent invocation of the delete operation to
+         continue deleting the directory. Default value is None.
+        :type continuation: str
+        :param mode: Optional. Valid only when namespace is enabled. This parameter determines the
+         behavior of the rename operation. The value must be "legacy" or "posix", and the default value
+         will be "posix". Known values are: "legacy" and "posix". Default value is None.
+        :type mode: str or ~azure.storage.filedatalake.models.PathRenameMode
+        :param rename_source: An optional file or directory to be renamed.  The value must have the
+         following format: "/{filesystem}/{path}".  If "x-ms-properties" is specified, the properties
+         will overwrite the existing properties; otherwise, the existing properties will be preserved.
+         This value must be a URL percent-encoded string. Note that the string may only contain ASCII
+         characters in the ISO-8859-1 character set. Default value is None.
+        :type rename_source: str
+        :param source_lease_id: A lease ID for the source path. If specified, the source path must have
+         an active lease and the lease ID must match. Default value is None.
+        :type source_lease_id: str
+        :param properties: Optional. User-defined properties to be stored with the file or directory, in the
+         format of a comma-separated list of name and value pairs "n1=v1, n2=v2, ...", where each value
+         is a base64 encoded string. Note that the string may only contain ASCII characters in the
+         ISO-8859-1 character set.  If the file or directory exists, any properties not included in the list
+         will be removed.  All properties are removed if the header is omitted.  To merge new and
+         existing properties, first get all existing properties and the current E-Tag, then make a
+         conditional request with the E-Tag and include values for all properties. Default value is
+         None.
+        :type properties: str
+        :param permissions: Optional and only valid if Hierarchical Namespace is enabled for the
+         account. Sets POSIX access permissions for the file owner, the file owning group, and others.
+         Each class may be granted read, write, or execute permission.  The sticky bit is also
+         supported.  Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are supported.
+         Default value is None.
+        :type permissions: str
+        :param umask: Optional and only valid if Hierarchical Namespace is enabled for the account.
+         When creating a file or directory and the parent folder does not have a default ACL, the umask
+         restricts the permissions of the file or directory to be created.  The resulting permission is
+         given by p bitwise and not u, where p is the permission and u is the umask.  For example, if p
+         is 0777 and u is 0057, then the resulting permission is 0720.  The default permission is 0777
+         for a directory and 0666 for a file.  The default umask is 0027.  The umask must be specified
+         in 4-digit octal notation (e.g. 0766). Default value is None.
+        :type umask: str
+        :param owner: Optional. The owner of the blob or directory. Default value is None.
+        :type owner: str
+        :param group: Optional. The owning group of the blob or directory. Default value is None.
+        :type group: str
+        :param acl: Sets POSIX access control rights on files and directories. The value is a
+         comma-separated list of access control entries. Each access control entry (ACE) consists of a
+         scope, a type, a user or group identifier, and permissions in the format
+         "[scope:][type]:[id]:[permissions]". Default value is None.
+        :type acl: str
+        :param proposed_lease_id: Proposed lease ID, in a GUID string format. The Blob service returns
+         400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid
+         Constructor (String) for a list of valid GUID string formats. Default value is None.
+        :type proposed_lease_id: str
+        :param lease_duration: The lease duration is required to acquire a lease, and specifies the
+         duration of the lease in seconds.  The lease duration must be between 15 and 60 seconds or -1
+         for infinite lease. Default value is None.
+        :type lease_duration: int
+        :param expiry_options: Indicates the mode of the expiry time; required when setting an expiry.
+         Known values are: "NeverExpire", "RelativeToCreation", "RelativeToNow", and "Absolute".
+         Default value is None.
+        :type expiry_options: str or ~azure.storage.filedatalake.models.PathExpiryOptions
+        :param expires_on: The time to set the blob to expiry. Default value is None.
+        :type expires_on: str
+        :param encryption_context: Specifies the encryption context to set on the file. Default value
+         is None.
+        :type encryption_context: str
+        :param path_http_headers: Parameter group. Default value is None.
+        :type path_http_headers: ~azure.storage.filedatalake.models.PathHTTPHeaders
+        :param lease_access_conditions: Parameter group. Default value is None.
+        :type lease_access_conditions: ~azure.storage.filedatalake.models.LeaseAccessConditions
+        :param modified_access_conditions: Parameter group. Default value is None.
+        :type modified_access_conditions: ~azure.storage.filedatalake.models.ModifiedAccessConditions
+        :param source_modified_access_conditions: Parameter group. Default value is None.
+        :type source_modified_access_conditions:
+         ~azure.storage.filedatalake.models.SourceModifiedAccessConditions
+        :param cpk_info: Parameter group. Default value is None.
+        :type cpk_info: ~azure.storage.filedatalake.models.CpkInfo
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
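+
+        A minimal usage sketch (an assumption, not shipped sample code): create a
+        file and fail if the destination already exists, using the conditional
+        request described above. ``client.path.models`` is the ``models``
+        attribute set on this class::
+
+            cond = client.path.models.ModifiedAccessConditions(if_none_match="*")
+            await client.path.create(
+                resource="file",
+                permissions="rwxr-x---",
+                umask="0027",  # effective mode is permissions & ~umask
+                modified_access_conditions=cond,
+            )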
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = kwargs.pop("params", {}) or {}
+
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _cache_control = None
+        _content_encoding = None
+        _content_language = None
+        _content_disposition = None
+        _content_type_parameter = None
+        _lease_id = None
+        _if_match = None
+        _if_none_match = None
+        _if_modified_since = None
+        _if_unmodified_since = None
+        _source_if_match = None
+        _source_if_none_match = None
+        _source_if_modified_since = None
+        _source_if_unmodified_since = None
+        _encryption_key = None
+        _encryption_key_sha256 = None
+        _encryption_algorithm = None
+        if path_http_headers is not None:
+            _cache_control = path_http_headers.cache_control
+            _content_disposition = path_http_headers.content_disposition
+            _content_encoding = path_http_headers.content_encoding
+            _content_language = path_http_headers.content_language
+            _content_type_parameter = path_http_headers.content_type
+        if lease_access_conditions is not None:
+            _lease_id = lease_access_conditions.lease_id
+        if modified_access_conditions is not None:
+            _if_match = modified_access_conditions.if_match
+            _if_modified_since = modified_access_conditions.if_modified_since
+            _if_none_match = modified_access_conditions.if_none_match
+            _if_unmodified_since = modified_access_conditions.if_unmodified_since
+        if source_modified_access_conditions is not None:
+            _source_if_match = source_modified_access_conditions.source_if_match
+            _source_if_modified_since = source_modified_access_conditions.source_if_modified_since
+            _source_if_none_match = source_modified_access_conditions.source_if_none_match
+            _source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since
+        if cpk_info is not None:
+            _encryption_algorithm = cpk_info.encryption_algorithm
+            _encryption_key = cpk_info.encryption_key
+            _encryption_key_sha256 = cpk_info.encryption_key_sha256
+
+        _request = build_create_request(
+            url=self._config.url,
+            request_id_parameter=request_id_parameter,
+            timeout=timeout,
+            resource=resource,
+            continuation=continuation,
+            mode=mode,
+            cache_control=_cache_control,
+            content_encoding=_content_encoding,
+            content_language=_content_language,
+            content_disposition=_content_disposition,
+            content_type_parameter=_content_type_parameter,
+            rename_source=rename_source,
+            lease_id=_lease_id,
+            source_lease_id=source_lease_id,
+            properties=properties,
+            permissions=permissions,
+            umask=umask,
+            if_match=_if_match,
+            if_none_match=_if_none_match,
+            if_modified_since=_if_modified_since,
+            if_unmodified_since=_if_unmodified_since,
+            source_if_match=_source_if_match,
+            source_if_none_match=_source_if_none_match,
+            source_if_modified_since=_source_if_modified_since,
+            source_if_unmodified_since=_source_if_unmodified_since,
+            encryption_key=_encryption_key,
+            encryption_key_sha256=_encryption_key_sha256,
+            encryption_algorithm=_encryption_algorithm,  # type: ignore
+            owner=owner,
+            group=group,
+            acl=acl,
+            proposed_lease_id=proposed_lease_id,
+            lease_duration=lease_duration,
+            expiry_options=expiry_options,
+            expires_on=expires_on,
+            encryption_context=encryption_context,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [201]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["x-ms-continuation"] = self._deserialize("str", response.headers.get("x-ms-continuation"))
+        response_headers["Content-Length"] = self._deserialize("int", response.headers.get("Content-Length"))
+        response_headers["x-ms-request-server-encrypted"] = self._deserialize(
+            "bool", response.headers.get("x-ms-request-server-encrypted")
+        )
+        response_headers["x-ms-encryption-key-sha256"] = self._deserialize(
+            "str", response.headers.get("x-ms-encryption-key-sha256")
+        )
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
+    @distributed_trace_async
+    async def update(
+        self,
+        action: Union[str, _models.PathUpdateAction],
+        mode: Union[str, _models.PathSetAccessControlRecursiveMode],
+        body: IO[bytes],
+        request_id_parameter: Optional[str] = None,
+        timeout: Optional[int] = None,
+        max_records: Optional[int] = None,
+        continuation: Optional[str] = None,
+        force_flag: Optional[bool] = None,
+        position: Optional[int] = None,
+        retain_uncommitted_data: Optional[bool] = None,
+        close: Optional[bool] = None,
+        content_length: Optional[int] = None,
+        properties: Optional[str] = None,
+        owner: Optional[str] = None,
+        group: Optional[str] = None,
+        permissions: Optional[str] = None,
+        acl: Optional[str] = None,
+        structured_body_type: Optional[str] = None,
+        structured_content_length: Optional[int] = None,
+        path_http_headers: Optional[_models.PathHTTPHeaders] = None,
+        lease_access_conditions: Optional[_models.LeaseAccessConditions] = None,
+        modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None,
+        **kwargs: Any
+    ) -> Optional[_models.SetAccessControlRecursiveResponse]:
+        # pylint: disable=line-too-long
+        """Append Data | Flush Data | Set Properties | Set Access Control.
+
+        Uploads data to be appended to a file, flushes (writes) previously uploaded data to a file,
+        sets properties for a file or directory, or sets access control for a file or directory. Data
+        can only be appended to a file. Concurrent writes to the same file using multiple clients are
+        not supported. This operation supports conditional HTTP requests. For more information, see
+        `Specifying Conditional Headers for Blob Service Operations
+        <https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations>`_.
+
+        :param action: The action must be "append" to upload data to be appended to a file, "flush" to
+         flush previously uploaded data to a file, "setProperties" to set the properties of a file or
+         directory, "setAccessControl" to set the owner, group, permissions, or access control list for
+         a file or directory, or  "setAccessControlRecursive" to set the access control list for a
+         directory recursively. Note that Hierarchical Namespace must be enabled for the account in
+         order to use access control.  Also note that the Access Control List (ACL) includes permissions
+         for the owner, owning group, and others, so the x-ms-permissions and x-ms-acl request headers
+         are mutually exclusive. Known values are: "append", "flush", "setProperties",
+         "setAccessControl", and "setAccessControlRecursive". Required.
+        :type action: str or ~azure.storage.filedatalake.models.PathUpdateAction
+        :param mode: Mode "set" sets POSIX access control rights on files and directories, "modify"
+         modifies one or more POSIX access control rights  that pre-exist on files and directories,
+         "remove" removes one or more POSIX access control rights  that were present earlier on files
+         and directories. Known values are: "set", "modify", and "remove". Required.
+        :type mode: str or ~azure.storage.filedatalake.models.PathSetAccessControlRecursiveMode
+        :param body: Initial data. Required.
+        :type body: IO[bytes]
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param max_records: Optional. Valid for "SetAccessControlRecursive" operation. It specifies the
+         maximum number of files or directories on which the acl change will be applied. If omitted or
+         greater than 2,000, the request will process up to 2,000 items. Default value is None.
+        :type max_records: int
+        :param continuation: Optional. The number of paths processed with each invocation is limited.
+         If the number of paths to be processed exceeds this limit, a continuation token is returned in
+         the response header x-ms-continuation. When a continuation token is returned in the response,
+         it must be percent-encoded and specified in a subsequent invocation of the
+         setAccessControlRecursive operation. Default value is None.
+        :type continuation: str
+        :param force_flag: Optional. Valid for "SetAccessControlRecursive" operation. If set to false,
+         the operation will terminate quickly on encountering user errors (4XX). If true, the operation
+         will ignore user errors and proceed with the operation on other sub-entities of the directory.
+         A continuation token will only be returned when forceFlag is true and user errors have been
+         encountered. If not set, the default value is false. Default value is None.
+        :type force_flag: bool
+        :param position: This parameter allows the caller to upload data in parallel and control the
+         order in which it is appended to the file.  It is required when uploading data to be appended
+         to the file and when flushing previously uploaded data to the file.  The value must be the
+         position where the data is to be appended.  Uploaded data is not immediately flushed, or
+         written, to the file.  To flush, the previously uploaded data must be contiguous, the position
+         parameter must be specified and equal to the length of the file after all data has been
+         written, and there must not be a request entity body included with the request. Default value
+         is None.
+        :type position: int
+        :param retain_uncommitted_data: Valid only for flush operations.  If "true", uncommitted data
+         is retained after the flush operation completes; otherwise, the uncommitted data is deleted
+         after the flush operation.  The default is false.  Data at offsets less than the specified
+         position are written to the file when flush succeeds, but this optional parameter allows data
+         after the flush position to be retained for a future flush operation. Default value is None.
+        :type retain_uncommitted_data: bool
+        :param close: Azure Storage Events allow applications to receive notifications when files
+         change. When Azure Storage Events are enabled, a file changed event is raised. This event has a
+         property indicating whether this is the final change to distinguish the difference between an
+         intermediate flush to a file stream and the final close of a file stream. The close query
+         parameter is valid only when the action is "flush" and change notifications are enabled. If the
+         value of close is "true" and the flush operation completes successfully, the service raises a
+         file change notification with a property indicating that this is the final update (the file
+         stream has been closed). If "false", a change notification is raised indicating the file has
+         changed. The default is false. This query parameter is set to true by the Hadoop ABFS driver to
+         indicate that the file stream has been closed. Default value is None.
+        :type close: bool
+        :param content_length: Required for "Append Data" and "Flush Data".  Must be 0 for "Flush
+         Data".  Must be the length of the request content in bytes for "Append Data". Default value is
+         None.
+        :type content_length: int
+        :param properties: Optional. User-defined properties to be stored with the file or directory, in the
+         format of a comma-separated list of name and value pairs "n1=v1, n2=v2, ...", where each value
+         is a base64 encoded string. Note that the string may only contain ASCII characters in the
+         ISO-8859-1 character set.  If the file or directory exists, any properties not included in the list
+         will be removed.  All properties are removed if the header is omitted.  To merge new and
+         existing properties, first get all existing properties and the current E-Tag, then make a
+         conditional request with the E-Tag and include values for all properties. Default value is
+         None.
+        :type properties: str
+        :param owner: Optional. The owner of the blob or directory. Default value is None.
+        :type owner: str
+        :param group: Optional. The owning group of the blob or directory. Default value is None.
+        :type group: str
+        :param permissions: Optional and only valid if Hierarchical Namespace is enabled for the
+         account. Sets POSIX access permissions for the file owner, the file owning group, and others.
+         Each class may be granted read, write, or execute permission.  The sticky bit is also
+         supported.  Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are supported.
+         Default value is None.
+        :type permissions: str
+        :param acl: Sets POSIX access control rights on files and directories. The value is a
+         comma-separated list of access control entries. Each access control entry (ACE) consists of a
+         scope, a type, a user or group identifier, and permissions in the format
+         "[scope:][type]:[id]:[permissions]". Default value is None.
+        :type acl: str
+        :param structured_body_type: Required if the request body is a structured message. Specifies
+         the message schema version and properties. Default value is None.
+        :type structured_body_type: str
+        :param structured_content_length: Required if the request body is a structured message.
+         Specifies the length of the blob/file content inside the message body. Will always be smaller
+         than Content-Length. Default value is None.
+        :type structured_content_length: int
+        :param path_http_headers: Parameter group. Default value is None.
+        :type path_http_headers: ~azure.storage.filedatalake.models.PathHTTPHeaders
+        :param lease_access_conditions: Parameter group. Default value is None.
+        :type lease_access_conditions: ~azure.storage.filedatalake.models.LeaseAccessConditions
+        :param modified_access_conditions: Parameter group. Default value is None.
+        :type modified_access_conditions: ~azure.storage.filedatalake.models.ModifiedAccessConditions
+        :return: SetAccessControlRecursiveResponse or None or the result of cls(response)
+        :rtype: ~azure.storage.filedatalake.models.SetAccessControlRecursiveResponse or None
+        :raises ~azure.core.exceptions.HttpResponseError:
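+
+        A minimal usage sketch (an assumption, not shipped sample code): append
+        bytes to a file, then flush them. ``mode`` is required by the generated
+        signature even for append/flush, so "set" is passed as a placeholder::
+
+            import io
+
+            data = b"hello"
+            await client.path.update(
+                action="append", mode="set", body=io.BytesIO(data),
+                position=0, content_length=len(data),
+            )
+            await client.path.update(
+                action="flush", mode="set", body=io.BytesIO(b""),
+                position=len(data), content_length=0,
+            )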
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+        _params = kwargs.pop("params", {}) or {}
+
+        content_type: str = kwargs.pop("content_type", _headers.pop("Content-Type", "application/octet-stream"))
+        cls: ClsType[Optional[_models.SetAccessControlRecursiveResponse]] = kwargs.pop("cls", None)
+
+        _content_md5 = None
+        _lease_id = None
+        _cache_control = None
+        _content_type_parameter = None
+        _content_disposition = None
+        _content_encoding = None
+        _content_language = None
+        _if_match = None
+        _if_none_match = None
+        _if_modified_since = None
+        _if_unmodified_since = None
+        if path_http_headers is not None:
+            _cache_control = path_http_headers.cache_control
+            _content_disposition = path_http_headers.content_disposition
+            _content_encoding = path_http_headers.content_encoding
+            _content_language = path_http_headers.content_language
+            _content_md5 = path_http_headers.content_md5
+            _content_type_parameter = path_http_headers.content_type
+        if lease_access_conditions is not None:
+            _lease_id = lease_access_conditions.lease_id
+        if modified_access_conditions is not None:
+            _if_match = modified_access_conditions.if_match
+            _if_modified_since = modified_access_conditions.if_modified_since
+            _if_none_match = modified_access_conditions.if_none_match
+            _if_unmodified_since = modified_access_conditions.if_unmodified_since
+        _content = body
+
+        _request = build_update_request(
+            url=self._config.url,
+            action=action,
+            mode=mode,
+            request_id_parameter=request_id_parameter,
+            timeout=timeout,
+            max_records=max_records,
+            continuation=continuation,
+            force_flag=force_flag,
+            position=position,
+            retain_uncommitted_data=retain_uncommitted_data,
+            close=close,
+            content_length=content_length,
+            content_md5=_content_md5,
+            lease_id=_lease_id,
+            cache_control=_cache_control,
+            content_type_parameter=_content_type_parameter,
+            content_disposition=_content_disposition,
+            content_encoding=_content_encoding,
+            content_language=_content_language,
+            properties=properties,
+            owner=owner,
+            group=group,
+            permissions=permissions,
+            acl=acl,
+            if_match=_if_match,
+            if_none_match=_if_none_match,
+            if_modified_since=_if_modified_since,
+            if_unmodified_since=_if_unmodified_since,
+            structured_body_type=structured_body_type,
+            structured_content_length=structured_content_length,
+            content_type=content_type,
+            version=self._config.version,
+            content=_content,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200, 202]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        deserialized = None
+        response_headers = {}
+        if response.status_code == 200:
+            response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+            response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+            response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+            response_headers["Accept-Ranges"] = self._deserialize("str", response.headers.get("Accept-Ranges"))
+            response_headers["Cache-Control"] = self._deserialize("str", response.headers.get("Cache-Control"))
+            response_headers["Content-Disposition"] = self._deserialize(
+                "str", response.headers.get("Content-Disposition")
+            )
+            response_headers["Content-Encoding"] = self._deserialize("str", response.headers.get("Content-Encoding"))
+            response_headers["Content-Language"] = self._deserialize("str", response.headers.get("Content-Language"))
+            response_headers["Content-Length"] = self._deserialize("int", response.headers.get("Content-Length"))
+            response_headers["Content-Range"] = self._deserialize("str", response.headers.get("Content-Range"))
+            response_headers["Content-Type"] = self._deserialize("str", response.headers.get("Content-Type"))
+            response_headers["Content-MD5"] = self._deserialize("str", response.headers.get("Content-MD5"))
+            response_headers["x-ms-properties"] = self._deserialize("str", response.headers.get("x-ms-properties"))
+            response_headers["x-ms-continuation"] = self._deserialize("str", response.headers.get("x-ms-continuation"))
+            response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+            response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+
+            deserialized = self._deserialize("SetAccessControlRecursiveResponse", pipeline_response.http_response)
+
+        if response.status_code == 202:
+            response_headers["Content-MD5"] = self._deserialize("str", response.headers.get("Content-MD5"))
+            response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+            response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+            response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+            response_headers["x-ms-structured-body"] = self._deserialize(
+                "str", response.headers.get("x-ms-structured-body")
+            )
+
+        if cls:
+            return cls(pipeline_response, deserialized, response_headers)  # type: ignore
+
+        return deserialized  # type: ignore
+
+    @distributed_trace_async
+    async def lease(
+        self,
+        x_ms_lease_action: Union[str, _models.PathLeaseAction],
+        request_id_parameter: Optional[str] = None,
+        timeout: Optional[int] = None,
+        x_ms_lease_break_period: Optional[int] = None,
+        proposed_lease_id: Optional[str] = None,
+        lease_access_conditions: Optional[_models.LeaseAccessConditions] = None,
+        modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """Lease Path.
+
+        Create and manage a lease to restrict write and delete access to the path. This operation
+        supports conditional HTTP requests.  For more information, see `Specifying Conditional Headers
+        for Blob Service Operations
+        <https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations>`_.
+
+        :param x_ms_lease_action: There are five lease actions: "acquire", "break", "change", "renew",
+         and "release". Use "acquire" and specify the "x-ms-proposed-lease-id" and "x-ms-lease-duration"
+         to acquire a new lease. Use "break" to break an existing lease. When a lease is broken, the
+         lease break period is allowed to elapse, during which time no lease operation except break and
+         release can be performed on the file. When a lease is successfully broken, the response
+         indicates the interval in seconds until a new lease can be acquired. Use "change" and specify
+         the current lease ID in "x-ms-lease-id" and the new lease ID in "x-ms-proposed-lease-id" to
+         change the lease ID of an active lease. Use "renew" and specify the "x-ms-lease-id" to renew an
+         existing lease. Use "release" and specify the "x-ms-lease-id" to release a lease. Known values
+         are: "acquire", "break", "change", "renew", "release", and "break". Required.
+        :type x_ms_lease_action: str or ~azure.storage.filedatalake.models.PathLeaseAction
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param x_ms_lease_break_period: Optional when breaking a lease; specifies the break period of
+         the lease in seconds.  The lease break duration must be between 0 and 60 seconds. Default
+         value is None.
+        :type x_ms_lease_break_period: int
+        :param proposed_lease_id: Proposed lease ID, in a GUID string format. The Blob service returns
+         400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid
+         Constructor (String) for a list of valid GUID string formats. Default value is None.
+        :type proposed_lease_id: str
+        :param lease_access_conditions: Parameter group. Default value is None.
+        :type lease_access_conditions: ~azure.storage.filedatalake.models.LeaseAccessConditions
+        :param modified_access_conditions: Parameter group. Default value is None.
+        :type modified_access_conditions: ~azure.storage.filedatalake.models.ModifiedAccessConditions
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
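+
+        A minimal usage sketch (an assumption, not shipped sample code): acquire
+        a lease with a proposed ID. Note that the lease duration is taken from
+        the client configuration (``x_ms_lease_duration``), not from a parameter
+        of this method::
+
+            await client.path.lease(
+                x_ms_lease_action="acquire",
+                proposed_lease_id="11111111-1111-1111-1111-111111111111",
+            )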
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = kwargs.pop("params", {}) or {}
+
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _lease_id = None
+        _if_match = None
+        _if_none_match = None
+        _if_modified_since = None
+        _if_unmodified_since = None
+        if lease_access_conditions is not None:
+            _lease_id = lease_access_conditions.lease_id
+        if modified_access_conditions is not None:
+            _if_match = modified_access_conditions.if_match
+            _if_modified_since = modified_access_conditions.if_modified_since
+            _if_none_match = modified_access_conditions.if_none_match
+            _if_unmodified_since = modified_access_conditions.if_unmodified_since
+
+        _request = build_lease_request(
+            url=self._config.url,
+            x_ms_lease_action=x_ms_lease_action,
+            request_id_parameter=request_id_parameter,
+            timeout=timeout,
+            x_ms_lease_break_period=x_ms_lease_break_period,
+            lease_id=_lease_id,
+            proposed_lease_id=proposed_lease_id,
+            if_match=_if_match,
+            if_none_match=_if_none_match,
+            if_modified_since=_if_modified_since,
+            if_unmodified_since=_if_unmodified_since,
+            x_ms_lease_duration=self._config.x_ms_lease_duration,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200, 201, 202]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        if response.status_code == 200:
+            response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+            response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+            response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+            response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+            response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+            response_headers["x-ms-lease-id"] = self._deserialize("str", response.headers.get("x-ms-lease-id"))
+
+        if response.status_code == 201:
+            response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+            response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+            response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+            response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+            response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+            response_headers["x-ms-lease-id"] = self._deserialize("str", response.headers.get("x-ms-lease-id"))
+
+        if response.status_code == 202:
+            response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+            response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+            response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+            response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+            response_headers["x-ms-lease-time"] = self._deserialize("str", response.headers.get("x-ms-lease-time"))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
+    @distributed_trace_async
+    async def read(
+        self,
+        request_id_parameter: Optional[str] = None,
+        timeout: Optional[int] = None,
+        range: Optional[str] = None,
+        x_ms_range_get_content_md5: Optional[bool] = None,
+        lease_access_conditions: Optional[_models.LeaseAccessConditions] = None,
+        modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None,
+        cpk_info: Optional[_models.CpkInfo] = None,
+        **kwargs: Any
+    ) -> AsyncIterator[bytes]:
+        # pylint: disable=line-too-long
+        """Read File.
+
+        Read the contents of a file.  For read operations, range requests are supported. This operation
+        supports conditional HTTP requests.  For more information, see `Specifying Conditional Headers
+        for Blob Service Operations
+        <https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations>`_.
+
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param range: The HTTP Range request header specifies one or more byte ranges of the resource
+         to be retrieved. Default value is None.
+        :type range: str
+        :param x_ms_range_get_content_md5: Optional. When this header is set to "true" and specified
+         together with the Range header, the service returns the MD5 hash for the range, as long as the
+         range is less than or equal to 4 MB in size. If this header is specified without the Range
+         header, the service returns status code 400 (Bad Request). If this header is set to true when
+         the range exceeds 4 MB in size, the service returns status code 400 (Bad Request). Default
+         value is None.
+        :type x_ms_range_get_content_md5: bool
+        :param lease_access_conditions: Parameter group. Default value is None.
+        :type lease_access_conditions: ~azure.storage.filedatalake.models.LeaseAccessConditions
+        :param modified_access_conditions: Parameter group. Default value is None.
+        :type modified_access_conditions: ~azure.storage.filedatalake.models.ModifiedAccessConditions
+        :param cpk_info: Parameter group. Default value is None.
+        :type cpk_info: ~azure.storage.filedatalake.models.CpkInfo
+        :return: AsyncIterator[bytes] or the result of cls(response)
+        :rtype: AsyncIterator[bytes]
+        :raises ~azure.core.exceptions.HttpResponseError:
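+
+        Example (editorial sketch, not generated code): assuming ``path_ops`` is an
+        already-configured instance of this operations class, a ranged read could
+        look like::
+
+            downloaded = await path_ops.read(range="bytes=0-1023")
+            data = b"".join([chunk async for chunk in downloaded])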
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = kwargs.pop("params", {}) or {}
+
+        cls: ClsType[AsyncIterator[bytes]] = kwargs.pop("cls", None)
+
+        _lease_id = None
+        _if_match = None
+        _if_none_match = None
+        _if_modified_since = None
+        _if_unmodified_since = None
+        _encryption_key = None
+        _encryption_key_sha256 = None
+        _encryption_algorithm = None
+        if lease_access_conditions is not None:
+            _lease_id = lease_access_conditions.lease_id
+        if modified_access_conditions is not None:
+            _if_match = modified_access_conditions.if_match
+            _if_modified_since = modified_access_conditions.if_modified_since
+            _if_none_match = modified_access_conditions.if_none_match
+            _if_unmodified_since = modified_access_conditions.if_unmodified_since
+        if cpk_info is not None:
+            _encryption_algorithm = cpk_info.encryption_algorithm
+            _encryption_key = cpk_info.encryption_key
+            _encryption_key_sha256 = cpk_info.encryption_key_sha256
+
+        _request = build_read_request(
+            url=self._config.url,
+            request_id_parameter=request_id_parameter,
+            timeout=timeout,
+            range=range,
+            lease_id=_lease_id,
+            x_ms_range_get_content_md5=x_ms_range_get_content_md5,
+            if_match=_if_match,
+            if_none_match=_if_none_match,
+            if_modified_since=_if_modified_since,
+            if_unmodified_since=_if_unmodified_since,
+            encryption_key=_encryption_key,
+            encryption_key_sha256=_encryption_key_sha256,
+            encryption_algorithm=_encryption_algorithm,  # type: ignore
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _decompress = kwargs.pop("decompress", True)
+        _stream = True
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200, 206]:
+            try:
+                await response.read()  # Load the body in memory and close the socket
+            except (StreamConsumedError, StreamClosedError):
+                pass
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        if response.status_code == 200:
+            response_headers["Accept-Ranges"] = self._deserialize("str", response.headers.get("Accept-Ranges"))
+            response_headers["Cache-Control"] = self._deserialize("str", response.headers.get("Cache-Control"))
+            response_headers["Content-Disposition"] = self._deserialize(
+                "str", response.headers.get("Content-Disposition")
+            )
+            response_headers["Content-Encoding"] = self._deserialize("str", response.headers.get("Content-Encoding"))
+            response_headers["Content-Language"] = self._deserialize("str", response.headers.get("Content-Language"))
+            response_headers["Content-Length"] = self._deserialize("int", response.headers.get("Content-Length"))
+            response_headers["Content-Range"] = self._deserialize("str", response.headers.get("Content-Range"))
+            response_headers["Content-Type"] = self._deserialize("str", response.headers.get("Content-Type"))
+            response_headers["Content-MD5"] = self._deserialize("str", response.headers.get("Content-MD5"))
+            response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+            response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+            response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+            response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+            response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+            response_headers["x-ms-resource-type"] = self._deserialize(
+                "str", response.headers.get("x-ms-resource-type")
+            )
+            response_headers["x-ms-properties"] = self._deserialize("str", response.headers.get("x-ms-properties"))
+            response_headers["x-ms-lease-duration"] = self._deserialize(
+                "str", response.headers.get("x-ms-lease-duration")
+            )
+            response_headers["x-ms-lease-state"] = self._deserialize("str", response.headers.get("x-ms-lease-state"))
+            response_headers["x-ms-lease-status"] = self._deserialize("str", response.headers.get("x-ms-lease-status"))
+            response_headers["x-ms-request-server-encrypted"] = self._deserialize(
+                "bool", response.headers.get("x-ms-request-server-encrypted")
+            )
+            response_headers["x-ms-encryption-key-sha256"] = self._deserialize(
+                "str", response.headers.get("x-ms-encryption-key-sha256")
+            )
+
+        if response.status_code == 206:
+            response_headers["Accept-Ranges"] = self._deserialize("str", response.headers.get("Accept-Ranges"))
+            response_headers["Cache-Control"] = self._deserialize("str", response.headers.get("Cache-Control"))
+            response_headers["Content-Disposition"] = self._deserialize(
+                "str", response.headers.get("Content-Disposition")
+            )
+            response_headers["Content-Encoding"] = self._deserialize("str", response.headers.get("Content-Encoding"))
+            response_headers["Content-Language"] = self._deserialize("str", response.headers.get("Content-Language"))
+            response_headers["Content-Length"] = self._deserialize("int", response.headers.get("Content-Length"))
+            response_headers["Content-Range"] = self._deserialize("str", response.headers.get("Content-Range"))
+            response_headers["Content-Type"] = self._deserialize("str", response.headers.get("Content-Type"))
+            response_headers["Content-MD5"] = self._deserialize("str", response.headers.get("Content-MD5"))
+            response_headers["x-ms-content-md5"] = self._deserialize("str", response.headers.get("x-ms-content-md5"))
+            response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+            response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+            response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+            response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+            response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+            response_headers["x-ms-resource-type"] = self._deserialize(
+                "str", response.headers.get("x-ms-resource-type")
+            )
+            response_headers["x-ms-properties"] = self._deserialize("str", response.headers.get("x-ms-properties"))
+            response_headers["x-ms-lease-duration"] = self._deserialize(
+                "str", response.headers.get("x-ms-lease-duration")
+            )
+            response_headers["x-ms-lease-state"] = self._deserialize("str", response.headers.get("x-ms-lease-state"))
+            response_headers["x-ms-lease-status"] = self._deserialize("str", response.headers.get("x-ms-lease-status"))
+            response_headers["x-ms-request-server-encrypted"] = self._deserialize(
+                "bool", response.headers.get("x-ms-request-server-encrypted")
+            )
+            response_headers["x-ms-encryption-key-sha256"] = self._deserialize(
+                "str", response.headers.get("x-ms-encryption-key-sha256")
+            )
+
+        deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
+
+        if cls:
+            return cls(pipeline_response, deserialized, response_headers)  # type: ignore
+
+        return deserialized  # type: ignore
+
+    @distributed_trace_async
+    async def get_properties(
+        self,
+        request_id_parameter: Optional[str] = None,
+        timeout: Optional[int] = None,
+        action: Optional[Union[str, _models.PathGetPropertiesAction]] = None,
+        upn: Optional[bool] = None,
+        lease_access_conditions: Optional[_models.LeaseAccessConditions] = None,
+        modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """Get Properties | Get Status | Get Access Control List.
+
+        Get Properties returns all system and user defined properties for a path. Get Status returns
+        all system defined properties for a path. Get Access Control List returns the access control
+        list for a path. This operation supports conditional HTTP requests.  For more information, see
+        `Specifying Conditional Headers for Blob Service Operations
+        <https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations>`_.
+
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param action: Optional. If the value is "getStatus" only the system defined properties for the
+         path are returned. If the value is "getAccessControl" the access control list is returned in
+         the response headers (Hierarchical Namespace must be enabled for the account), otherwise the
+         properties are returned. Known values are: "getAccessControl" and "getStatus". Default value is
+         None.
+        :type action: str or ~azure.storage.filedatalake.models.PathGetPropertiesAction
+        :param upn: Optional. Valid only when Hierarchical Namespace is enabled for the account. If
+         "true", the user identity values returned in the x-ms-owner, x-ms-group, and x-ms-acl response
+         headers will be transformed from Azure Active Directory Object IDs to User Principal Names.  If
+         "false", the values will be returned as Azure Active Directory Object IDs. The default value is
+         false. Note that group and application Object IDs are not translated because they do not have
+         unique friendly names. Default value is None.
+        :type upn: bool
+        :param lease_access_conditions: Parameter group. Default value is None.
+        :type lease_access_conditions: ~azure.storage.filedatalake.models.LeaseAccessConditions
+        :param modified_access_conditions: Parameter group. Default value is None.
+        :type modified_access_conditions: ~azure.storage.filedatalake.models.ModifiedAccessConditions
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
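+
+        Example (editorial sketch, not generated code): this operation returns its
+        results only as response headers, which can be surfaced through the ``cls``
+        response hook; ``path_ops`` is an assumed, already-configured instance of
+        this operations class::
+
+            def on_response(pipeline_response, deserialized, headers):
+                return headers
+
+            headers = await path_ops.get_properties(action="getStatus", cls=on_response)
+            etag, length = headers["ETag"], headers["Content-Length"]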
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = kwargs.pop("params", {}) or {}
+
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _lease_id = None
+        _if_match = None
+        _if_none_match = None
+        _if_modified_since = None
+        _if_unmodified_since = None
+        if lease_access_conditions is not None:
+            _lease_id = lease_access_conditions.lease_id
+        if modified_access_conditions is not None:
+            _if_match = modified_access_conditions.if_match
+            _if_modified_since = modified_access_conditions.if_modified_since
+            _if_none_match = modified_access_conditions.if_none_match
+            _if_unmodified_since = modified_access_conditions.if_unmodified_since
+
+        _request = build_get_properties_request(
+            url=self._config.url,
+            request_id_parameter=request_id_parameter,
+            timeout=timeout,
+            action=action,
+            upn=upn,
+            lease_id=_lease_id,
+            if_match=_if_match,
+            if_none_match=_if_none_match,
+            if_modified_since=_if_modified_since,
+            if_unmodified_since=_if_unmodified_since,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["Accept-Ranges"] = self._deserialize("str", response.headers.get("Accept-Ranges"))
+        response_headers["Cache-Control"] = self._deserialize("str", response.headers.get("Cache-Control"))
+        response_headers["Content-Disposition"] = self._deserialize("str", response.headers.get("Content-Disposition"))
+        response_headers["Content-Encoding"] = self._deserialize("str", response.headers.get("Content-Encoding"))
+        response_headers["Content-Language"] = self._deserialize("str", response.headers.get("Content-Language"))
+        response_headers["Content-Length"] = self._deserialize("int", response.headers.get("Content-Length"))
+        response_headers["Content-Range"] = self._deserialize("str", response.headers.get("Content-Range"))
+        response_headers["Content-Type"] = self._deserialize("str", response.headers.get("Content-Type"))
+        response_headers["Content-MD5"] = self._deserialize("str", response.headers.get("Content-MD5"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["x-ms-resource-type"] = self._deserialize("str", response.headers.get("x-ms-resource-type"))
+        response_headers["x-ms-properties"] = self._deserialize("str", response.headers.get("x-ms-properties"))
+        response_headers["x-ms-owner"] = self._deserialize("str", response.headers.get("x-ms-owner"))
+        response_headers["x-ms-group"] = self._deserialize("str", response.headers.get("x-ms-group"))
+        response_headers["x-ms-permissions"] = self._deserialize("str", response.headers.get("x-ms-permissions"))
+        response_headers["x-ms-acl"] = self._deserialize("str", response.headers.get("x-ms-acl"))
+        response_headers["x-ms-lease-duration"] = self._deserialize("str", response.headers.get("x-ms-lease-duration"))
+        response_headers["x-ms-lease-state"] = self._deserialize("str", response.headers.get("x-ms-lease-state"))
+        response_headers["x-ms-lease-status"] = self._deserialize("str", response.headers.get("x-ms-lease-status"))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
+    @distributed_trace_async
+    async def delete(
+        self,
+        request_id_parameter: Optional[str] = None,
+        timeout: Optional[int] = None,
+        recursive: Optional[bool] = None,
+        continuation: Optional[str] = None,
+        paginated: Optional[bool] = None,
+        lease_access_conditions: Optional[_models.LeaseAccessConditions] = None,
+        modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """Delete File | Delete Directory.
+
+        Delete the file or directory. This operation supports conditional HTTP requests.  For more
+        information, see `Specifying Conditional Headers for Blob Service Operations
+        <https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations>`_.
+
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param recursive: Required and valid only when the resource is a directory. If "true", all
+         paths beneath the directory will be deleted. If "false" and the directory is non-empty, an
+         error occurs. Default value is None.
+        :type recursive: bool
+        :param continuation: Optional.  When deleting a directory, the number of paths that are deleted
+         with each invocation is limited.  If the number of paths to be deleted exceeds this limit, a
+         continuation token is returned in this response header.  When a continuation token is returned
+         in the response, it must be specified in a subsequent invocation of the delete operation to
+         continue deleting the directory. Default value is None.
+        :type continuation: str
+        :param paginated: If true, paginated behavior is used: the recursive ACL checks (a POSIX
+         requirement enforced by the server) are paginated, and the delete is performed as a single
+         atomic operation once the ACL checks are completed. If false or missing, the normal default
+         behavior applies, which may time out on very large directories due to the recursive ACL
+         checks. This parameter was introduced for backward compatibility. Default value is None.
+        :type paginated: bool
+        :param lease_access_conditions: Parameter group. Default value is None.
+        :type lease_access_conditions: ~azure.storage.filedatalake.models.LeaseAccessConditions
+        :param modified_access_conditions: Parameter group. Default value is None.
+        :type modified_access_conditions: ~azure.storage.filedatalake.models.ModifiedAccessConditions
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
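+
+        Example (editorial sketch, not generated code): a continuation-token loop for
+        deleting a large directory, reading the ``x-ms-continuation`` header through
+        the ``cls`` hook; ``path_ops`` is an assumed, already-configured instance of
+        this operations class::
+
+            get_token = lambda resp, body, headers: headers.get("x-ms-continuation")
+            token = None
+            while True:
+                token = await path_ops.delete(recursive=True, continuation=token, cls=get_token)
+                if not token:
+                    break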
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = kwargs.pop("params", {}) or {}
+
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _lease_id = None
+        _if_match = None
+        _if_none_match = None
+        _if_modified_since = None
+        _if_unmodified_since = None
+        if lease_access_conditions is not None:
+            _lease_id = lease_access_conditions.lease_id
+        if modified_access_conditions is not None:
+            _if_match = modified_access_conditions.if_match
+            _if_modified_since = modified_access_conditions.if_modified_since
+            _if_none_match = modified_access_conditions.if_none_match
+            _if_unmodified_since = modified_access_conditions.if_unmodified_since
+
+        _request = build_delete_request(
+            url=self._config.url,
+            request_id_parameter=request_id_parameter,
+            timeout=timeout,
+            recursive=recursive,
+            continuation=continuation,
+            lease_id=_lease_id,
+            if_match=_if_match,
+            if_none_match=_if_none_match,
+            if_modified_since=_if_modified_since,
+            if_unmodified_since=_if_unmodified_since,
+            paginated=paginated,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200, 202]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        if response.status_code == 200:
+            response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+            response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+            response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+            response_headers["x-ms-continuation"] = self._deserialize("str", response.headers.get("x-ms-continuation"))
+            response_headers["x-ms-deletion-id"] = self._deserialize("str", response.headers.get("x-ms-deletion-id"))
+
+        if response.status_code == 202:
+            response_headers["Date"] = self._deserialize("str", response.headers.get("Date"))
+            response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+            response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+            response_headers["x-ms-continuation"] = self._deserialize("str", response.headers.get("x-ms-continuation"))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
+    @distributed_trace_async
+    async def set_access_control(
+        self,
+        timeout: Optional[int] = None,
+        owner: Optional[str] = None,
+        group: Optional[str] = None,
+        permissions: Optional[str] = None,
+        acl: Optional[str] = None,
+        request_id_parameter: Optional[str] = None,
+        lease_access_conditions: Optional[_models.LeaseAccessConditions] = None,
+        modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """Set the owner, group, permissions, or access control list for a path.
+
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param owner: Optional. The owner of the blob or directory. Default value is None.
+        :type owner: str
+        :param group: Optional. The owning group of the blob or directory. Default value is None.
+        :type group: str
+        :param permissions: Optional and only valid if Hierarchical Namespace is enabled for the
+         account. Sets POSIX access permissions for the file owner, the file owning group, and others.
+         Each class may be granted read, write, or execute permission.  The sticky bit is also
+         supported.  Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are supported.
+         Default value is None.
+        :type permissions: str
+        :param acl: Sets POSIX access control rights on files and directories. The value is a
+         comma-separated list of access control entries. Each access control entry (ACE) consists of a
+         scope, a type, a user or group identifier, and permissions in the format
+         "[scope:][type]:[id]:[permissions]". Default value is None.
+        :type acl: str
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param lease_access_conditions: Parameter group. Default value is None.
+        :type lease_access_conditions: ~azure.storage.filedatalake.models.LeaseAccessConditions
+        :param modified_access_conditions: Parameter group. Default value is None.
+        :type modified_access_conditions: ~azure.storage.filedatalake.models.ModifiedAccessConditions
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
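+
+        Example (editorial sketch, not generated code): setting a POSIX ACL on a
+        path; ``path_ops`` is an assumed, already-configured instance of this
+        operations class::
+
+            await path_ops.set_access_control(acl="user::rwx,group::r-x,other::---")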
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        action: Literal["setAccessControl"] = kwargs.pop("action", _params.pop("action", "setAccessControl"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _lease_id = None
+        _if_match = None
+        _if_none_match = None
+        _if_modified_since = None
+        _if_unmodified_since = None
+        if lease_access_conditions is not None:
+            _lease_id = lease_access_conditions.lease_id
+        if modified_access_conditions is not None:
+            _if_match = modified_access_conditions.if_match
+            _if_modified_since = modified_access_conditions.if_modified_since
+            _if_none_match = modified_access_conditions.if_none_match
+            _if_unmodified_since = modified_access_conditions.if_unmodified_since
+
+        _request = build_set_access_control_request(
+            url=self._config.url,
+            timeout=timeout,
+            lease_id=_lease_id,
+            owner=owner,
+            group=group,
+            permissions=permissions,
+            acl=acl,
+            if_match=_if_match,
+            if_none_match=_if_none_match,
+            if_modified_since=_if_modified_since,
+            if_unmodified_since=_if_unmodified_since,
+            request_id_parameter=request_id_parameter,
+            action=action,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
+    @distributed_trace_async
+    async def set_access_control_recursive(
+        self,
+        mode: Union[str, _models.PathSetAccessControlRecursiveMode],
+        timeout: Optional[int] = None,
+        continuation: Optional[str] = None,
+        force_flag: Optional[bool] = None,
+        max_records: Optional[int] = None,
+        acl: Optional[str] = None,
+        request_id_parameter: Optional[str] = None,
+        **kwargs: Any
+    ) -> _models.SetAccessControlRecursiveResponse:
+        # pylint: disable=line-too-long
+        """Set the access control list for a path and sub-paths.
+
+        :param mode: Mode "set" sets POSIX access control rights on files and directories, "modify"
+         modifies one or more POSIX access control rights  that pre-exist on files and directories,
+         "remove" removes one or more POSIX access control rights  that were present earlier on files
+         and directories. Known values are: "set", "modify", and "remove". Required.
+        :type mode: str or ~azure.storage.filedatalake.models.PathSetAccessControlRecursiveMode
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param continuation: Optional.  When performing this operation on a directory, the number of
+         paths that are processed with each invocation is limited.  If the number of paths to be
+         processed exceeds this limit, a continuation token is returned in this response header.  When
+         a continuation token is returned in the response, it must be specified in a subsequent
+         invocation of the set access control recursive operation to continue processing the
+         directory. Default value is None.
+        :type continuation: str
+        :param force_flag: Optional. Valid for "SetAccessControlRecursive" operation. If set to false,
+         the operation will terminate quickly on encountering user errors (4XX). If true, the operation
+         will ignore user errors and proceed with the operation on other sub-entities of the directory.
+         A continuation token is returned in case of user errors only when forceFlag is true. If not
+         set, the default value is false. Default value is None.
+        :type force_flag: bool
+        :param max_records: Optional. It specifies the maximum number of files or directories on which
+         the acl change will be applied. If omitted or greater than 2,000, the request will process up
+         to 2,000 items. Default value is None.
+        :type max_records: int
+        :param acl: Sets POSIX access control rights on files and directories. The value is a
+         comma-separated list of access control entries. Each access control entry (ACE) consists of a
+         scope, a type, a user or group identifier, and permissions in the format
+         "[scope:][type]:[id]:[permissions]". Default value is None.
+        :type acl: str
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :return: SetAccessControlRecursiveResponse or the result of cls(response)
+        :rtype: ~azure.storage.filedatalake.models.SetAccessControlRecursiveResponse
+        :raises ~azure.core.exceptions.HttpResponseError:
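+
+        Example (editorial sketch, not generated code): modifying ACLs beneath a
+        directory and resuming via the ``x-ms-continuation`` header, captured with
+        the ``cls`` hook; ``path_ops`` is an assumed, already-configured instance of
+        this operations class::
+
+            def keep_token(resp, deserialized, headers):
+                return deserialized, headers.get("x-ms-continuation")
+
+            token = None
+            while True:
+                result, token = await path_ops.set_access_control_recursive(
+                    mode="modify", acl="user::rwx", continuation=token, cls=keep_token
+                )
+                if not token:
+                    break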
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        action: Literal["setAccessControlRecursive"] = kwargs.pop(
+            "action", _params.pop("action", "setAccessControlRecursive")
+        )
+        cls: ClsType[_models.SetAccessControlRecursiveResponse] = kwargs.pop("cls", None)
+
+        _request = build_set_access_control_recursive_request(
+            url=self._config.url,
+            mode=mode,
+            timeout=timeout,
+            continuation=continuation,
+            force_flag=force_flag,
+            max_records=max_records,
+            acl=acl,
+            request_id_parameter=request_id_parameter,
+            action=action,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-continuation"] = self._deserialize("str", response.headers.get("x-ms-continuation"))
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+
+        deserialized = self._deserialize("SetAccessControlRecursiveResponse", pipeline_response.http_response)
+
+        if cls:
+            return cls(pipeline_response, deserialized, response_headers)  # type: ignore
+
+        return deserialized  # type: ignore
+
+    @distributed_trace_async
+    async def flush_data(
+        self,
+        timeout: Optional[int] = None,
+        position: Optional[int] = None,
+        retain_uncommitted_data: Optional[bool] = None,
+        close: Optional[bool] = None,
+        content_length: Optional[int] = None,
+        lease_action: Optional[Union[str, _models.LeaseAction]] = None,
+        lease_duration: Optional[int] = None,
+        proposed_lease_id: Optional[str] = None,
+        request_id_parameter: Optional[str] = None,
+        path_http_headers: Optional[_models.PathHTTPHeaders] = None,
+        lease_access_conditions: Optional[_models.LeaseAccessConditions] = None,
+        modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None,
+        cpk_info: Optional[_models.CpkInfo] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """Set the owner, group, permissions, or access control list for a path.
+
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param position: This parameter allows the caller to upload data in parallel and control the
+         order in which it is appended to the file.  It is required when uploading data to be appended
+         to the file and when flushing previously uploaded data to the file.  The value must be the
+         position where the data is to be appended.  Uploaded data is not immediately flushed, or
+         written, to the file.  To flush, the previously uploaded data must be contiguous, the position
+         parameter must be specified and equal to the length of the file after all data has been
+         written, and there must not be a request entity body included with the request. Default value
+         is None.
+        :type position: int
+        :param retain_uncommitted_data: Valid only for flush operations.  If "true", uncommitted data
+         is retained after the flush operation completes; otherwise, the uncommitted data is deleted
+         after the flush operation.  The default is false.  Data at offsets less than the specified
+         position are written to the file when flush succeeds, but this optional parameter allows data
+         after the flush position to be retained for a future flush operation. Default value is None.
+        :type retain_uncommitted_data: bool
+        :param close: Azure Storage Events allow applications to receive notifications when files
+         change. When Azure Storage Events are enabled, a file changed event is raised. This event has a
+         property indicating whether this is the final change to distinguish the difference between an
+         intermediate flush to a file stream and the final close of a file stream. The close query
+         parameter is valid only when the action is "flush" and change notifications are enabled. If the
+         value of close is "true" and the flush operation completes successfully, the service raises a
+         file change notification with a property indicating that this is the final update (the file
+         stream has been closed). If "false" a change notification is raised indicating the file has
+         changed. The default is false. This query parameter is set to true by the Hadoop ABFS driver to
+         indicate that the file stream has been closed. Default value is None.
+        :type close: bool
+        :param content_length: Required for "Append Data" and "Flush Data".  Must be 0 for "Flush
+         Data".  Must be the length of the request content in bytes for "Append Data". Default value is
+         None.
+        :type content_length: int
+        :param lease_action: Optional. If "acquire" it will acquire the lease. If "auto-renew" it will
+         renew the lease. If "release" it will release the lease only on flush. If "acquire-release" it
+         will acquire the lease, complete the operation, and then release the lease once the operation
+         is done. Known values are: "acquire", "auto-renew", "release", and "acquire-release". Default
+         value is None.
+        :type lease_action: str or ~azure.storage.filedatalake.models.LeaseAction
+        :param lease_duration: The lease duration is required to acquire a lease, and specifies the
+         duration of the lease in seconds.  The lease duration must be between 15 and 60 seconds or -1
+         for infinite lease. Default value is None.
+        :type lease_duration: int
+        :param proposed_lease_id: Proposed lease ID, in a GUID string format. The Blob service returns
+         400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid
+         Constructor (String) for a list of valid GUID string formats. Default value is None.
+        :type proposed_lease_id: str
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param path_http_headers: Parameter group. Default value is None.
+        :type path_http_headers: ~azure.storage.filedatalake.models.PathHTTPHeaders
+        :param lease_access_conditions: Parameter group. Default value is None.
+        :type lease_access_conditions: ~azure.storage.filedatalake.models.LeaseAccessConditions
+        :param modified_access_conditions: Parameter group. Default value is None.
+        :type modified_access_conditions: ~azure.storage.filedatalake.models.ModifiedAccessConditions
+        :param cpk_info: Parameter group. Default value is None.
+        :type cpk_info: ~azure.storage.filedatalake.models.CpkInfo
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
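+
+        Example (editorial sketch, not generated code): flushing everything appended
+        so far and closing the stream; ``path_ops`` is an assumed, already-configured
+        instance of this operations class and ``total_appended`` is assumed to hold
+        the byte count of all prior appends::
+
+            await path_ops.flush_data(position=total_appended, content_length=0, close=True)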
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        action: Literal["flush"] = kwargs.pop("action", _params.pop("action", "flush"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _content_md5 = None
+        _lease_id = None
+        _cache_control = None
+        _content_type_parameter = None
+        _content_disposition = None
+        _content_encoding = None
+        _content_language = None
+        _if_match = None
+        _if_none_match = None
+        _if_modified_since = None
+        _if_unmodified_since = None
+        _encryption_key = None
+        _encryption_key_sha256 = None
+        _encryption_algorithm = None
+        if path_http_headers is not None:
+            _cache_control = path_http_headers.cache_control
+            _content_disposition = path_http_headers.content_disposition
+            _content_encoding = path_http_headers.content_encoding
+            _content_language = path_http_headers.content_language
+            _content_md5 = path_http_headers.content_md5
+            _content_type_parameter = path_http_headers.content_type
+        if lease_access_conditions is not None:
+            _lease_id = lease_access_conditions.lease_id
+        if modified_access_conditions is not None:
+            _if_match = modified_access_conditions.if_match
+            _if_modified_since = modified_access_conditions.if_modified_since
+            _if_none_match = modified_access_conditions.if_none_match
+            _if_unmodified_since = modified_access_conditions.if_unmodified_since
+        if cpk_info is not None:
+            _encryption_algorithm = cpk_info.encryption_algorithm
+            _encryption_key = cpk_info.encryption_key
+            _encryption_key_sha256 = cpk_info.encryption_key_sha256
+
+        _request = build_flush_data_request(
+            url=self._config.url,
+            timeout=timeout,
+            position=position,
+            retain_uncommitted_data=retain_uncommitted_data,
+            close=close,
+            content_length=content_length,
+            content_md5=_content_md5,
+            lease_id=_lease_id,
+            lease_action=lease_action,
+            lease_duration=lease_duration,
+            proposed_lease_id=proposed_lease_id,
+            cache_control=_cache_control,
+            content_type_parameter=_content_type_parameter,
+            content_disposition=_content_disposition,
+            content_encoding=_content_encoding,
+            content_language=_content_language,
+            if_match=_if_match,
+            if_none_match=_if_none_match,
+            if_modified_since=_if_modified_since,
+            if_unmodified_since=_if_unmodified_since,
+            request_id_parameter=request_id_parameter,
+            encryption_key=_encryption_key,
+            encryption_key_sha256=_encryption_key_sha256,
+            encryption_algorithm=_encryption_algorithm,  # type: ignore
+            action=action,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["Content-Length"] = self._deserialize("int", response.headers.get("Content-Length"))
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["x-ms-request-server-encrypted"] = self._deserialize(
+            "bool", response.headers.get("x-ms-request-server-encrypted")
+        )
+        response_headers["x-ms-encryption-key-sha256"] = self._deserialize(
+            "str", response.headers.get("x-ms-encryption-key-sha256")
+        )
+        response_headers["x-ms-lease-renewed"] = self._deserialize("bool", response.headers.get("x-ms-lease-renewed"))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
+    @distributed_trace_async
+    async def append_data(
+        self,
+        body: IO[bytes],
+        position: Optional[int] = None,
+        timeout: Optional[int] = None,
+        content_length: Optional[int] = None,
+        transactional_content_crc64: Optional[bytes] = None,
+        lease_action: Optional[Union[str, _models.LeaseAction]] = None,
+        lease_duration: Optional[int] = None,
+        proposed_lease_id: Optional[str] = None,
+        request_id_parameter: Optional[str] = None,
+        flush: Optional[bool] = None,
+        structured_body_type: Optional[str] = None,
+        structured_content_length: Optional[int] = None,
+        path_http_headers: Optional[_models.PathHTTPHeaders] = None,
+        lease_access_conditions: Optional[_models.LeaseAccessConditions] = None,
+        cpk_info: Optional[_models.CpkInfo] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """Append data to the file.
+
+        :param body: Initial data. Required.
+        :type body: IO[bytes]
+        :param position: This parameter allows the caller to upload data in parallel and control the
+         order in which it is appended to the file.  It is required when uploading data to be appended
+         to the file and when flushing previously uploaded data to the file.  The value must be the
+         position where the data is to be appended.  Uploaded data is not immediately flushed, or
+         written, to the file.  To flush, the previously uploaded data must be contiguous, the position
+         parameter must be specified and equal to the length of the file after all data has been
+         written, and there must not be a request entity body included with the request. Default value
+         is None.
+        :type position: int
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param content_length: Required for "Append Data" and "Flush Data".  Must be 0 for "Flush
+         Data".  Must be the length of the request content in bytes for "Append Data". Default value is
+         None.
+        :type content_length: int
+        :param transactional_content_crc64: Specify the transactional crc64 for the body, to be
+         validated by the service. Default value is None.
+        :type transactional_content_crc64: bytes
+        :param lease_action: Optional. If "acquire" it will acquire the lease. If "auto-renew" it will
+         renew the lease. If "release" it will release the lease only on flush. If "acquire-release" it
+         will acquire the lease, complete the operation, and then release the lease once the operation
+         is done. Known values are: "acquire", "auto-renew", "release", and "acquire-release". Default
+         value is None.
+        :type lease_action: str or ~azure.storage.filedatalake.models.LeaseAction
+        :param lease_duration: The lease duration is required to acquire a lease, and specifies the
+         duration of the lease in seconds.  The lease duration must be between 15 and 60 seconds or -1
+         for infinite lease. Default value is None.
+        :type lease_duration: int
+        :param proposed_lease_id: Proposed lease ID, in a GUID string format. The Blob service returns
+         400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid
+         Constructor (String) for a list of valid GUID string formats. Default value is None.
+        :type proposed_lease_id: str
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param flush: Whether the file should be flushed after the append. Default value is None.
+        :type flush: bool
+        :param structured_body_type: Required if the request body is a structured message. Specifies
+         the message schema version and properties. Default value is None.
+        :type structured_body_type: str
+        :param structured_content_length: Required if the request body is a structured message.
+         Specifies the length of the blob/file content inside the message body. Will always be smaller
+         than Content-Length. Default value is None.
+        :type structured_content_length: int
+        :param path_http_headers: Parameter group. Default value is None.
+        :type path_http_headers: ~azure.storage.filedatalake.models.PathHTTPHeaders
+        :param lease_access_conditions: Parameter group. Default value is None.
+        :type lease_access_conditions: ~azure.storage.filedatalake.models.LeaseAccessConditions
+        :param cpk_info: Parameter group. Default value is None.
+        :type cpk_info: ~azure.storage.filedatalake.models.CpkInfo
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
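+
+        Example (editorial sketch, not generated code): appending a small payload at
+        offset 0; ``path_ops`` is an assumed, already-configured instance of this
+        operations class::
+
+            import io
+
+            payload = b"hello, data lake"
+            await path_ops.append_data(
+                body=io.BytesIO(payload), position=0, content_length=len(payload)
+            )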
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        action: Literal["append"] = kwargs.pop("action", _params.pop("action", "append"))
+        content_type: str = kwargs.pop("content_type", _headers.pop("Content-Type", "application/json"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _transactional_content_hash = None
+        _lease_id = None
+        _encryption_key = None
+        _encryption_key_sha256 = None
+        _encryption_algorithm = None
+        if path_http_headers is not None:
+            _transactional_content_hash = path_http_headers.transactional_content_hash
+        if lease_access_conditions is not None:
+            _lease_id = lease_access_conditions.lease_id
+        if cpk_info is not None:
+            _encryption_algorithm = cpk_info.encryption_algorithm
+            _encryption_key = cpk_info.encryption_key
+            _encryption_key_sha256 = cpk_info.encryption_key_sha256
+        _content = body
+
+        _request = build_append_data_request(
+            url=self._config.url,
+            position=position,
+            timeout=timeout,
+            content_length=content_length,
+            transactional_content_hash=_transactional_content_hash,
+            transactional_content_crc64=transactional_content_crc64,
+            lease_id=_lease_id,
+            lease_action=lease_action,
+            lease_duration=lease_duration,
+            proposed_lease_id=proposed_lease_id,
+            request_id_parameter=request_id_parameter,
+            encryption_key=_encryption_key,
+            encryption_key_sha256=_encryption_key_sha256,
+            encryption_algorithm=_encryption_algorithm,  # type: ignore
+            flush=flush,
+            structured_body_type=structured_body_type,
+            structured_content_length=structured_content_length,
+            action=action,
+            content_type=content_type,
+            version=self._config.version,
+            content=_content,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [202]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Content-MD5"] = self._deserialize("bytearray", response.headers.get("Content-MD5"))
+        response_headers["x-ms-content-crc64"] = self._deserialize(
+            "bytearray", response.headers.get("x-ms-content-crc64")
+        )
+        response_headers["x-ms-request-server-encrypted"] = self._deserialize(
+            "bool", response.headers.get("x-ms-request-server-encrypted")
+        )
+        response_headers["x-ms-encryption-key-sha256"] = self._deserialize(
+            "str", response.headers.get("x-ms-encryption-key-sha256")
+        )
+        response_headers["x-ms-lease-renewed"] = self._deserialize("bool", response.headers.get("x-ms-lease-renewed"))
+        response_headers["x-ms-structured-body"] = self._deserialize(
+            "str", response.headers.get("x-ms-structured-body")
+        )
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
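+    # Example (sketch, names assumed): a caller can append a buffer at a known
+    # offset and ask the service to flush in the same request, avoiding a
+    # separate flush call. `path_ops` stands in for an instance of this class.
+    #
+    #     data = b"hello"
+    #     await path_ops.append_data(
+    #         body=data, position=0, content_length=len(data), flush=True
+    #     )
+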
+    @distributed_trace_async
+    async def set_expiry(
+        self,
+        expiry_options: Union[str, _models.PathExpiryOptions],
+        timeout: Optional[int] = None,
+        request_id_parameter: Optional[str] = None,
+        expires_on: Optional[str] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """Sets the time a blob will expire and be deleted.
+
+        :param expiry_options: Indicates the mode of the expiry time. Known values are:
+         "NeverExpire", "RelativeToCreation", "RelativeToNow", and "Absolute". Required.
+        :type expiry_options: str or ~azure.storage.filedatalake.models.PathExpiryOptions
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param expires_on: The time at which the blob should expire: a duration in
+         milliseconds for the "RelativeTo*" modes, or an RFC 1123 date for "Absolute".
+         Default value is None.
+        :type expires_on: str
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        comp: Literal["expiry"] = kwargs.pop("comp", _params.pop("comp", "expiry"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _request = build_set_expiry_request(
+            url=self._config.url,
+            expiry_options=expiry_options,
+            timeout=timeout,
+            request_id_parameter=request_id_parameter,
+            expires_on=expires_on,
+            comp=comp,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
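+    # Example (sketch, `path_ops` assumed): the interpretation of `expires_on`
+    # follows `expiry_options`: a duration in milliseconds for the "RelativeTo*"
+    # modes, an RFC 1123 date for "Absolute".
+    #
+    #     await path_ops.set_expiry("RelativeToNow", expires_on="30000")
+    #     await path_ops.set_expiry("Absolute", expires_on="Tue, 01 Apr 2025 00:00:00 GMT")
+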
+    @distributed_trace_async
+    async def undelete(
+        self,
+        timeout: Optional[int] = None,
+        undelete_source: Optional[str] = None,
+        request_id_parameter: Optional[str] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """Undelete a path that was previously soft deleted.
+
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param undelete_source: Optional; only for hierarchical-namespace-enabled accounts. The
+         path of the soft-deleted blob to undelete. Default value is None.
+        :type undelete_source: str
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        comp: Literal["undelete"] = kwargs.pop("comp", _params.pop("comp", "undelete"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _request = build_undelete_request(
+            url=self._config.url,
+            timeout=timeout,
+            undelete_source=undelete_source,
+            request_id_parameter=request_id_parameter,
+            comp=comp,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-resource-type"] = self._deserialize("str", response.headers.get("x-ms-resource-type"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
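
Every operation in this generated file follows one skeleton: build an HttpRequest from the
validated parameters, run it through the async pipeline without streaming, map unexpected
status codes onto typed exceptions, then deserialize the response headers of interest. A
condensed sketch of that shape (helper and parameter names are illustrative, not part of
the generated module):

    from azure.core.exceptions import HttpResponseError, map_error

    async def _run_operation(client, deserialize, request, ok_codes, error_map):
        # Headers-only operations are sent without streaming the body.
        pipeline_response = await client._pipeline.run(request, stream=False)
        response = pipeline_response.http_response
        if response.status_code not in ok_codes:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response)
        # Callers may pass `cls` to receive (pipeline_response, deserialized, headers).
        return {
            "ETag": deserialize("str", response.headers.get("ETag")),
            "Date": deserialize("rfc-1123", response.headers.get("Date")),
        }
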
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/aio/operations/_service_operations.py b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/aio/operations/_service_operations.py
new file mode 100644
index 00000000..0e0243e9
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/aio/operations/_service_operations.py
@@ -0,0 +1,161 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+import sys
+from typing import Any, AsyncIterable, Callable, Dict, Literal, Optional, TypeVar
+
+from azure.core import AsyncPipelineClient
+from azure.core.async_paging import AsyncItemPaged, AsyncList
+from azure.core.exceptions import (
+    ClientAuthenticationError,
+    HttpResponseError,
+    ResourceExistsError,
+    ResourceNotFoundError,
+    ResourceNotModifiedError,
+    map_error,
+)
+from azure.core.pipeline import PipelineResponse
+from azure.core.rest import AsyncHttpResponse, HttpRequest
+from azure.core.tracing.decorator import distributed_trace
+from azure.core.utils import case_insensitive_dict
+
+from ... import models as _models
+from ..._serialization import Deserializer, Serializer
+from ...operations._service_operations import build_list_file_systems_request
+from .._configuration import AzureDataLakeStorageRESTAPIConfiguration
+
+if sys.version_info >= (3, 9):
+    from collections.abc import MutableMapping
+else:
+    from typing import MutableMapping  # type: ignore
+T = TypeVar("T")
+ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
+
+
+class ServiceOperations:
+    """
+    .. warning::
+        **DO NOT** instantiate this class directly.
+
+        Instead, you should access the following operations through
+        :class:`~azure.storage.filedatalake.aio.AzureDataLakeStorageRESTAPI`'s
+        :attr:`service` attribute.
+    """
+
+    models = _models
+
+    def __init__(self, *args, **kwargs) -> None:
+        input_args = list(args)
+        self._client: AsyncPipelineClient = input_args.pop(0) if input_args else kwargs.pop("client")
+        self._config: AzureDataLakeStorageRESTAPIConfiguration = (
+            input_args.pop(0) if input_args else kwargs.pop("config")
+        )
+        self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer")
+        self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer")
+
+    @distributed_trace
+    def list_file_systems(
+        self,
+        prefix: Optional[str] = None,
+        continuation: Optional[str] = None,
+        max_results: Optional[int] = None,
+        request_id_parameter: Optional[str] = None,
+        timeout: Optional[int] = None,
+        **kwargs: Any
+    ) -> AsyncIterable["_models.FileSystem"]:
+        # pylint: disable=line-too-long
+        """List FileSystems.
+
+        List filesystems and their properties in the given account.
+
+        :param prefix: Filters results to filesystems within the specified prefix. Default value is
+         None.
+        :type prefix: str
+        :param continuation: Optional. When the number of filesystems to list exceeds the
+         maximum allowed, a continuation token is returned in the x-ms-continuation response
+         header. When a continuation token is returned, it must be specified in a subsequent
+         invocation of the list operation to continue listing the filesystems. Default value
+         is None.
+        :type continuation: str
+        :param max_results: An optional value that specifies the maximum number of items to return. If
+         omitted or greater than 5,000, the response will include up to 5,000 items. Default value is
+         None.
+        :type max_results: int
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :return: An iterator-like instance of either FileSystem or the result of cls(response)
+        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.storage.filedatalake.models.FileSystem]
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        resource: Literal["account"] = kwargs.pop("resource", _params.pop("resource", "account"))
+        cls: ClsType[_models.FileSystemList] = kwargs.pop("cls", None)
+
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        def prepare_request(next_link=None):
+            if not next_link:
+
+                _request = build_list_file_systems_request(
+                    url=self._config.url,
+                    prefix=prefix,
+                    continuation=continuation,
+                    max_results=max_results,
+                    request_id_parameter=request_id_parameter,
+                    timeout=timeout,
+                    resource=resource,
+                    version=self._config.version,
+                    headers=_headers,
+                    params=_params,
+                )
+                _request.url = self._client.format_url(_request.url)
+
+            else:
+                _request = HttpRequest("GET", next_link)
+                _request.url = self._client.format_url(_request.url)
+            return _request
+
+        async def extract_data(pipeline_response):
+            deserialized = self._deserialize("FileSystemList", pipeline_response)
+            list_of_elem = deserialized.filesystems
+            if cls:
+                list_of_elem = cls(list_of_elem)  # type: ignore
+            return None, AsyncList(list_of_elem)
+
+        async def get_next(next_link=None):
+            _request = prepare_request(next_link)
+
+            _stream = False
+            pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+                _request, stream=_stream, **kwargs
+            )
+            response = pipeline_response.http_response
+
+            if response.status_code not in [200]:
+                map_error(status_code=response.status_code, response=response, error_map=error_map)
+                error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+                raise HttpResponseError(response=response, model=error)
+
+            return pipeline_response
+
+        return AsyncItemPaged(get_next, extract_data)
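
list_file_systems returns an AsyncItemPaged immediately; no request is sent until iteration
begins, and get_next is re-invoked with each continuation link. A consumption sketch
(client construction elided; `svc_ops` is assumed to be a ServiceOperations instance):

    async def print_file_systems(svc_ops):
        # Each page is fetched lazily as the async iterator advances.
        async for fs in svc_ops.list_file_systems(prefix="logs-", max_results=100):
            print(fs.name, fs.last_modified)
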
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/models/__init__.py b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/models/__init__.py
new file mode 100644
index 00000000..ca1ce1ca
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/models/__init__.py
@@ -0,0 +1,82 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+# pylint: disable=wrong-import-position
+
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from ._patch import *  # pylint: disable=unused-wildcard-import
+
+
+from ._models_py3 import (  # type: ignore
+    AclFailedEntry,
+    BlobHierarchyListSegment,
+    BlobItemInternal,
+    BlobPrefix,
+    BlobPropertiesInternal,
+    CpkInfo,
+    FileSystem,
+    FileSystemList,
+    LeaseAccessConditions,
+    ListBlobsHierarchySegmentResponse,
+    ModifiedAccessConditions,
+    Path,
+    PathHTTPHeaders,
+    PathList,
+    SetAccessControlRecursiveResponse,
+    SourceModifiedAccessConditions,
+    StorageError,
+    StorageErrorError,
+)
+
+from ._azure_data_lake_storage_restapi_enums import (  # type: ignore
+    LeaseAction,
+    ListBlobsIncludeItem,
+    PathExpiryOptions,
+    PathGetPropertiesAction,
+    PathLeaseAction,
+    PathRenameMode,
+    PathResourceType,
+    PathSetAccessControlRecursiveMode,
+    PathUpdateAction,
+)
+from ._patch import __all__ as _patch_all
+from ._patch import *
+from ._patch import patch_sdk as _patch_sdk
+
+__all__ = [
+    "AclFailedEntry",
+    "BlobHierarchyListSegment",
+    "BlobItemInternal",
+    "BlobPrefix",
+    "BlobPropertiesInternal",
+    "CpkInfo",
+    "FileSystem",
+    "FileSystemList",
+    "LeaseAccessConditions",
+    "ListBlobsHierarchySegmentResponse",
+    "ModifiedAccessConditions",
+    "Path",
+    "PathHTTPHeaders",
+    "PathList",
+    "SetAccessControlRecursiveResponse",
+    "SourceModifiedAccessConditions",
+    "StorageError",
+    "StorageErrorError",
+    "LeaseAction",
+    "ListBlobsIncludeItem",
+    "PathExpiryOptions",
+    "PathGetPropertiesAction",
+    "PathLeaseAction",
+    "PathRenameMode",
+    "PathResourceType",
+    "PathSetAccessControlRecursiveMode",
+    "PathUpdateAction",
+]
+__all__.extend([p for p in _patch_all if p not in __all__])  # pyright: ignore
+_patch_sdk()
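
The trailing `_patch` imports are the SDK's customization hook: hand-written code in
`_patch.py` can add or override models, re-export them through its `__all__`, and perform
setup in `patch_sdk()` after the generated names load. A minimal hypothetical `_patch.py`:

    # _patch.py (hypothetical customization; the shipped file may differ)
    from typing import List

    __all__: List[str] = []  # names merged into the package __all__ above


    def patch_sdk():
        """Import-time hook; register or adjust models here."""
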
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/models/_azure_data_lake_storage_restapi_enums.py b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/models/_azure_data_lake_storage_restapi_enums.py
new file mode 100644
index 00000000..c9bb43b5
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/models/_azure_data_lake_storage_restapi_enums.py
@@ -0,0 +1,90 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+from enum import Enum
+from azure.core import CaseInsensitiveEnumMeta
+
+
+class LeaseAction(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+    """LeaseAction."""
+
+    ACQUIRE = "acquire"
+    AUTO_RENEW = "auto-renew"
+    RELEASE = "release"
+    ACQUIRE_RELEASE = "acquire-release"
+
+
+class ListBlobsIncludeItem(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+    """ListBlobsIncludeItem."""
+
+    COPY = "copy"
+    DELETED = "deleted"
+    METADATA = "metadata"
+    SNAPSHOTS = "snapshots"
+    UNCOMMITTEDBLOBS = "uncommittedblobs"
+    VERSIONS = "versions"
+    TAGS = "tags"
+
+
+class PathExpiryOptions(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+    """PathExpiryOptions."""
+
+    NEVER_EXPIRE = "NeverExpire"
+    RELATIVE_TO_CREATION = "RelativeToCreation"
+    RELATIVE_TO_NOW = "RelativeToNow"
+    ABSOLUTE = "Absolute"
+
+
+class PathGetPropertiesAction(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+    """PathGetPropertiesAction."""
+
+    GET_ACCESS_CONTROL = "getAccessControl"
+    GET_STATUS = "getStatus"
+
+
+class PathLeaseAction(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+    """PathLeaseAction."""
+
+    ACQUIRE = "acquire"
+    BREAK = "break"
+    CHANGE = "change"
+    RENEW = "renew"
+    RELEASE = "release"
+    BREAK_ENUM = "break"  # duplicate value of BREAK; Python's Enum resolves this as an alias
+
+
+class PathRenameMode(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+    """PathRenameMode."""
+
+    LEGACY = "legacy"
+    POSIX = "posix"
+
+
+class PathResourceType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+    """PathResourceType."""
+
+    DIRECTORY = "directory"
+    FILE = "file"
+
+
+class PathSetAccessControlRecursiveMode(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+    """PathSetAccessControlRecursiveMode."""
+
+    SET = "set"
+    MODIFY = "modify"
+    REMOVE = "remove"
+
+
+class PathUpdateAction(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+    """PathUpdateAction."""
+
+    APPEND = "append"
+    FLUSH = "flush"
+    SET_PROPERTIES = "setProperties"
+    SET_ACCESS_CONTROL = "setAccessControl"
+    SET_ACCESS_CONTROL_RECURSIVE = "setAccessControlRecursive"
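
Because every enum here uses CaseInsensitiveEnumMeta, member lookup by name ignores case,
and members are str subclasses that compare equal to their wire value. A small sketch
(import path assumed from this package layout):

    from azure.storage.filedatalake._generated.models import PathExpiryOptions

    # Name lookup is case-insensitive under CaseInsensitiveEnumMeta.
    assert PathExpiryOptions["never_expire"] is PathExpiryOptions.NEVER_EXPIRE
    # Members are str subclasses, so they serialize as their wire value.
    assert PathExpiryOptions.NEVER_EXPIRE == "NeverExpire"
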
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/models/_models_py3.py b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/models/_models_py3.py
new file mode 100644
index 00000000..6289f29b
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/models/_models_py3.py
@@ -0,0 +1,1041 @@
+# pylint: disable=too-many-lines
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+import datetime
+from typing import Any, List, Literal, Optional, TYPE_CHECKING
+
+from .. import _serialization
+
+if TYPE_CHECKING:
+    from .. import models as _models
+
+
+class AclFailedEntry(_serialization.Model):
+    """AclFailedEntry.
+
+    :ivar name:
+    :vartype name: str
+    :ivar type:
+    :vartype type: str
+    :ivar error_message:
+    :vartype error_message: str
+    """
+
+    _attribute_map = {
+        "name": {"key": "name", "type": "str"},
+        "type": {"key": "type", "type": "str"},
+        "error_message": {"key": "errorMessage", "type": "str"},
+    }
+
+    def __init__(
+        self,
+        *,
+        name: Optional[str] = None,
+        type: Optional[str] = None,
+        error_message: Optional[str] = None,
+        **kwargs: Any
+    ) -> None:
+        """
+        :keyword name:
+        :paramtype name: str
+        :keyword type:
+        :paramtype type: str
+        :keyword error_message:
+        :paramtype error_message: str
+        """
+        super().__init__(**kwargs)
+        self.name = name
+        self.type = type
+        self.error_message = error_message
+
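+# Example (sketch): these _serialization.Model subclasses support wire-format
+# round-trips driven by _attribute_map; serialize() should emit the mapped keys
+# (e.g. "errorMessage"), while as_dict() keeps the Python attribute names.
+#
+#     entry = AclFailedEntry(name="dir/file", type="FILE", error_message="denied")
+#     entry.serialize()  # expected: {..., "errorMessage": "denied"}
+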
+
+class BlobHierarchyListSegment(_serialization.Model):
+    """BlobHierarchyListSegment.
+
+    All required parameters must be populated in order to send to the server.
+
+    :ivar blob_prefixes:
+    :vartype blob_prefixes: list[~azure.storage.filedatalake.models.BlobPrefix]
+    :ivar blob_items: Required.
+    :vartype blob_items: list[~azure.storage.filedatalake.models.BlobItemInternal]
+    """
+
+    _validation = {
+        "blob_items": {"required": True},
+    }
+
+    _attribute_map = {
+        "blob_prefixes": {"key": "BlobPrefixes", "type": "[BlobPrefix]"},
+        "blob_items": {"key": "BlobItems", "type": "[BlobItemInternal]", "xml": {"itemsName": "Blob"}},
+    }
+    _xml_map = {"name": "Blobs"}
+
+    def __init__(
+        self,
+        *,
+        blob_items: List["_models.BlobItemInternal"],
+        blob_prefixes: Optional[List["_models.BlobPrefix"]] = None,
+        **kwargs: Any
+    ) -> None:
+        """
+        :keyword blob_prefixes:
+        :paramtype blob_prefixes: list[~azure.storage.filedatalake.models.BlobPrefix]
+        :keyword blob_items: Required.
+        :paramtype blob_items: list[~azure.storage.filedatalake.models.BlobItemInternal]
+        """
+        super().__init__(**kwargs)
+        self.blob_prefixes = blob_prefixes
+        self.blob_items = blob_items
+
+
+class BlobItemInternal(_serialization.Model):
+    """An Azure Storage blob.
+
+    All required parameters must be populated in order to send to the server.
+
+    :ivar name: Required.
+    :vartype name: str
+    :ivar deleted: Required.
+    :vartype deleted: bool
+    :ivar snapshot: Required.
+    :vartype snapshot: str
+    :ivar version_id:
+    :vartype version_id: str
+    :ivar is_current_version:
+    :vartype is_current_version: bool
+    :ivar properties: Properties of a blob. Required.
+    :vartype properties: ~azure.storage.filedatalake.models.BlobPropertiesInternal
+    :ivar deletion_id:
+    :vartype deletion_id: str
+    """
+
+    _validation = {
+        "name": {"required": True},
+        "deleted": {"required": True},
+        "snapshot": {"required": True},
+        "properties": {"required": True},
+    }
+
+    _attribute_map = {
+        "name": {"key": "Name", "type": "str"},
+        "deleted": {"key": "Deleted", "type": "bool"},
+        "snapshot": {"key": "Snapshot", "type": "str"},
+        "version_id": {"key": "VersionId", "type": "str"},
+        "is_current_version": {"key": "IsCurrentVersion", "type": "bool"},
+        "properties": {"key": "Properties", "type": "BlobPropertiesInternal"},
+        "deletion_id": {"key": "DeletionId", "type": "str"},
+    }
+    _xml_map = {"name": "Blob"}
+
+    def __init__(
+        self,
+        *,
+        name: str,
+        deleted: bool,
+        snapshot: str,
+        properties: "_models.BlobPropertiesInternal",
+        version_id: Optional[str] = None,
+        is_current_version: Optional[bool] = None,
+        deletion_id: Optional[str] = None,
+        **kwargs: Any
+    ) -> None:
+        """
+        :keyword name: Required.
+        :paramtype name: str
+        :keyword deleted: Required.
+        :paramtype deleted: bool
+        :keyword snapshot: Required.
+        :paramtype snapshot: str
+        :keyword version_id:
+        :paramtype version_id: str
+        :keyword is_current_version:
+        :paramtype is_current_version: bool
+        :keyword properties: Properties of a blob. Required.
+        :paramtype properties: ~azure.storage.filedatalake.models.BlobPropertiesInternal
+        :keyword deletion_id:
+        :paramtype deletion_id: str
+        """
+        super().__init__(**kwargs)
+        self.name = name
+        self.deleted = deleted
+        self.snapshot = snapshot
+        self.version_id = version_id
+        self.is_current_version = is_current_version
+        self.properties = properties
+        self.deletion_id = deletion_id
+
+
+class BlobPrefix(_serialization.Model):
+    """BlobPrefix.
+
+    All required parameters must be populated in order to send to the server.
+
+    :ivar name: Required.
+    :vartype name: str
+    """
+
+    _validation = {
+        "name": {"required": True},
+    }
+
+    _attribute_map = {
+        "name": {"key": "Name", "type": "str"},
+    }
+
+    def __init__(self, *, name: str, **kwargs: Any) -> None:
+        """
+        :keyword name: Required.
+        :paramtype name: str
+        """
+        super().__init__(**kwargs)
+        self.name = name
+
+
+class BlobPropertiesInternal(_serialization.Model):
+    """Properties of a blob.
+
+    All required parameters must be populated in order to send to the server.
+
+    :ivar creation_time:
+    :vartype creation_time: ~datetime.datetime
+    :ivar last_modified: Required.
+    :vartype last_modified: ~datetime.datetime
+    :ivar etag: Required.
+    :vartype etag: str
+    :ivar content_length: Size in bytes.
+    :vartype content_length: int
+    :ivar content_type:
+    :vartype content_type: str
+    :ivar content_encoding:
+    :vartype content_encoding: str
+    :ivar content_language:
+    :vartype content_language: str
+    :ivar content_md5:
+    :vartype content_md5: bytes
+    :ivar content_disposition:
+    :vartype content_disposition: str
+    :ivar cache_control:
+    :vartype cache_control: str
+    :ivar blob_sequence_number:
+    :vartype blob_sequence_number: int
+    :ivar copy_id:
+    :vartype copy_id: str
+    :ivar copy_source:
+    :vartype copy_source: str
+    :ivar copy_progress:
+    :vartype copy_progress: str
+    :ivar copy_completion_time:
+    :vartype copy_completion_time: ~datetime.datetime
+    :ivar copy_status_description:
+    :vartype copy_status_description: str
+    :ivar server_encrypted:
+    :vartype server_encrypted: bool
+    :ivar incremental_copy:
+    :vartype incremental_copy: bool
+    :ivar destination_snapshot:
+    :vartype destination_snapshot: str
+    :ivar deleted_time:
+    :vartype deleted_time: ~datetime.datetime
+    :ivar remaining_retention_days:
+    :vartype remaining_retention_days: int
+    :ivar access_tier_inferred:
+    :vartype access_tier_inferred: bool
+    :ivar customer_provided_key_sha256:
+    :vartype customer_provided_key_sha256: str
+    :ivar encryption_scope: The name of the encryption scope under which the blob is encrypted.
+    :vartype encryption_scope: str
+    :ivar access_tier_change_time:
+    :vartype access_tier_change_time: ~datetime.datetime
+    :ivar tag_count:
+    :vartype tag_count: int
+    :ivar expires_on:
+    :vartype expires_on: ~datetime.datetime
+    :ivar is_sealed:
+    :vartype is_sealed: bool
+    :ivar last_accessed_on:
+    :vartype last_accessed_on: ~datetime.datetime
+    :ivar delete_time:
+    :vartype delete_time: ~datetime.datetime
+    """
+
+    _validation = {
+        "last_modified": {"required": True},
+        "etag": {"required": True},
+    }
+
+    _attribute_map = {
+        "creation_time": {"key": "Creation-Time", "type": "rfc-1123"},
+        "last_modified": {"key": "Last-Modified", "type": "rfc-1123"},
+        "etag": {"key": "Etag", "type": "str"},
+        "content_length": {"key": "Content-Length", "type": "int"},
+        "content_type": {"key": "Content-Type", "type": "str"},
+        "content_encoding": {"key": "Content-Encoding", "type": "str"},
+        "content_language": {"key": "Content-Language", "type": "str"},
+        "content_md5": {"key": "Content-MD5", "type": "bytearray"},
+        "content_disposition": {"key": "Content-Disposition", "type": "str"},
+        "cache_control": {"key": "Cache-Control", "type": "str"},
+        "blob_sequence_number": {"key": "x-ms-blob-sequence-number", "type": "int"},
+        "copy_id": {"key": "CopyId", "type": "str"},
+        "copy_source": {"key": "CopySource", "type": "str"},
+        "copy_progress": {"key": "CopyProgress", "type": "str"},
+        "copy_completion_time": {"key": "CopyCompletionTime", "type": "rfc-1123"},
+        "copy_status_description": {"key": "CopyStatusDescription", "type": "str"},
+        "server_encrypted": {"key": "ServerEncrypted", "type": "bool"},
+        "incremental_copy": {"key": "IncrementalCopy", "type": "bool"},
+        "destination_snapshot": {"key": "DestinationSnapshot", "type": "str"},
+        "deleted_time": {"key": "DeletedTime", "type": "rfc-1123"},
+        "remaining_retention_days": {"key": "RemainingRetentionDays", "type": "int"},
+        "access_tier_inferred": {"key": "AccessTierInferred", "type": "bool"},
+        "customer_provided_key_sha256": {"key": "CustomerProvidedKeySha256", "type": "str"},
+        "encryption_scope": {"key": "EncryptionScope", "type": "str"},
+        "access_tier_change_time": {"key": "AccessTierChangeTime", "type": "rfc-1123"},
+        "tag_count": {"key": "TagCount", "type": "int"},
+        "expires_on": {"key": "Expiry-Time", "type": "rfc-1123"},
+        "is_sealed": {"key": "Sealed", "type": "bool"},
+        "last_accessed_on": {"key": "LastAccessTime", "type": "rfc-1123"},
+        "delete_time": {"key": "DeleteTime", "type": "rfc-1123"},
+    }
+    _xml_map = {"name": "Properties"}
+
+    def __init__(  # pylint: disable=too-many-locals
+        self,
+        *,
+        last_modified: datetime.datetime,
+        etag: str,
+        creation_time: Optional[datetime.datetime] = None,
+        content_length: Optional[int] = None,
+        content_type: Optional[str] = None,
+        content_encoding: Optional[str] = None,
+        content_language: Optional[str] = None,
+        content_md5: Optional[bytes] = None,
+        content_disposition: Optional[str] = None,
+        cache_control: Optional[str] = None,
+        blob_sequence_number: Optional[int] = None,
+        copy_id: Optional[str] = None,
+        copy_source: Optional[str] = None,
+        copy_progress: Optional[str] = None,
+        copy_completion_time: Optional[datetime.datetime] = None,
+        copy_status_description: Optional[str] = None,
+        server_encrypted: Optional[bool] = None,
+        incremental_copy: Optional[bool] = None,
+        destination_snapshot: Optional[str] = None,
+        deleted_time: Optional[datetime.datetime] = None,
+        remaining_retention_days: Optional[int] = None,
+        access_tier_inferred: Optional[bool] = None,
+        customer_provided_key_sha256: Optional[str] = None,
+        encryption_scope: Optional[str] = None,
+        access_tier_change_time: Optional[datetime.datetime] = None,
+        tag_count: Optional[int] = None,
+        expires_on: Optional[datetime.datetime] = None,
+        is_sealed: Optional[bool] = None,
+        last_accessed_on: Optional[datetime.datetime] = None,
+        delete_time: Optional[datetime.datetime] = None,
+        **kwargs: Any
+    ) -> None:
+        """
+        :keyword creation_time:
+        :paramtype creation_time: ~datetime.datetime
+        :keyword last_modified: Required.
+        :paramtype last_modified: ~datetime.datetime
+        :keyword etag: Required.
+        :paramtype etag: str
+        :keyword content_length: Size in bytes.
+        :paramtype content_length: int
+        :keyword content_type:
+        :paramtype content_type: str
+        :keyword content_encoding:
+        :paramtype content_encoding: str
+        :keyword content_language:
+        :paramtype content_language: str
+        :keyword content_md5:
+        :paramtype content_md5: bytes
+        :keyword content_disposition:
+        :paramtype content_disposition: str
+        :keyword cache_control:
+        :paramtype cache_control: str
+        :keyword blob_sequence_number:
+        :paramtype blob_sequence_number: int
+        :keyword copy_id:
+        :paramtype copy_id: str
+        :keyword copy_source:
+        :paramtype copy_source: str
+        :keyword copy_progress:
+        :paramtype copy_progress: str
+        :keyword copy_completion_time:
+        :paramtype copy_completion_time: ~datetime.datetime
+        :keyword copy_status_description:
+        :paramtype copy_status_description: str
+        :keyword server_encrypted:
+        :paramtype server_encrypted: bool
+        :keyword incremental_copy:
+        :paramtype incremental_copy: bool
+        :keyword destination_snapshot:
+        :paramtype destination_snapshot: str
+        :keyword deleted_time:
+        :paramtype deleted_time: ~datetime.datetime
+        :keyword remaining_retention_days:
+        :paramtype remaining_retention_days: int
+        :keyword access_tier_inferred:
+        :paramtype access_tier_inferred: bool
+        :keyword customer_provided_key_sha256:
+        :paramtype customer_provided_key_sha256: str
+        :keyword encryption_scope: The name of the encryption scope under which the blob is encrypted.
+        :paramtype encryption_scope: str
+        :keyword access_tier_change_time:
+        :paramtype access_tier_change_time: ~datetime.datetime
+        :keyword tag_count:
+        :paramtype tag_count: int
+        :keyword expires_on:
+        :paramtype expires_on: ~datetime.datetime
+        :keyword is_sealed:
+        :paramtype is_sealed: bool
+        :keyword last_accessed_on:
+        :paramtype last_accessed_on: ~datetime.datetime
+        :keyword delete_time:
+        :paramtype delete_time: ~datetime.datetime
+        """
+        super().__init__(**kwargs)
+        self.creation_time = creation_time
+        self.last_modified = last_modified
+        self.etag = etag
+        self.content_length = content_length
+        self.content_type = content_type
+        self.content_encoding = content_encoding
+        self.content_language = content_language
+        self.content_md5 = content_md5
+        self.content_disposition = content_disposition
+        self.cache_control = cache_control
+        self.blob_sequence_number = blob_sequence_number
+        self.copy_id = copy_id
+        self.copy_source = copy_source
+        self.copy_progress = copy_progress
+        self.copy_completion_time = copy_completion_time
+        self.copy_status_description = copy_status_description
+        self.server_encrypted = server_encrypted
+        self.incremental_copy = incremental_copy
+        self.destination_snapshot = destination_snapshot
+        self.deleted_time = deleted_time
+        self.remaining_retention_days = remaining_retention_days
+        self.access_tier_inferred = access_tier_inferred
+        self.customer_provided_key_sha256 = customer_provided_key_sha256
+        self.encryption_scope = encryption_scope
+        self.access_tier_change_time = access_tier_change_time
+        self.tag_count = tag_count
+        self.expires_on = expires_on
+        self.is_sealed = is_sealed
+        self.last_accessed_on = last_accessed_on
+        self.delete_time = delete_time
+
+
+class CpkInfo(_serialization.Model):
+    """Parameter group.
+
+    :ivar encryption_key: Optional. Specifies the encryption key to use to encrypt the data
+     provided in the request. If not specified, encryption is performed with the root account
+     encryption key.  For more information, see Encryption at Rest for Azure Storage Services.
+    :vartype encryption_key: str
+    :ivar encryption_key_sha256: The SHA-256 hash of the provided encryption key. Must be provided
+     if the x-ms-encryption-key header is provided.
+    :vartype encryption_key_sha256: str
+    :ivar encryption_algorithm: The algorithm used to produce the encryption key hash. Currently,
+     the only accepted value is "AES256". Must be provided if the x-ms-encryption-key header is
+     provided. Default value is "AES256".
+    :vartype encryption_algorithm: str
+    """
+
+    _attribute_map = {
+        "encryption_key": {"key": "encryptionKey", "type": "str"},
+        "encryption_key_sha256": {"key": "encryptionKeySha256", "type": "str"},
+        "encryption_algorithm": {"key": "encryptionAlgorithm", "type": "str"},
+    }
+
+    def __init__(
+        self,
+        *,
+        encryption_key: Optional[str] = None,
+        encryption_key_sha256: Optional[str] = None,
+        encryption_algorithm: Optional[Literal["AES256"]] = None,
+        **kwargs: Any
+    ) -> None:
+        """
+        :keyword encryption_key: Optional. Specifies the encryption key to use to encrypt the data
+         provided in the request. If not specified, encryption is performed with the root account
+         encryption key.  For more information, see Encryption at Rest for Azure Storage Services.
+        :paramtype encryption_key: str
+        :keyword encryption_key_sha256: The SHA-256 hash of the provided encryption key. Must be
+         provided if the x-ms-encryption-key header is provided.
+        :paramtype encryption_key_sha256: str
+        :keyword encryption_algorithm: The algorithm used to produce the encryption key hash.
+         Currently, the only accepted value is "AES256". Must be provided if the x-ms-encryption-key
+         header is provided. Default value is "AES256".
+        :paramtype encryption_algorithm: str
+        """
+        super().__init__(**kwargs)
+        self.encryption_key = encryption_key
+        self.encryption_key_sha256 = encryption_key_sha256
+        self.encryption_algorithm = encryption_algorithm
+
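+# Example (sketch): operations unpack this parameter group into the
+# x-ms-encryption-key/-key-sha256/-algorithm request headers, as in the path
+# operations earlier in this diff. Key material below is illustrative only.
+#
+#     import base64, hashlib
+#     key = b"0" * 32  # a 256-bit key
+#     cpk = CpkInfo(
+#         encryption_key=base64.b64encode(key).decode(),
+#         encryption_key_sha256=base64.b64encode(hashlib.sha256(key).digest()).decode(),
+#         encryption_algorithm="AES256",
+#     )
+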
+
+class FileSystem(_serialization.Model):
+    """FileSystem.
+
+    :ivar name:
+    :vartype name: str
+    :ivar last_modified:
+    :vartype last_modified: str
+    :ivar e_tag:
+    :vartype e_tag: str
+    """
+
+    _attribute_map = {
+        "name": {"key": "name", "type": "str"},
+        "last_modified": {"key": "lastModified", "type": "str"},
+        "e_tag": {"key": "eTag", "type": "str"},
+    }
+
+    def __init__(
+        self,
+        *,
+        name: Optional[str] = None,
+        last_modified: Optional[str] = None,
+        e_tag: Optional[str] = None,
+        **kwargs: Any
+    ) -> None:
+        """
+        :keyword name:
+        :paramtype name: str
+        :keyword last_modified:
+        :paramtype last_modified: str
+        :keyword e_tag:
+        :paramtype e_tag: str
+        """
+        super().__init__(**kwargs)
+        self.name = name
+        self.last_modified = last_modified
+        self.e_tag = e_tag
+
+
+class FileSystemList(_serialization.Model):
+    """FileSystemList.
+
+    :ivar filesystems:
+    :vartype filesystems: list[~azure.storage.filedatalake.models.FileSystem]
+    """
+
+    _attribute_map = {
+        "filesystems": {"key": "filesystems", "type": "[FileSystem]"},
+    }
+
+    def __init__(self, *, filesystems: Optional[List["_models.FileSystem"]] = None, **kwargs: Any) -> None:
+        """
+        :keyword filesystems:
+        :paramtype filesystems: list[~azure.storage.filedatalake.models.FileSystem]
+        """
+        super().__init__(**kwargs)
+        self.filesystems = filesystems
+
+
+class LeaseAccessConditions(_serialization.Model):
+    """Parameter group.
+
+    :ivar lease_id: If specified, the operation only succeeds if the resource's lease is active and
+     matches this ID.
+    :vartype lease_id: str
+    """
+
+    _attribute_map = {
+        "lease_id": {"key": "leaseId", "type": "str"},
+    }
+
+    def __init__(self, *, lease_id: Optional[str] = None, **kwargs: Any) -> None:
+        """
+        :keyword lease_id: If specified, the operation only succeeds if the resource's lease is active
+         and matches this ID.
+        :paramtype lease_id: str
+        """
+        super().__init__(**kwargs)
+        self.lease_id = lease_id
+
+
+class ListBlobsHierarchySegmentResponse(_serialization.Model):
+    """An enumeration of blobs.
+
+    All required parameters must be populated in order to send to the server.
+
+    :ivar service_endpoint: Required.
+    :vartype service_endpoint: str
+    :ivar container_name: Required.
+    :vartype container_name: str
+    :ivar prefix:
+    :vartype prefix: str
+    :ivar marker:
+    :vartype marker: str
+    :ivar max_results:
+    :vartype max_results: int
+    :ivar delimiter:
+    :vartype delimiter: str
+    :ivar segment: Required.
+    :vartype segment: ~azure.storage.filedatalake.models.BlobHierarchyListSegment
+    :ivar next_marker:
+    :vartype next_marker: str
+    """
+
+    _validation = {
+        "service_endpoint": {"required": True},
+        "container_name": {"required": True},
+        "segment": {"required": True},
+    }
+
+    _attribute_map = {
+        "service_endpoint": {"key": "ServiceEndpoint", "type": "str", "xml": {"attr": True}},
+        "container_name": {"key": "ContainerName", "type": "str", "xml": {"attr": True}},
+        "prefix": {"key": "Prefix", "type": "str"},
+        "marker": {"key": "Marker", "type": "str"},
+        "max_results": {"key": "MaxResults", "type": "int"},
+        "delimiter": {"key": "Delimiter", "type": "str"},
+        "segment": {"key": "Segment", "type": "BlobHierarchyListSegment"},
+        "next_marker": {"key": "NextMarker", "type": "str"},
+    }
+    _xml_map = {"name": "EnumerationResults"}
+
+    def __init__(
+        self,
+        *,
+        service_endpoint: str,
+        container_name: str,
+        segment: "_models.BlobHierarchyListSegment",
+        prefix: Optional[str] = None,
+        marker: Optional[str] = None,
+        max_results: Optional[int] = None,
+        delimiter: Optional[str] = None,
+        next_marker: Optional[str] = None,
+        **kwargs: Any
+    ) -> None:
+        """
+        :keyword service_endpoint: Required.
+        :paramtype service_endpoint: str
+        :keyword container_name: Required.
+        :paramtype container_name: str
+        :keyword prefix:
+        :paramtype prefix: str
+        :keyword marker:
+        :paramtype marker: str
+        :keyword max_results:
+        :paramtype max_results: int
+        :keyword delimiter:
+        :paramtype delimiter: str
+        :keyword segment: Required.
+        :paramtype segment: ~azure.storage.filedatalake.models.BlobHierarchyListSegment
+        :keyword next_marker:
+        :paramtype next_marker: str
+        """
+        super().__init__(**kwargs)
+        self.service_endpoint = service_endpoint
+        self.container_name = container_name
+        self.prefix = prefix
+        self.marker = marker
+        self.max_results = max_results
+        self.delimiter = delimiter
+        self.segment = segment
+        self.next_marker = next_marker
+
+
+class ModifiedAccessConditions(_serialization.Model):
+    """Parameter group.
+
+    :ivar if_modified_since: Specify this header value to operate only on a blob if it has been
+     modified since the specified date/time.
+    :vartype if_modified_since: ~datetime.datetime
+    :ivar if_unmodified_since: Specify this header value to operate only on a blob if it has not
+     been modified since the specified date/time.
+    :vartype if_unmodified_since: ~datetime.datetime
+    :ivar if_match: Specify an ETag value to operate only on blobs with a matching value.
+    :vartype if_match: str
+    :ivar if_none_match: Specify an ETag value to operate only on blobs without a matching value.
+    :vartype if_none_match: str
+    """
+
+    _attribute_map = {
+        "if_modified_since": {"key": "ifModifiedSince", "type": "rfc-1123"},
+        "if_unmodified_since": {"key": "ifUnmodifiedSince", "type": "rfc-1123"},
+        "if_match": {"key": "ifMatch", "type": "str"},
+        "if_none_match": {"key": "ifNoneMatch", "type": "str"},
+    }
+
+    def __init__(
+        self,
+        *,
+        if_modified_since: Optional[datetime.datetime] = None,
+        if_unmodified_since: Optional[datetime.datetime] = None,
+        if_match: Optional[str] = None,
+        if_none_match: Optional[str] = None,
+        **kwargs: Any
+    ) -> None:
+        """
+        :keyword if_modified_since: Specify this header value to operate only on a blob if it has been
+         modified since the specified date/time.
+        :paramtype if_modified_since: ~datetime.datetime
+        :keyword if_unmodified_since: Specify this header value to operate only on a blob if it has not
+         been modified since the specified date/time.
+        :paramtype if_unmodified_since: ~datetime.datetime
+        :keyword if_match: Specify an ETag value to operate only on blobs with a matching value.
+        :paramtype if_match: str
+        :keyword if_none_match: Specify an ETag value to operate only on blobs without a matching
+         value.
+        :paramtype if_none_match: str
+        """
+        super().__init__(**kwargs)
+        self.if_modified_since = if_modified_since
+        self.if_unmodified_since = if_unmodified_since
+        self.if_match = if_match
+        self.if_none_match = if_none_match
+
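+# Example (sketch): optimistic concurrency typically pairs a previously read
+# ETag with if_match, so the operation fails with 412 (Precondition Failed) if
+# the resource changed; if_none_match="*" instead succeeds only on creation.
+#
+#     conditions = ModifiedAccessConditions(if_match='"0x8DD0000000000000"')
+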
+
+class Path(_serialization.Model):
+    """Path.
+
+    :ivar name:
+    :vartype name: str
+    :ivar is_directory:
+    :vartype is_directory: bool
+    :ivar last_modified:
+    :vartype last_modified: str
+    :ivar e_tag:
+    :vartype e_tag: str
+    :ivar content_length:
+    :vartype content_length: int
+    :ivar owner:
+    :vartype owner: str
+    :ivar group:
+    :vartype group: str
+    :ivar permissions:
+    :vartype permissions: str
+    :ivar encryption_scope: The name of the encryption scope under which the blob is encrypted.
+    :vartype encryption_scope: str
+    :ivar creation_time:
+    :vartype creation_time: str
+    :ivar expiry_time:
+    :vartype expiry_time: str
+    :ivar encryption_context:
+    :vartype encryption_context: str
+    """
+
+    _attribute_map = {
+        "name": {"key": "name", "type": "str"},
+        "is_directory": {"key": "isDirectory", "type": "bool"},
+        "last_modified": {"key": "lastModified", "type": "str"},
+        "e_tag": {"key": "eTag", "type": "str"},
+        "content_length": {"key": "contentLength", "type": "int"},
+        "owner": {"key": "owner", "type": "str"},
+        "group": {"key": "group", "type": "str"},
+        "permissions": {"key": "permissions", "type": "str"},
+        "encryption_scope": {"key": "EncryptionScope", "type": "str"},
+        "creation_time": {"key": "creationTime", "type": "str"},
+        "expiry_time": {"key": "expiryTime", "type": "str"},
+        "encryption_context": {"key": "EncryptionContext", "type": "str"},
+    }
+
+    def __init__(
+        self,
+        *,
+        name: Optional[str] = None,
+        is_directory: bool = False,
+        last_modified: Optional[str] = None,
+        e_tag: Optional[str] = None,
+        content_length: Optional[int] = None,
+        owner: Optional[str] = None,
+        group: Optional[str] = None,
+        permissions: Optional[str] = None,
+        encryption_scope: Optional[str] = None,
+        creation_time: Optional[str] = None,
+        expiry_time: Optional[str] = None,
+        encryption_context: Optional[str] = None,
+        **kwargs: Any
+    ) -> None:
+        """
+        :keyword name:
+        :paramtype name: str
+        :keyword is_directory:
+        :paramtype is_directory: bool
+        :keyword last_modified:
+        :paramtype last_modified: str
+        :keyword e_tag:
+        :paramtype e_tag: str
+        :keyword content_length:
+        :paramtype content_length: int
+        :keyword owner:
+        :paramtype owner: str
+        :keyword group:
+        :paramtype group: str
+        :keyword permissions:
+        :paramtype permissions: str
+        :keyword encryption_scope: The name of the encryption scope under which the blob is encrypted.
+        :paramtype encryption_scope: str
+        :keyword creation_time:
+        :paramtype creation_time: str
+        :keyword expiry_time:
+        :paramtype expiry_time: str
+        :keyword encryption_context:
+        :paramtype encryption_context: str
+        """
+        super().__init__(**kwargs)
+        self.name = name
+        self.is_directory = is_directory
+        self.last_modified = last_modified
+        self.e_tag = e_tag
+        self.content_length = content_length
+        self.owner = owner
+        self.group = group
+        self.permissions = permissions
+        self.encryption_scope = encryption_scope
+        self.creation_time = creation_time
+        self.expiry_time = expiry_time
+        self.encryption_context = encryption_context
+
+
+class PathHTTPHeaders(_serialization.Model):
+    """Parameter group.
+
+    :ivar cache_control: Optional. Sets the blob's cache control. If specified, this property is
+     stored with the blob and returned with a read request.
+    :vartype cache_control: str
+    :ivar content_encoding: Optional. Sets the blob's content encoding. If specified, this property
+     is stored with the blob and returned with a read request.
+    :vartype content_encoding: str
+    :ivar content_language: Optional. Set the blob's content language. If specified, this property
+     is stored with the blob and returned with a read request.
+    :vartype content_language: str
+    :ivar content_disposition: Optional. Sets the blob's Content-Disposition header.
+    :vartype content_disposition: str
+    :ivar content_type: Optional. Sets the blob's content type. If specified, this property is
+     stored with the blob and returned with a read request.
+    :vartype content_type: str
+    :ivar content_md5: Specify the transactional md5 for the body, to be validated by the service.
+    :vartype content_md5: bytes
+    :ivar transactional_content_hash: Specify the transactional md5 for the body, to be validated
+     by the service.
+    :vartype transactional_content_hash: bytes
+    """
+
+    _attribute_map = {
+        "cache_control": {"key": "cacheControl", "type": "str"},
+        "content_encoding": {"key": "contentEncoding", "type": "str"},
+        "content_language": {"key": "contentLanguage", "type": "str"},
+        "content_disposition": {"key": "contentDisposition", "type": "str"},
+        "content_type": {"key": "contentType", "type": "str"},
+        "content_md5": {"key": "contentMD5", "type": "bytearray"},
+        "transactional_content_hash": {"key": "transactionalContentHash", "type": "bytearray"},
+    }
+
+    def __init__(
+        self,
+        *,
+        cache_control: Optional[str] = None,
+        content_encoding: Optional[str] = None,
+        content_language: Optional[str] = None,
+        content_disposition: Optional[str] = None,
+        content_type: Optional[str] = None,
+        content_md5: Optional[bytes] = None,
+        transactional_content_hash: Optional[bytes] = None,
+        **kwargs: Any
+    ) -> None:
+        """
+        :keyword cache_control: Optional. Sets the blob's cache control. If specified, this property is
+         stored with the blob and returned with a read request.
+        :paramtype cache_control: str
+        :keyword content_encoding: Optional. Sets the blob's content encoding. If specified, this
+         property is stored with the blob and returned with a read request.
+        :paramtype content_encoding: str
+        :keyword content_language: Optional. Set the blob's content language. If specified, this
+         property is stored with the blob and returned with a read request.
+        :paramtype content_language: str
+        :keyword content_disposition: Optional. Sets the blob's Content-Disposition header.
+        :paramtype content_disposition: str
+        :keyword content_type: Optional. Sets the blob's content type. If specified, this property is
+         stored with the blob and returned with a read request.
+        :paramtype content_type: str
+        :keyword content_md5: Specify the transactional md5 for the body, to be validated by the
+         service.
+        :paramtype content_md5: bytes
+        :keyword transactional_content_hash: Specify the transactional md5 for the body, to be
+         validated by the service.
+        :paramtype transactional_content_hash: bytes
+        """
+        super().__init__(**kwargs)
+        self.cache_control = cache_control
+        self.content_encoding = content_encoding
+        self.content_language = content_language
+        self.content_disposition = content_disposition
+        self.content_type = content_type
+        self.content_md5 = content_md5
+        self.transactional_content_hash = transactional_content_hash
+
+
+class PathList(_serialization.Model):
+    """PathList.
+
+    :ivar paths:
+    :vartype paths: list[~azure.storage.filedatalake.models.Path]
+    """
+
+    _attribute_map = {
+        "paths": {"key": "paths", "type": "[Path]"},
+    }
+
+    def __init__(self, *, paths: Optional[List["_models.Path"]] = None, **kwargs: Any) -> None:
+        """
+        :keyword paths:
+        :paramtype paths: list[~azure.storage.filedatalake.models.Path]
+        """
+        super().__init__(**kwargs)
+        self.paths = paths
+
+
+class SetAccessControlRecursiveResponse(_serialization.Model):
+    """SetAccessControlRecursiveResponse.
+
+    :ivar directories_successful:
+    :vartype directories_successful: int
+    :ivar files_successful:
+    :vartype files_successful: int
+    :ivar failure_count:
+    :vartype failure_count: int
+    :ivar failed_entries:
+    :vartype failed_entries: list[~azure.storage.filedatalake.models.AclFailedEntry]
+    """
+
+    _attribute_map = {
+        "directories_successful": {"key": "directoriesSuccessful", "type": "int"},
+        "files_successful": {"key": "filesSuccessful", "type": "int"},
+        "failure_count": {"key": "failureCount", "type": "int"},
+        "failed_entries": {"key": "failedEntries", "type": "[AclFailedEntry]"},
+    }
+
+    def __init__(
+        self,
+        *,
+        directories_successful: Optional[int] = None,
+        files_successful: Optional[int] = None,
+        failure_count: Optional[int] = None,
+        failed_entries: Optional[List["_models.AclFailedEntry"]] = None,
+        **kwargs: Any
+    ) -> None:
+        """
+        :keyword directories_successful:
+        :paramtype directories_successful: int
+        :keyword files_successful:
+        :paramtype files_successful: int
+        :keyword failure_count:
+        :paramtype failure_count: int
+        :keyword failed_entries:
+        :paramtype failed_entries: list[~azure.storage.filedatalake.models.AclFailedEntry]
+        """
+        super().__init__(**kwargs)
+        self.directories_successful = directories_successful
+        self.files_successful = files_successful
+        self.failure_count = failure_count
+        self.failed_entries = failed_entries
+
+
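+# Editorial sketch (not generated code): tallying the outcome of a recursive
+# ACL operation from the response model above; the helper name is hypothetical,
+# the fields are the model's own.
+def _example_summarize_acl_result(resp: SetAccessControlRecursiveResponse) -> str:  # pragma: no cover
+    done = (resp.directories_successful or 0) + (resp.files_successful or 0)
+    failed = resp.failure_count or 0
+    return f"{done} entries updated, {failed} failed"
+
+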
+class SourceModifiedAccessConditions(_serialization.Model):
+    """Parameter group.
+
+    :ivar source_if_match: Specify an ETag value to operate only on blobs with a matching value.
+    :vartype source_if_match: str
+    :ivar source_if_none_match: Specify an ETag value to operate only on blobs without a matching
+     value.
+    :vartype source_if_none_match: str
+    :ivar source_if_modified_since: Specify this header value to operate only on a blob if it has
+     been modified since the specified date/time.
+    :vartype source_if_modified_since: ~datetime.datetime
+    :ivar source_if_unmodified_since: Specify this header value to operate only on a blob if it has
+     not been modified since the specified date/time.
+    :vartype source_if_unmodified_since: ~datetime.datetime
+    """
+
+    _attribute_map = {
+        "source_if_match": {"key": "sourceIfMatch", "type": "str"},
+        "source_if_none_match": {"key": "sourceIfNoneMatch", "type": "str"},
+        "source_if_modified_since": {"key": "sourceIfModifiedSince", "type": "rfc-1123"},
+        "source_if_unmodified_since": {"key": "sourceIfUnmodifiedSince", "type": "rfc-1123"},
+    }
+
+    def __init__(
+        self,
+        *,
+        source_if_match: Optional[str] = None,
+        source_if_none_match: Optional[str] = None,
+        source_if_modified_since: Optional[datetime.datetime] = None,
+        source_if_unmodified_since: Optional[datetime.datetime] = None,
+        **kwargs: Any
+    ) -> None:
+        """
+        :keyword source_if_match: Specify an ETag value to operate only on blobs with a matching value.
+        :paramtype source_if_match: str
+        :keyword source_if_none_match: Specify an ETag value to operate only on blobs without a
+         matching value.
+        :paramtype source_if_none_match: str
+        :keyword source_if_modified_since: Specify this header value to operate only on a blob if it
+         has been modified since the specified date/time.
+        :paramtype source_if_modified_since: ~datetime.datetime
+        :keyword source_if_unmodified_since: Specify this header value to operate only on a blob if it
+         has not been modified since the specified date/time.
+        :paramtype source_if_unmodified_since: ~datetime.datetime
+        """
+        super().__init__(**kwargs)
+        self.source_if_match = source_if_match
+        self.source_if_none_match = source_if_none_match
+        self.source_if_modified_since = source_if_modified_since
+        self.source_if_unmodified_since = source_if_unmodified_since
+
+
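+# Editorial sketch (not generated code): a minimal source-condition group that
+# gates a copy on the source still carrying a known ETag; the ETag value is
+# hypothetical.
+def _example_source_conditions(etag: str = '"0x8D4BCC2E4835CD0"') -> SourceModifiedAccessConditions:  # pragma: no cover
+    return SourceModifiedAccessConditions(source_if_match=etag)
+
+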
+class StorageError(_serialization.Model):
+    """StorageError.
+
+    :ivar error: The service error response object.
+    :vartype error: ~azure.storage.filedatalake.models.StorageErrorError
+    """
+
+    _attribute_map = {
+        "error": {"key": "error", "type": "StorageErrorError"},
+    }
+
+    def __init__(self, *, error: Optional["_models.StorageErrorError"] = None, **kwargs: Any) -> None:
+        """
+        :keyword error: The service error response object.
+        :paramtype error: ~azure.storage.filedatalake.models.StorageErrorError
+        """
+        super().__init__(**kwargs)
+        self.error = error
+
+
+class StorageErrorError(_serialization.Model):
+    """The service error response object.
+
+    :ivar code: The service error code.
+    :vartype code: str
+    :ivar message: The service error message.
+    :vartype message: str
+    """
+
+    _attribute_map = {
+        "code": {"key": "Code", "type": "str"},
+        "message": {"key": "Message", "type": "str"},
+    }
+
+    def __init__(self, *, code: Optional[str] = None, message: Optional[str] = None, **kwargs: Any) -> None:
+        """
+        :keyword code: The service error code.
+        :paramtype code: str
+        :keyword message: The service error message.
+        :paramtype message: str
+        """
+        super().__init__(**kwargs)
+        self.code = code
+        self.message = message
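+
+
+# Editorial sketch (not generated code): how the two error models nest. Real
+# instances are produced by failsafe_deserialize in the operations modules;
+# the code and message below are illustrative.
+def _example_storage_error() -> StorageError:  # pragma: no cover
+    inner = StorageErrorError(code="PathNotFound", message="The specified path does not exist.")
+    return StorageError(error=inner)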
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/models/_patch.py b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/models/_patch.py
new file mode 100644
index 00000000..f7dd3251
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/models/_patch.py
@@ -0,0 +1,20 @@
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------
+"""Customize generated code here.
+
+Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize
+"""
+from typing import List
+
+__all__: List[str] = []  # Add all objects you want publicly available to users at this package level
+
+
+def patch_sdk():
+    """Do not remove from this file.
+
+    `patch_sdk` is a last resort escape hatch that allows you to do customizations
+    you can't accomplish using the techniques described in
+    https://aka.ms/azsdk/python/dpcodegen/python/customize
+    """
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/operations/__init__.py b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/operations/__init__.py
new file mode 100644
index 00000000..56a7ece3
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/operations/__init__.py
@@ -0,0 +1,29 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+# pylint: disable=wrong-import-position
+
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from ._patch import *  # pylint: disable=unused-wildcard-import
+
+from ._service_operations import ServiceOperations  # type: ignore
+from ._file_system_operations import FileSystemOperations  # type: ignore
+from ._path_operations import PathOperations  # type: ignore
+
+from ._patch import __all__ as _patch_all
+from ._patch import *
+from ._patch import patch_sdk as _patch_sdk
+
+__all__ = [
+    "ServiceOperations",
+    "FileSystemOperations",
+    "PathOperations",
+]
+__all__.extend([p for p in _patch_all if p not in __all__])  # pyright: ignore
+_patch_sdk()
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/operations/_file_system_operations.py b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/operations/_file_system_operations.py
new file mode 100644
index 00000000..235402a1
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/operations/_file_system_operations.py
@@ -0,0 +1,888 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+import datetime
+import sys
+from typing import Any, Callable, Dict, List, Literal, Optional, TypeVar, Union
+
+from azure.core import PipelineClient
+from azure.core.exceptions import (
+    ClientAuthenticationError,
+    HttpResponseError,
+    ResourceExistsError,
+    ResourceNotFoundError,
+    ResourceNotModifiedError,
+    map_error,
+)
+from azure.core.pipeline import PipelineResponse
+from azure.core.rest import HttpRequest, HttpResponse
+from azure.core.tracing.decorator import distributed_trace
+from azure.core.utils import case_insensitive_dict
+
+from .. import models as _models
+from .._configuration import AzureDataLakeStorageRESTAPIConfiguration
+from .._serialization import Deserializer, Serializer
+
+if sys.version_info >= (3, 9):
+    from collections.abc import MutableMapping
+else:
+    from typing import MutableMapping  # type: ignore
+T = TypeVar("T")
+ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
+
+_SERIALIZER = Serializer()
+_SERIALIZER.client_side_validation = False
+
+
+def build_create_request(
+    url: str,
+    *,
+    request_id_parameter: Optional[str] = None,
+    timeout: Optional[int] = None,
+    properties: Optional[str] = None,
+    **kwargs: Any
+) -> HttpRequest:
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    resource: Literal["filesystem"] = kwargs.pop("resource", _params.pop("resource", "filesystem"))
+    version: Literal["2025-01-05"] = kwargs.pop("version", _headers.pop("x-ms-version", "2025-01-05"))
+    accept = _headers.pop("Accept", "application/json")
+
+    # Construct URL
+    _url = kwargs.pop("template_url", "{url}")
+    path_format_arguments = {
+        "url": _SERIALIZER.url("url", url, "str", skip_quote=True),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    _params["resource"] = _SERIALIZER.query("resource", resource, "str")
+    if timeout is not None:
+        _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0)
+
+    # Construct headers
+    if request_id_parameter is not None:
+        _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str")
+    _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str")
+    if properties is not None:
+        _headers["x-ms-properties"] = _SERIALIZER.header("properties", properties, "str")
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs)
+
+
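+# Editorial sketch (not generated code): exercising the request builder in
+# isolation. The account URL and property string are hypothetical; property
+# values must be base64-encoded ("cHJvag==" is base64 for "proj").
+def _example_build_create_request() -> HttpRequest:  # pragma: no cover
+    request = build_create_request(
+        url="https://myaccount.dfs.core.windows.net/myfs",
+        timeout=30,
+        properties="project=cHJvag==",
+    )
+    assert request.method == "PUT"  # filesystem creation is a PUT with resource=filesystem
+    return request
+
+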
+def build_set_properties_request(
+    url: str,
+    *,
+    request_id_parameter: Optional[str] = None,
+    timeout: Optional[int] = None,
+    properties: Optional[str] = None,
+    if_modified_since: Optional[datetime.datetime] = None,
+    if_unmodified_since: Optional[datetime.datetime] = None,
+    **kwargs: Any
+) -> HttpRequest:
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    resource: Literal["filesystem"] = kwargs.pop("resource", _params.pop("resource", "filesystem"))
+    version: Literal["2025-01-05"] = kwargs.pop("version", _headers.pop("x-ms-version", "2025-01-05"))
+    accept = _headers.pop("Accept", "application/json")
+
+    # Construct URL
+    _url = kwargs.pop("template_url", "{url}")
+    path_format_arguments = {
+        "url": _SERIALIZER.url("url", url, "str", skip_quote=True),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    _params["resource"] = _SERIALIZER.query("resource", resource, "str")
+    if timeout is not None:
+        _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0)
+
+    # Construct headers
+    if request_id_parameter is not None:
+        _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str")
+    _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str")
+    if properties is not None:
+        _headers["x-ms-properties"] = _SERIALIZER.header("properties", properties, "str")
+    if if_modified_since is not None:
+        _headers["If-Modified-Since"] = _SERIALIZER.header("if_modified_since", if_modified_since, "rfc-1123")
+    if if_unmodified_since is not None:
+        _headers["If-Unmodified-Since"] = _SERIALIZER.header("if_unmodified_since", if_unmodified_since, "rfc-1123")
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="PATCH", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_get_properties_request(
+    url: str, *, request_id_parameter: Optional[str] = None, timeout: Optional[int] = None, **kwargs: Any
+) -> HttpRequest:
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    resource: Literal["filesystem"] = kwargs.pop("resource", _params.pop("resource", "filesystem"))
+    version: Literal["2025-01-05"] = kwargs.pop("version", _headers.pop("x-ms-version", "2025-01-05"))
+    accept = _headers.pop("Accept", "application/json")
+
+    # Construct URL
+    _url = kwargs.pop("template_url", "{url}")
+    path_format_arguments = {
+        "url": _SERIALIZER.url("url", url, "str", skip_quote=True),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    _params["resource"] = _SERIALIZER.query("resource", resource, "str")
+    if timeout is not None:
+        _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0)
+
+    # Construct headers
+    if request_id_parameter is not None:
+        _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str")
+    _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str")
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="HEAD", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_delete_request(
+    url: str,
+    *,
+    request_id_parameter: Optional[str] = None,
+    timeout: Optional[int] = None,
+    if_modified_since: Optional[datetime.datetime] = None,
+    if_unmodified_since: Optional[datetime.datetime] = None,
+    **kwargs: Any
+) -> HttpRequest:
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    resource: Literal["filesystem"] = kwargs.pop("resource", _params.pop("resource", "filesystem"))
+    version: Literal["2025-01-05"] = kwargs.pop("version", _headers.pop("x-ms-version", "2025-01-05"))
+    accept = _headers.pop("Accept", "application/json")
+
+    # Construct URL
+    _url = kwargs.pop("template_url", "{url}")
+    path_format_arguments = {
+        "url": _SERIALIZER.url("url", url, "str", skip_quote=True),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    _params["resource"] = _SERIALIZER.query("resource", resource, "str")
+    if timeout is not None:
+        _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0)
+
+    # Construct headers
+    if request_id_parameter is not None:
+        _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str")
+    _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str")
+    if if_modified_since is not None:
+        _headers["If-Modified-Since"] = _SERIALIZER.header("if_modified_since", if_modified_since, "rfc-1123")
+    if if_unmodified_since is not None:
+        _headers["If-Unmodified-Since"] = _SERIALIZER.header("if_unmodified_since", if_unmodified_since, "rfc-1123")
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="DELETE", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_list_paths_request(
+    url: str,
+    *,
+    recursive: bool,
+    request_id_parameter: Optional[str] = None,
+    timeout: Optional[int] = None,
+    continuation: Optional[str] = None,
+    path: Optional[str] = None,
+    max_results: Optional[int] = None,
+    upn: Optional[bool] = None,
+    **kwargs: Any
+) -> HttpRequest:
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    resource: Literal["filesystem"] = kwargs.pop("resource", _params.pop("resource", "filesystem"))
+    version: Literal["2025-01-05"] = kwargs.pop("version", _headers.pop("x-ms-version", "2025-01-05"))
+    accept = _headers.pop("Accept", "application/json")
+
+    # Construct URL
+    _url = kwargs.pop("template_url", "{url}")
+    path_format_arguments = {
+        "url": _SERIALIZER.url("url", url, "str", skip_quote=True),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    _params["resource"] = _SERIALIZER.query("resource", resource, "str")
+    if timeout is not None:
+        _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0)
+    if continuation is not None:
+        _params["continuation"] = _SERIALIZER.query("continuation", continuation, "str")
+    if path is not None:
+        _params["directory"] = _SERIALIZER.query("path", path, "str")
+    _params["recursive"] = _SERIALIZER.query("recursive", recursive, "bool")
+    if max_results is not None:
+        _params["maxResults"] = _SERIALIZER.query("max_results", max_results, "int", minimum=1)
+    if upn is not None:
+        _params["upn"] = _SERIALIZER.query("upn", upn, "bool")
+
+    # Construct headers
+    if request_id_parameter is not None:
+        _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str")
+    _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str")
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
+
+
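+# Editorial sketch (not generated code): the paths listing is a GET whose
+# directory filter is sent as the "directory" query parameter; the URL and
+# directory below are hypothetical.
+def _example_build_list_paths_request() -> HttpRequest:  # pragma: no cover
+    return build_list_paths_request(
+        url="https://myaccount.dfs.core.windows.net/myfs",
+        recursive=True,
+        path="data/2024",
+        max_results=1000,
+    )
+
+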
+def build_list_blob_hierarchy_segment_request(  # pylint: disable=name-too-long
+    url: str,
+    *,
+    prefix: Optional[str] = None,
+    delimiter: Optional[str] = None,
+    marker: Optional[str] = None,
+    max_results: Optional[int] = None,
+    include: Optional[List[Union[str, _models.ListBlobsIncludeItem]]] = None,
+    showonly: Literal["deleted"] = "deleted",
+    timeout: Optional[int] = None,
+    request_id_parameter: Optional[str] = None,
+    **kwargs: Any
+) -> HttpRequest:
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    restype: Literal["container"] = kwargs.pop("restype", _params.pop("restype", "container"))
+    comp: Literal["list"] = kwargs.pop("comp", _params.pop("comp", "list"))
+    version: Literal["2025-01-05"] = kwargs.pop("version", _headers.pop("x-ms-version", "2025-01-05"))
+    accept = _headers.pop("Accept", "application/xml")
+
+    # Construct URL
+    _url = kwargs.pop("template_url", "{url}")
+    path_format_arguments = {
+        "url": _SERIALIZER.url("url", url, "str", skip_quote=True),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    _params["restype"] = _SERIALIZER.query("restype", restype, "str")
+    _params["comp"] = _SERIALIZER.query("comp", comp, "str")
+    if prefix is not None:
+        _params["prefix"] = _SERIALIZER.query("prefix", prefix, "str")
+    if delimiter is not None:
+        _params["delimiter"] = _SERIALIZER.query("delimiter", delimiter, "str")
+    if marker is not None:
+        _params["marker"] = _SERIALIZER.query("marker", marker, "str")
+    if max_results is not None:
+        _params["maxResults"] = _SERIALIZER.query("max_results", max_results, "int", minimum=1)
+    if include is not None:
+        _params["include"] = _SERIALIZER.query("include", include, "[str]", div=",")
+    if showonly is not None:
+        _params["showonly"] = _SERIALIZER.query("showonly", showonly, "str")
+    if timeout is not None:
+        _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0)
+
+    # Construct headers
+    _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str")
+    if request_id_parameter is not None:
+        _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str")
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+class FileSystemOperations:
+    """
+    .. warning::
+        **DO NOT** instantiate this class directly.
+
+        Instead, you should access the following operations through
+        :class:`~azure.storage.filedatalake.AzureDataLakeStorageRESTAPI`'s
+        :attr:`file_system` attribute.
+    """
+
+    models = _models
+
+    def __init__(self, *args, **kwargs):
+        input_args = list(args)
+        self._client: PipelineClient = input_args.pop(0) if input_args else kwargs.pop("client")
+        self._config: AzureDataLakeStorageRESTAPIConfiguration = (
+            input_args.pop(0) if input_args else kwargs.pop("config")
+        )
+        self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer")
+        self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer")
+
+    @distributed_trace
+    def create(  # pylint: disable=inconsistent-return-statements
+        self,
+        request_id_parameter: Optional[str] = None,
+        timeout: Optional[int] = None,
+        properties: Optional[str] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """Create FileSystem.
+
+        Create a FileSystem rooted at the specified location. If the FileSystem already exists, the
+        operation fails.  This operation does not support conditional HTTP requests.
+
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param properties: Optional. User-defined properties to be stored with the filesystem, in the
+         format of a comma-separated list of name and value pairs "n1=v1, n2=v2, ...", where each value
+         is a base64 encoded string. Note that the string may only contain ASCII characters in the
+         ISO-8859-1 character set.  If the filesystem exists, any properties not included in the list
+         will be removed.  All properties are removed if the header is omitted.  To merge new and
+         existing properties, first get all existing properties and the current E-Tag, then make a
+         conditional request with the E-Tag and include values for all properties. Default value is
+         None.
+        :type properties: str
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = kwargs.pop("params", {}) or {}
+
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _request = build_create_request(
+            url=self._config.url,
+            request_id_parameter=request_id_parameter,
+            timeout=timeout,
+            properties=properties,
+            resource=self._config.resource,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [201]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["x-ms-namespace-enabled"] = self._deserialize(
+            "str", response.headers.get("x-ms-namespace-enabled")
+        )
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
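+    def _example_create(self) -> None:  # pragma: no cover
+        """Editorial sketch, not generated code: create a filesystem with one
+        user-defined property. Values must be base64-encoded per the docstring
+        above ("cHJvag==" is base64 for "proj")."""
+        self.create(properties="project=cHJvag==", timeout=30)
+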
+    @distributed_trace
+    def set_properties(  # pylint: disable=inconsistent-return-statements
+        self,
+        request_id_parameter: Optional[str] = None,
+        timeout: Optional[int] = None,
+        properties: Optional[str] = None,
+        modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """Set FileSystem Properties.
+
+        Set properties for the FileSystem.  This operation supports conditional HTTP requests.  For
+        more information, see `Specifying Conditional Headers for Blob Service Operations
+        <https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations>`_.
+
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param properties: Optional. User-defined properties to be stored with the filesystem, in the
+         format of a comma-separated list of name and value pairs "n1=v1, n2=v2, ...", where each value
+         is a base64 encoded string. Note that the string may only contain ASCII characters in the
+         ISO-8859-1 character set.  If the filesystem exists, any properties not included in the list
+         will be removed.  All properties are removed if the header is omitted.  To merge new and
+         existing properties, first get all existing properties and the current E-Tag, then make a
+         conditional request with the E-Tag and include values for all properties. Default value is
+         None.
+        :type properties: str
+        :param modified_access_conditions: Parameter group. Default value is None.
+        :type modified_access_conditions: ~azure.storage.filedatalake.models.ModifiedAccessConditions
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = kwargs.pop("params", {}) or {}
+
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _if_modified_since = None
+        _if_unmodified_since = None
+        if modified_access_conditions is not None:
+            _if_modified_since = modified_access_conditions.if_modified_since
+            _if_unmodified_since = modified_access_conditions.if_unmodified_since
+
+        _request = build_set_properties_request(
+            url=self._config.url,
+            request_id_parameter=request_id_parameter,
+            timeout=timeout,
+            properties=properties,
+            if_modified_since=_if_modified_since,
+            if_unmodified_since=_if_unmodified_since,
+            resource=self._config.resource,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
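+    def _example_merge_properties(self) -> None:  # pragma: no cover
+        """Editorial sketch, not generated code: the merge recipe from the
+        docstring above. This parameter group exposes only date conditions, so
+        the sketch pins the write on Last-Modified rather than the ETag;
+        "team=ZGV2" is a hypothetical property (base64 for "dev")."""
+        headers: Dict[str, Any] = self.get_properties(cls=lambda pr, d, h: h)
+        merged = ",".join(filter(None, [headers.get("x-ms-properties"), "team=ZGV2"]))
+        self.set_properties(
+            properties=merged,
+            modified_access_conditions=_models.ModifiedAccessConditions(
+                if_unmodified_since=headers.get("Last-Modified")
+            ),
+        )
+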
+    @distributed_trace
+    def get_properties(  # pylint: disable=inconsistent-return-statements
+        self, request_id_parameter: Optional[str] = None, timeout: Optional[int] = None, **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """Get FileSystem Properties.
+
+        All system and user-defined filesystem properties are returned in the response headers.
+
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = kwargs.pop("params", {}) or {}
+
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _request = build_get_properties_request(
+            url=self._config.url,
+            request_id_parameter=request_id_parameter,
+            timeout=timeout,
+            resource=self._config.resource,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["x-ms-properties"] = self._deserialize("str", response.headers.get("x-ms-properties"))
+        response_headers["x-ms-namespace-enabled"] = self._deserialize(
+            "str", response.headers.get("x-ms-namespace-enabled")
+        )
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
+    @distributed_trace
+    def delete(  # pylint: disable=inconsistent-return-statements
+        self,
+        request_id_parameter: Optional[str] = None,
+        timeout: Optional[int] = None,
+        modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """Delete FileSystem.
+
+        Marks the FileSystem for deletion.  When a FileSystem is deleted, a FileSystem with the same
+        identifier cannot be created for at least 30 seconds. While the filesystem is being deleted,
+        attempts to create a filesystem with the same identifier will fail with status code 409
+        (Conflict), with the service returning additional error information indicating that the
+        filesystem is being deleted. All other operations, including operations on any files or
+        directories within the filesystem, will fail with status code 404 (Not Found) while the
+        filesystem is being deleted. This operation supports conditional HTTP requests.  For more
+        information, see `Specifying Conditional Headers for Blob Service Operations
+        <https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations>`_.
+
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param modified_access_conditions: Parameter group. Default value is None.
+        :type modified_access_conditions: ~azure.storage.filedatalake.models.ModifiedAccessConditions
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = kwargs.pop("params", {}) or {}
+
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _if_modified_since = None
+        _if_unmodified_since = None
+        if modified_access_conditions is not None:
+            _if_modified_since = modified_access_conditions.if_modified_since
+            _if_unmodified_since = modified_access_conditions.if_unmodified_since
+
+        _request = build_delete_request(
+            url=self._config.url,
+            request_id_parameter=request_id_parameter,
+            timeout=timeout,
+            if_modified_since=_if_modified_since,
+            if_unmodified_since=_if_unmodified_since,
+            resource=self._config.resource,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [202]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
+    @distributed_trace
+    def list_paths(
+        self,
+        recursive: bool,
+        request_id_parameter: Optional[str] = None,
+        timeout: Optional[int] = None,
+        continuation: Optional[str] = None,
+        path: Optional[str] = None,
+        max_results: Optional[int] = None,
+        upn: Optional[bool] = None,
+        **kwargs: Any
+    ) -> _models.PathList:
+        # pylint: disable=line-too-long
+        """List Paths.
+
+        List FileSystem paths and their properties.
+
+        :param recursive: If "true", all paths are listed; otherwise, only the paths at the root of
+         the filesystem are listed. Required.
+        :type recursive: bool
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param continuation: Optional.  When the number of paths to return exceeds the limit for a
+         single invocation, a continuation token is returned in the x-ms-continuation response
+         header.  When a continuation token is returned in the response, it must be specified in a
+         subsequent invocation of the list operation to continue listing the paths. Default value is
+         None.
+        :type continuation: str
+        :param path: Optional.  Filters results to paths within the specified directory. An error
+         occurs if the directory does not exist. Default value is None.
+        :type path: str
+        :param max_results: An optional value that specifies the maximum number of items to return. If
+         omitted or greater than 5,000, the response will include up to 5,000 items. Default value is
+         None.
+        :type max_results: int
+        :param upn: Optional. Valid only when Hierarchical Namespace is enabled for the account. If
+         "true", the user identity values returned in the x-ms-owner, x-ms-group, and x-ms-acl response
+         headers will be transformed from Azure Active Directory Object IDs to User Principal Names.  If
+         "false", the values will be returned as Azure Active Directory Object IDs. The default value is
+         false. Note that group and application Object IDs are not translated because they do not have
+         unique friendly names. Default value is None.
+        :type upn: bool
+        :return: PathList or the result of cls(response)
+        :rtype: ~azure.storage.filedatalake.models.PathList
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = kwargs.pop("params", {}) or {}
+
+        cls: ClsType[_models.PathList] = kwargs.pop("cls", None)
+
+        _request = build_list_paths_request(
+            url=self._config.url,
+            recursive=recursive,
+            request_id_parameter=request_id_parameter,
+            timeout=timeout,
+            continuation=continuation,
+            path=path,
+            max_results=max_results,
+            upn=upn,
+            resource=self._config.resource,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["x-ms-continuation"] = self._deserialize("str", response.headers.get("x-ms-continuation"))
+
+        deserialized = self._deserialize("PathList", pipeline_response.http_response)
+
+        if cls:
+            return cls(pipeline_response, deserialized, response_headers)  # type: ignore
+
+        return deserialized  # type: ignore
+
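+    def _example_list_all_paths(self) -> List["_models.Path"]:  # pragma: no cover
+        """Editorial sketch, not generated code: drain a listing by feeding the
+        x-ms-continuation response header back in, as the docstring describes."""
+        paths: List["_models.Path"] = []
+        continuation: Optional[str] = None
+        while True:
+            captured: Dict[str, Any] = {}
+
+            def _keep(pipeline_response, deserialized, response_headers):
+                captured.update(response_headers)
+                return deserialized
+
+            page = self.list_paths(recursive=True, continuation=continuation, cls=_keep)
+            paths.extend(page.paths or [])
+            continuation = captured.get("x-ms-continuation")
+            if not continuation:
+                return paths
+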
+    @distributed_trace
+    def list_blob_hierarchy_segment(
+        self,
+        prefix: Optional[str] = None,
+        delimiter: Optional[str] = None,
+        marker: Optional[str] = None,
+        max_results: Optional[int] = None,
+        include: Optional[List[Union[str, _models.ListBlobsIncludeItem]]] = None,
+        showonly: Literal["deleted"] = "deleted",
+        timeout: Optional[int] = None,
+        request_id_parameter: Optional[str] = None,
+        **kwargs: Any
+    ) -> _models.ListBlobsHierarchySegmentResponse:
+        # pylint: disable=line-too-long
+        """The List Blobs operation returns a list of the blobs under the specified container.
+
+        :param prefix: Filters results to blobs whose names begin with the specified prefix. Default
+         value is None.
+        :type prefix: str
+        :param delimiter: When the request includes this parameter, the operation returns a BlobPrefix
+         element in the response body that acts as a placeholder for all blobs whose names begin with
+         the same substring up to the appearance of the delimiter character. The delimiter may be a
+         single character or a string. Default value is None.
+        :type delimiter: str
+        :param marker: A string value that identifies the portion of the list of containers to be
+         returned with the next listing operation. The operation returns the NextMarker value within the
+         response body if the listing operation did not return all containers remaining to be listed
+         with the current page. The NextMarker value can be used as the value for the marker parameter
+         in a subsequent call to request the next page of list items. The marker value is opaque to the
+         client. Default value is None.
+        :type marker: str
+        :param max_results: An optional value that specifies the maximum number of items to return. If
+         omitted or greater than 5,000, the response will include up to 5,000 items. Default value is
+         None.
+        :type max_results: int
+        :param include: Include this parameter to specify one or more datasets to include in the
+         response. Default value is None.
+        :type include: list[str or ~azure.storage.filedatalake.models.ListBlobsIncludeItem]
+        :param showonly: Optional. When set to "deleted", only soft-deleted paths are included in
+         the response. Known values are "deleted" and None. Default value is "deleted".
+        :type showonly: str
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :return: ListBlobsHierarchySegmentResponse or the result of cls(response)
+        :rtype: ~azure.storage.filedatalake.models.ListBlobsHierarchySegmentResponse
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        restype: Literal["container"] = kwargs.pop("restype", _params.pop("restype", "container"))
+        comp: Literal["list"] = kwargs.pop("comp", _params.pop("comp", "list"))
+        cls: ClsType[_models.ListBlobsHierarchySegmentResponse] = kwargs.pop("cls", None)
+
+        _request = build_list_blob_hierarchy_segment_request(
+            url=self._config.url,
+            prefix=prefix,
+            delimiter=delimiter,
+            marker=marker,
+            max_results=max_results,
+            include=include,
+            showonly=showonly,
+            timeout=timeout,
+            request_id_parameter=request_id_parameter,
+            restype=restype,
+            comp=comp,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["Content-Type"] = self._deserialize("str", response.headers.get("Content-Type"))
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+
+        deserialized = self._deserialize("ListBlobsHierarchySegmentResponse", pipeline_response.http_response)
+
+        if cls:
+            return cls(pipeline_response, deserialized, response_headers)  # type: ignore
+
+        return deserialized  # type: ignore
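+
+    def _example_walk_hierarchy(self, prefix: Optional[str] = None) -> None:  # pragma: no cover
+        """Editorial sketch, not generated code: page through the hierarchy
+        listing by passing NextMarker back as marker. Assumes the generated
+        ListBlobsHierarchySegmentResponse carries a next_marker field, as its
+        blob-service counterpart does."""
+        marker: Optional[str] = None
+        while True:
+            segment = self.list_blob_hierarchy_segment(prefix=prefix, delimiter="/", marker=marker)
+            marker = segment.next_marker
+            if not marker:
+                break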
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/operations/_patch.py b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/operations/_patch.py
new file mode 100644
index 00000000..f7dd3251
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/operations/_patch.py
@@ -0,0 +1,20 @@
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------
+"""Customize generated code here.
+
+Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize
+"""
+from typing import List
+
+__all__: List[str] = []  # Add all objects you want publicly available to users at this package level
+
+
+def patch_sdk():
+    """Do not remove from this file.
+
+    `patch_sdk` is a last resort escape hatch that allows you to do customizations
+    you can't accomplish using the techniques described in
+    https://aka.ms/azsdk/python/dpcodegen/python/customize
+    """
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/operations/_path_operations.py b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/operations/_path_operations.py
new file mode 100644
index 00000000..b6d6a0a9
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/operations/_path_operations.py
@@ -0,0 +1,2845 @@
+# pylint: disable=too-many-lines
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+import datetime
+import sys
+from typing import Any, Callable, Dict, IO, Iterator, Literal, Optional, TypeVar, Union
+
+from azure.core import PipelineClient
+from azure.core.exceptions import (
+    ClientAuthenticationError,
+    HttpResponseError,
+    ResourceExistsError,
+    ResourceNotFoundError,
+    ResourceNotModifiedError,
+    StreamClosedError,
+    StreamConsumedError,
+    map_error,
+)
+from azure.core.pipeline import PipelineResponse
+from azure.core.rest import HttpRequest, HttpResponse
+from azure.core.tracing.decorator import distributed_trace
+from azure.core.utils import case_insensitive_dict
+
+from .. import models as _models
+from .._configuration import AzureDataLakeStorageRESTAPIConfiguration
+from .._serialization import Deserializer, Serializer
+
+if sys.version_info >= (3, 9):
+    from collections.abc import MutableMapping
+else:
+    from typing import MutableMapping  # type: ignore
+T = TypeVar("T")
+ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
+
+_SERIALIZER = Serializer()
+_SERIALIZER.client_side_validation = False
+
+
+def build_create_request(
+    url: str,
+    *,
+    request_id_parameter: Optional[str] = None,
+    timeout: Optional[int] = None,
+    resource: Optional[Union[str, _models.PathResourceType]] = None,
+    continuation: Optional[str] = None,
+    mode: Optional[Union[str, _models.PathRenameMode]] = None,
+    cache_control: Optional[str] = None,
+    content_encoding: Optional[str] = None,
+    content_language: Optional[str] = None,
+    content_disposition: Optional[str] = None,
+    content_type_parameter: Optional[str] = None,
+    rename_source: Optional[str] = None,
+    lease_id: Optional[str] = None,
+    source_lease_id: Optional[str] = None,
+    properties: Optional[str] = None,
+    permissions: Optional[str] = None,
+    umask: Optional[str] = None,
+    if_match: Optional[str] = None,
+    if_none_match: Optional[str] = None,
+    if_modified_since: Optional[datetime.datetime] = None,
+    if_unmodified_since: Optional[datetime.datetime] = None,
+    source_if_match: Optional[str] = None,
+    source_if_none_match: Optional[str] = None,
+    source_if_modified_since: Optional[datetime.datetime] = None,
+    source_if_unmodified_since: Optional[datetime.datetime] = None,
+    encryption_key: Optional[str] = None,
+    encryption_key_sha256: Optional[str] = None,
+    encryption_algorithm: Literal["AES256"] = "AES256",
+    owner: Optional[str] = None,
+    group: Optional[str] = None,
+    acl: Optional[str] = None,
+    proposed_lease_id: Optional[str] = None,
+    lease_duration: Optional[int] = None,
+    expiry_options: Optional[Union[str, _models.PathExpiryOptions]] = None,
+    expires_on: Optional[str] = None,
+    encryption_context: Optional[str] = None,
+    **kwargs: Any
+) -> HttpRequest:
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    version: Literal["2025-01-05"] = kwargs.pop("version", _headers.pop("x-ms-version", "2025-01-05"))
+    accept = _headers.pop("Accept", "application/json")
+
+    # Construct URL
+    _url = kwargs.pop("template_url", "{url}")
+    path_format_arguments = {
+        "url": _SERIALIZER.url("url", url, "str", skip_quote=True),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    if timeout is not None:
+        _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0)
+    if resource is not None:
+        _params["resource"] = _SERIALIZER.query("resource", resource, "str")
+    if continuation is not None:
+        _params["continuation"] = _SERIALIZER.query("continuation", continuation, "str")
+    if mode is not None:
+        _params["mode"] = _SERIALIZER.query("mode", mode, "str")
+
+    # Construct headers
+    if request_id_parameter is not None:
+        _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str")
+    _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str")
+    if cache_control is not None:
+        _headers["x-ms-cache-control"] = _SERIALIZER.header("cache_control", cache_control, "str")
+    if content_encoding is not None:
+        _headers["x-ms-content-encoding"] = _SERIALIZER.header("content_encoding", content_encoding, "str")
+    if content_language is not None:
+        _headers["x-ms-content-language"] = _SERIALIZER.header("content_language", content_language, "str")
+    if content_disposition is not None:
+        _headers["x-ms-content-disposition"] = _SERIALIZER.header("content_disposition", content_disposition, "str")
+    if content_type_parameter is not None:
+        _headers["x-ms-content-type"] = _SERIALIZER.header("content_type_parameter", content_type_parameter, "str")
+    if rename_source is not None:
+        _headers["x-ms-rename-source"] = _SERIALIZER.header("rename_source", rename_source, "str")
+    if lease_id is not None:
+        _headers["x-ms-lease-id"] = _SERIALIZER.header("lease_id", lease_id, "str")
+    if source_lease_id is not None:
+        _headers["x-ms-source-lease-id"] = _SERIALIZER.header("source_lease_id", source_lease_id, "str")
+    if properties is not None:
+        _headers["x-ms-properties"] = _SERIALIZER.header("properties", properties, "str")
+    if permissions is not None:
+        _headers["x-ms-permissions"] = _SERIALIZER.header("permissions", permissions, "str")
+    if umask is not None:
+        _headers["x-ms-umask"] = _SERIALIZER.header("umask", umask, "str")
+    if if_match is not None:
+        _headers["If-Match"] = _SERIALIZER.header("if_match", if_match, "str")
+    if if_none_match is not None:
+        _headers["If-None-Match"] = _SERIALIZER.header("if_none_match", if_none_match, "str")
+    if if_modified_since is not None:
+        _headers["If-Modified-Since"] = _SERIALIZER.header("if_modified_since", if_modified_since, "rfc-1123")
+    if if_unmodified_since is not None:
+        _headers["If-Unmodified-Since"] = _SERIALIZER.header("if_unmodified_since", if_unmodified_since, "rfc-1123")
+    if source_if_match is not None:
+        _headers["x-ms-source-if-match"] = _SERIALIZER.header("source_if_match", source_if_match, "str")
+    if source_if_none_match is not None:
+        _headers["x-ms-source-if-none-match"] = _SERIALIZER.header("source_if_none_match", source_if_none_match, "str")
+    if source_if_modified_since is not None:
+        _headers["x-ms-source-if-modified-since"] = _SERIALIZER.header(
+            "source_if_modified_since", source_if_modified_since, "rfc-1123"
+        )
+    if source_if_unmodified_since is not None:
+        _headers["x-ms-source-if-unmodified-since"] = _SERIALIZER.header(
+            "source_if_unmodified_since", source_if_unmodified_since, "rfc-1123"
+        )
+    if encryption_key is not None:
+        _headers["x-ms-encryption-key"] = _SERIALIZER.header("encryption_key", encryption_key, "str")
+    if encryption_key_sha256 is not None:
+        _headers["x-ms-encryption-key-sha256"] = _SERIALIZER.header(
+            "encryption_key_sha256", encryption_key_sha256, "str"
+        )
+    if encryption_algorithm is not None:
+        _headers["x-ms-encryption-algorithm"] = _SERIALIZER.header("encryption_algorithm", encryption_algorithm, "str")
+    if owner is not None:
+        _headers["x-ms-owner"] = _SERIALIZER.header("owner", owner, "str")
+    if group is not None:
+        _headers["x-ms-group"] = _SERIALIZER.header("group", group, "str")
+    if acl is not None:
+        _headers["x-ms-acl"] = _SERIALIZER.header("acl", acl, "str")
+    if proposed_lease_id is not None:
+        _headers["x-ms-proposed-lease-id"] = _SERIALIZER.header("proposed_lease_id", proposed_lease_id, "str")
+    if lease_duration is not None:
+        _headers["x-ms-lease-duration"] = _SERIALIZER.header("lease_duration", lease_duration, "int")
+    if expiry_options is not None:
+        _headers["x-ms-expiry-option"] = _SERIALIZER.header("expiry_options", expiry_options, "str")
+    if expires_on is not None:
+        _headers["x-ms-expiry-time"] = _SERIALIZER.header("expires_on", expires_on, "str")
+    if encryption_context is not None:
+        _headers["x-ms-encryption-context"] = _SERIALIZER.header("encryption_context", encryption_context, "str")
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs)
+
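+# Illustrative only: a minimal sketch of how build_create_request is consumed. The
+# URL is a placeholder and ``pipeline`` is assumed to be an already-configured
+# azure.core.pipeline.Pipeline; the builder merely assembles an HttpRequest, and
+# PathOperations.create further below shows the real send path.
+#
+#   request = build_create_request(
+#       url="https://myaccount.dfs.core.windows.net/myfs/dir/file.txt",
+#       resource="file",           # "file" or "directory"
+#       permissions="rwxr-x---",   # POSIX permissions; HNS-enabled accounts only
+#       timeout=30,
+#   )
+#   response = pipeline.run(request).http_response  # expect 201 Created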
+
+def build_update_request(
+    url: str,
+    *,
+    action: Union[str, _models.PathUpdateAction],
+    mode: Union[str, _models.PathSetAccessControlRecursiveMode],
+    content: IO[bytes],
+    request_id_parameter: Optional[str] = None,
+    timeout: Optional[int] = None,
+    max_records: Optional[int] = None,
+    continuation: Optional[str] = None,
+    force_flag: Optional[bool] = None,
+    position: Optional[int] = None,
+    retain_uncommitted_data: Optional[bool] = None,
+    close: Optional[bool] = None,
+    content_length: Optional[int] = None,
+    content_md5: Optional[bytes] = None,
+    lease_id: Optional[str] = None,
+    cache_control: Optional[str] = None,
+    content_type_parameter: Optional[str] = None,
+    content_disposition: Optional[str] = None,
+    content_encoding: Optional[str] = None,
+    content_language: Optional[str] = None,
+    properties: Optional[str] = None,
+    owner: Optional[str] = None,
+    group: Optional[str] = None,
+    permissions: Optional[str] = None,
+    acl: Optional[str] = None,
+    if_match: Optional[str] = None,
+    if_none_match: Optional[str] = None,
+    if_modified_since: Optional[datetime.datetime] = None,
+    if_unmodified_since: Optional[datetime.datetime] = None,
+    structured_body_type: Optional[str] = None,
+    structured_content_length: Optional[int] = None,
+    **kwargs: Any
+) -> HttpRequest:
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+    version: Literal["2025-01-05"] = kwargs.pop("version", _headers.pop("x-ms-version", "2025-01-05"))
+    accept = _headers.pop("Accept", "application/json")
+
+    # Construct URL
+    _url = kwargs.pop("template_url", "{url}")
+    path_format_arguments = {
+        "url": _SERIALIZER.url("url", url, "str", skip_quote=True),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    if timeout is not None:
+        _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0)
+    _params["action"] = _SERIALIZER.query("action", action, "str")
+    if max_records is not None:
+        _params["maxRecords"] = _SERIALIZER.query("max_records", max_records, "int", minimum=1)
+    if continuation is not None:
+        _params["continuation"] = _SERIALIZER.query("continuation", continuation, "str")
+    _params["mode"] = _SERIALIZER.query("mode", mode, "str")
+    if force_flag is not None:
+        _params["forceFlag"] = _SERIALIZER.query("force_flag", force_flag, "bool")
+    if position is not None:
+        _params["position"] = _SERIALIZER.query("position", position, "int")
+    if retain_uncommitted_data is not None:
+        _params["retainUncommittedData"] = _SERIALIZER.query("retain_uncommitted_data", retain_uncommitted_data, "bool")
+    if close is not None:
+        _params["close"] = _SERIALIZER.query("close", close, "bool")
+
+    # Construct headers
+    if request_id_parameter is not None:
+        _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str")
+    _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str")
+    if content_length is not None:
+        _headers["Content-Length"] = _SERIALIZER.header("content_length", content_length, "int", minimum=0)
+    if content_md5 is not None:
+        _headers["x-ms-content-md5"] = _SERIALIZER.header("content_md5", content_md5, "bytearray")
+    if lease_id is not None:
+        _headers["x-ms-lease-id"] = _SERIALIZER.header("lease_id", lease_id, "str")
+    if cache_control is not None:
+        _headers["x-ms-cache-control"] = _SERIALIZER.header("cache_control", cache_control, "str")
+    if content_type_parameter is not None:
+        _headers["x-ms-content-type"] = _SERIALIZER.header("content_type_parameter", content_type_parameter, "str")
+    if content_disposition is not None:
+        _headers["x-ms-content-disposition"] = _SERIALIZER.header("content_disposition", content_disposition, "str")
+    if content_encoding is not None:
+        _headers["x-ms-content-encoding"] = _SERIALIZER.header("content_encoding", content_encoding, "str")
+    if content_language is not None:
+        _headers["x-ms-content-language"] = _SERIALIZER.header("content_language", content_language, "str")
+    if properties is not None:
+        _headers["x-ms-properties"] = _SERIALIZER.header("properties", properties, "str")
+    if owner is not None:
+        _headers["x-ms-owner"] = _SERIALIZER.header("owner", owner, "str")
+    if group is not None:
+        _headers["x-ms-group"] = _SERIALIZER.header("group", group, "str")
+    if permissions is not None:
+        _headers["x-ms-permissions"] = _SERIALIZER.header("permissions", permissions, "str")
+    if acl is not None:
+        _headers["x-ms-acl"] = _SERIALIZER.header("acl", acl, "str")
+    if if_match is not None:
+        _headers["If-Match"] = _SERIALIZER.header("if_match", if_match, "str")
+    if if_none_match is not None:
+        _headers["If-None-Match"] = _SERIALIZER.header("if_none_match", if_none_match, "str")
+    if if_modified_since is not None:
+        _headers["If-Modified-Since"] = _SERIALIZER.header("if_modified_since", if_modified_since, "rfc-1123")
+    if if_unmodified_since is not None:
+        _headers["If-Unmodified-Since"] = _SERIALIZER.header("if_unmodified_since", if_unmodified_since, "rfc-1123")
+    if structured_body_type is not None:
+        _headers["x-ms-structured-body"] = _SERIALIZER.header("structured_body_type", structured_body_type, "str")
+    if structured_content_length is not None:
+        _headers["x-ms-structured-content-length"] = _SERIALIZER.header(
+            "structured_content_length", structured_content_length, "int"
+        )
+    if content_type is not None:
+        _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="PATCH", url=_url, params=_params, headers=_headers, content=content, **kwargs)
+
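+# build_update_request multiplexes several operations over a single PATCH: the
+# required ``action`` query parameter ("append", "flush", "setProperties",
+# "setAccessControl" or "setAccessControlRecursive") selects the behavior, which is
+# why almost every other parameter is optional and only meaningful for some actions.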
+
+def build_lease_request(
+    url: str,
+    *,
+    x_ms_lease_action: Union[str, _models.PathLeaseAction],
+    request_id_parameter: Optional[str] = None,
+    timeout: Optional[int] = None,
+    x_ms_lease_break_period: Optional[int] = None,
+    lease_id: Optional[str] = None,
+    proposed_lease_id: Optional[str] = None,
+    if_match: Optional[str] = None,
+    if_none_match: Optional[str] = None,
+    if_modified_since: Optional[datetime.datetime] = None,
+    if_unmodified_since: Optional[datetime.datetime] = None,
+    x_ms_lease_duration: Optional[int] = None,
+    **kwargs: Any
+) -> HttpRequest:
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    version: Literal["2025-01-05"] = kwargs.pop("version", _headers.pop("x-ms-version", "2025-01-05"))
+    accept = _headers.pop("Accept", "application/json")
+
+    # Construct URL
+    _url = kwargs.pop("template_url", "{url}")
+    path_format_arguments = {
+        "url": _SERIALIZER.url("url", url, "str", skip_quote=True),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    if timeout is not None:
+        _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0)
+
+    # Construct headers
+    if request_id_parameter is not None:
+        _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str")
+    _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str")
+    _headers["x-ms-lease-action"] = _SERIALIZER.header("x_ms_lease_action", x_ms_lease_action, "str")
+    if x_ms_lease_duration is not None:
+        _headers["x-ms-lease-duration"] = _SERIALIZER.header("x_ms_lease_duration", x_ms_lease_duration, "int")
+    if x_ms_lease_break_period is not None:
+        _headers["x-ms-lease-break-period"] = _SERIALIZER.header(
+            "x_ms_lease_break_period", x_ms_lease_break_period, "int"
+        )
+    if lease_id is not None:
+        _headers["x-ms-lease-id"] = _SERIALIZER.header("lease_id", lease_id, "str")
+    if proposed_lease_id is not None:
+        _headers["x-ms-proposed-lease-id"] = _SERIALIZER.header("proposed_lease_id", proposed_lease_id, "str")
+    if if_match is not None:
+        _headers["If-Match"] = _SERIALIZER.header("if_match", if_match, "str")
+    if if_none_match is not None:
+        _headers["If-None-Match"] = _SERIALIZER.header("if_none_match", if_none_match, "str")
+    if if_modified_since is not None:
+        _headers["If-Modified-Since"] = _SERIALIZER.header("if_modified_since", if_modified_since, "rfc-1123")
+    if if_unmodified_since is not None:
+        _headers["If-Unmodified-Since"] = _SERIALIZER.header("if_unmodified_since", if_unmodified_since, "rfc-1123")
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs)
+
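+# build_lease_request covers all lease verbs: x_ms_lease_action is one of
+# "acquire", "break", "change", "renew" or "release". For example (hypothetical
+# IDs), acquiring a 15-second lease passes x_ms_lease_action="acquire",
+# x_ms_lease_duration=15 and a proposed_lease_id; a duration of -1 requests an
+# infinite lease.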
+
+def build_read_request(
+    url: str,
+    *,
+    request_id_parameter: Optional[str] = None,
+    timeout: Optional[int] = None,
+    range: Optional[str] = None,
+    lease_id: Optional[str] = None,
+    x_ms_range_get_content_md5: Optional[bool] = None,
+    if_match: Optional[str] = None,
+    if_none_match: Optional[str] = None,
+    if_modified_since: Optional[datetime.datetime] = None,
+    if_unmodified_since: Optional[datetime.datetime] = None,
+    encryption_key: Optional[str] = None,
+    encryption_key_sha256: Optional[str] = None,
+    encryption_algorithm: Literal["AES256"] = "AES256",
+    **kwargs: Any
+) -> HttpRequest:
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    version: Literal["2025-01-05"] = kwargs.pop("version", _headers.pop("x-ms-version", "2025-01-05"))
+    accept = _headers.pop("Accept", "application/json")
+
+    # Construct URL
+    _url = kwargs.pop("template_url", "{url}")
+    path_format_arguments = {
+        "url": _SERIALIZER.url("url", url, "str", skip_quote=True),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    if timeout is not None:
+        _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0)
+
+    # Construct headers
+    if request_id_parameter is not None:
+        _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str")
+    _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str")
+    if range is not None:
+        _headers["Range"] = _SERIALIZER.header("range", range, "str")
+    if lease_id is not None:
+        _headers["x-ms-lease-id"] = _SERIALIZER.header("lease_id", lease_id, "str")
+    if x_ms_range_get_content_md5 is not None:
+        _headers["x-ms-range-get-content-md5"] = _SERIALIZER.header(
+            "x_ms_range_get_content_md5", x_ms_range_get_content_md5, "bool"
+        )
+    if if_match is not None:
+        _headers["If-Match"] = _SERIALIZER.header("if_match", if_match, "str")
+    if if_none_match is not None:
+        _headers["If-None-Match"] = _SERIALIZER.header("if_none_match", if_none_match, "str")
+    if if_modified_since is not None:
+        _headers["If-Modified-Since"] = _SERIALIZER.header("if_modified_since", if_modified_since, "rfc-1123")
+    if if_unmodified_since is not None:
+        _headers["If-Unmodified-Since"] = _SERIALIZER.header("if_unmodified_since", if_unmodified_since, "rfc-1123")
+    if encryption_key is not None:
+        _headers["x-ms-encryption-key"] = _SERIALIZER.header("encryption_key", encryption_key, "str")
+    if encryption_key_sha256 is not None:
+        _headers["x-ms-encryption-key-sha256"] = _SERIALIZER.header(
+            "encryption_key_sha256", encryption_key_sha256, "str"
+        )
+    if encryption_algorithm is not None:
+        _headers["x-ms-encryption-algorithm"] = _SERIALIZER.header("encryption_algorithm", encryption_algorithm, "str")
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
+
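+# The optional ``range`` argument of build_read_request uses standard HTTP
+# byte-range syntax, e.g. range="bytes=0-1023" for the first KiB. Setting
+# x_ms_range_get_content_md5=True additionally asks the service for an MD5 of the
+# returned range (honored only for small ranges, 4 MiB at the time of writing).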
+
+def build_get_properties_request(
+    url: str,
+    *,
+    request_id_parameter: Optional[str] = None,
+    timeout: Optional[int] = None,
+    action: Optional[Union[str, _models.PathGetPropertiesAction]] = None,
+    upn: Optional[bool] = None,
+    lease_id: Optional[str] = None,
+    if_match: Optional[str] = None,
+    if_none_match: Optional[str] = None,
+    if_modified_since: Optional[datetime.datetime] = None,
+    if_unmodified_since: Optional[datetime.datetime] = None,
+    **kwargs: Any
+) -> HttpRequest:
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    version: Literal["2025-01-05"] = kwargs.pop("version", _headers.pop("x-ms-version", "2025-01-05"))
+    accept = _headers.pop("Accept", "application/json")
+
+    # Construct URL
+    _url = kwargs.pop("template_url", "{url}")
+    path_format_arguments = {
+        "url": _SERIALIZER.url("url", url, "str", skip_quote=True),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    if timeout is not None:
+        _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0)
+    if action is not None:
+        _params["action"] = _SERIALIZER.query("action", action, "str")
+    if upn is not None:
+        _params["upn"] = _SERIALIZER.query("upn", upn, "bool")
+
+    # Construct headers
+    if request_id_parameter is not None:
+        _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str")
+    _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str")
+    if lease_id is not None:
+        _headers["x-ms-lease-id"] = _SERIALIZER.header("lease_id", lease_id, "str")
+    if if_match is not None:
+        _headers["If-Match"] = _SERIALIZER.header("if_match", if_match, "str")
+    if if_none_match is not None:
+        _headers["If-None-Match"] = _SERIALIZER.header("if_none_match", if_none_match, "str")
+    if if_modified_since is not None:
+        _headers["If-Modified-Since"] = _SERIALIZER.header("if_modified_since", if_modified_since, "rfc-1123")
+    if if_unmodified_since is not None:
+        _headers["If-Unmodified-Since"] = _SERIALIZER.header("if_unmodified_since", if_unmodified_since, "rfc-1123")
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="HEAD", url=_url, params=_params, headers=_headers, **kwargs)
+
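+# build_get_properties_request issues a HEAD, so everything comes back in response
+# headers. The optional ``action`` ("getAccessControl" or "getStatus") narrows what
+# is computed, and upn=True asks the service to translate Azure AD object IDs in
+# the owner/group/ACL headers into user principal names where possible.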
+
+def build_delete_request(
+    url: str,
+    *,
+    request_id_parameter: Optional[str] = None,
+    timeout: Optional[int] = None,
+    recursive: Optional[bool] = None,
+    continuation: Optional[str] = None,
+    lease_id: Optional[str] = None,
+    if_match: Optional[str] = None,
+    if_none_match: Optional[str] = None,
+    if_modified_since: Optional[datetime.datetime] = None,
+    if_unmodified_since: Optional[datetime.datetime] = None,
+    paginated: Optional[bool] = None,
+    **kwargs: Any
+) -> HttpRequest:
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    version: Literal["2025-01-05"] = kwargs.pop("version", _headers.pop("x-ms-version", "2025-01-05"))
+    accept = _headers.pop("Accept", "application/json")
+
+    # Construct URL
+    _url = kwargs.pop("template_url", "{url}")
+    path_format_arguments = {
+        "url": _SERIALIZER.url("url", url, "str", skip_quote=True),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    if timeout is not None:
+        _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0)
+    if recursive is not None:
+        _params["recursive"] = _SERIALIZER.query("recursive", recursive, "bool")
+    if continuation is not None:
+        _params["continuation"] = _SERIALIZER.query("continuation", continuation, "str")
+    if paginated is not None:
+        _params["paginated"] = _SERIALIZER.query("paginated", paginated, "bool")
+
+    # Construct headers
+    if request_id_parameter is not None:
+        _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str")
+    _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str")
+    if lease_id is not None:
+        _headers["x-ms-lease-id"] = _SERIALIZER.header("lease_id", lease_id, "str")
+    if if_match is not None:
+        _headers["If-Match"] = _SERIALIZER.header("if_match", if_match, "str")
+    if if_none_match is not None:
+        _headers["If-None-Match"] = _SERIALIZER.header("if_none_match", if_none_match, "str")
+    if if_modified_since is not None:
+        _headers["If-Modified-Since"] = _SERIALIZER.header("if_modified_since", if_modified_since, "rfc-1123")
+    if if_unmodified_since is not None:
+        _headers["If-Unmodified-Since"] = _SERIALIZER.header("if_unmodified_since", if_unmodified_since, "rfc-1123")
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="DELETE", url=_url, params=_params, headers=_headers, **kwargs)
+
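+# Directory deletes are paged: with recursive=True the service removes a bounded
+# batch of paths per call and returns an x-ms-continuation header when more remain;
+# callers loop, passing that token back via ``continuation`` until it is absent.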
+
+def build_set_access_control_request(
+    url: str,
+    *,
+    timeout: Optional[int] = None,
+    lease_id: Optional[str] = None,
+    owner: Optional[str] = None,
+    group: Optional[str] = None,
+    permissions: Optional[str] = None,
+    acl: Optional[str] = None,
+    if_match: Optional[str] = None,
+    if_none_match: Optional[str] = None,
+    if_modified_since: Optional[datetime.datetime] = None,
+    if_unmodified_since: Optional[datetime.datetime] = None,
+    request_id_parameter: Optional[str] = None,
+    **kwargs: Any
+) -> HttpRequest:
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    action: Literal["setAccessControl"] = kwargs.pop("action", _params.pop("action", "setAccessControl"))
+    version: Literal["2025-01-05"] = kwargs.pop("version", _headers.pop("x-ms-version", "2025-01-05"))
+    accept = _headers.pop("Accept", "application/json")
+
+    # Construct URL
+    _url = kwargs.pop("template_url", "{url}")
+    path_format_arguments = {
+        "url": _SERIALIZER.url("url", url, "str", skip_quote=True),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    _params["action"] = _SERIALIZER.query("action", action, "str")
+    if timeout is not None:
+        _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0)
+
+    # Construct headers
+    if lease_id is not None:
+        _headers["x-ms-lease-id"] = _SERIALIZER.header("lease_id", lease_id, "str")
+    if owner is not None:
+        _headers["x-ms-owner"] = _SERIALIZER.header("owner", owner, "str")
+    if group is not None:
+        _headers["x-ms-group"] = _SERIALIZER.header("group", group, "str")
+    if permissions is not None:
+        _headers["x-ms-permissions"] = _SERIALIZER.header("permissions", permissions, "str")
+    if acl is not None:
+        _headers["x-ms-acl"] = _SERIALIZER.header("acl", acl, "str")
+    if if_match is not None:
+        _headers["If-Match"] = _SERIALIZER.header("if_match", if_match, "str")
+    if if_none_match is not None:
+        _headers["If-None-Match"] = _SERIALIZER.header("if_none_match", if_none_match, "str")
+    if if_modified_since is not None:
+        _headers["If-Modified-Since"] = _SERIALIZER.header("if_modified_since", if_modified_since, "rfc-1123")
+    if if_unmodified_since is not None:
+        _headers["If-Unmodified-Since"] = _SERIALIZER.header("if_unmodified_since", if_unmodified_since, "rfc-1123")
+    if request_id_parameter is not None:
+        _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str")
+    _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str")
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="PATCH", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_set_access_control_recursive_request(  # pylint: disable=name-too-long
+    url: str,
+    *,
+    mode: Union[str, _models.PathSetAccessControlRecursiveMode],
+    timeout: Optional[int] = None,
+    continuation: Optional[str] = None,
+    force_flag: Optional[bool] = None,
+    max_records: Optional[int] = None,
+    acl: Optional[str] = None,
+    request_id_parameter: Optional[str] = None,
+    **kwargs: Any
+) -> HttpRequest:
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    action: Literal["setAccessControlRecursive"] = kwargs.pop(
+        "action", _params.pop("action", "setAccessControlRecursive")
+    )
+    version: Literal["2025-01-05"] = kwargs.pop("version", _headers.pop("x-ms-version", "2025-01-05"))
+    accept = _headers.pop("Accept", "application/json")
+
+    # Construct URL
+    _url = kwargs.pop("template_url", "{url}")
+    path_format_arguments = {
+        "url": _SERIALIZER.url("url", url, "str", skip_quote=True),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    _params["action"] = _SERIALIZER.query("action", action, "str")
+    if timeout is not None:
+        _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0)
+    if continuation is not None:
+        _params["continuation"] = _SERIALIZER.query("continuation", continuation, "str")
+    _params["mode"] = _SERIALIZER.query("mode", mode, "str")
+    if force_flag is not None:
+        _params["forceFlag"] = _SERIALIZER.query("force_flag", force_flag, "bool")
+    if max_records is not None:
+        _params["maxRecords"] = _SERIALIZER.query("max_records", max_records, "int", minimum=1)
+
+    # Construct headers
+    if acl is not None:
+        _headers["x-ms-acl"] = _SERIALIZER.header("acl", acl, "str")
+    if request_id_parameter is not None:
+        _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str")
+    _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str")
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="PATCH", url=_url, params=_params, headers=_headers, **kwargs)
+
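+# Like directory delete, setAccessControlRecursive is paged: max_records bounds how
+# many paths are modified per call, and the continuation token from each response
+# is fed back in. force_flag=True tells the service to continue past per-path
+# failures and report them in the response body instead of aborting.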
+
+def build_flush_data_request(
+    url: str,
+    *,
+    timeout: Optional[int] = None,
+    position: Optional[int] = None,
+    retain_uncommitted_data: Optional[bool] = None,
+    close: Optional[bool] = None,
+    content_length: Optional[int] = None,
+    content_md5: Optional[bytes] = None,
+    lease_id: Optional[str] = None,
+    lease_action: Optional[Union[str, _models.LeaseAction]] = None,
+    lease_duration: Optional[int] = None,
+    proposed_lease_id: Optional[str] = None,
+    cache_control: Optional[str] = None,
+    content_type_parameter: Optional[str] = None,
+    content_disposition: Optional[str] = None,
+    content_encoding: Optional[str] = None,
+    content_language: Optional[str] = None,
+    if_match: Optional[str] = None,
+    if_none_match: Optional[str] = None,
+    if_modified_since: Optional[datetime.datetime] = None,
+    if_unmodified_since: Optional[datetime.datetime] = None,
+    request_id_parameter: Optional[str] = None,
+    encryption_key: Optional[str] = None,
+    encryption_key_sha256: Optional[str] = None,
+    encryption_algorithm: Literal["AES256"] = "AES256",
+    **kwargs: Any
+) -> HttpRequest:
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    action: Literal["flush"] = kwargs.pop("action", _params.pop("action", "flush"))
+    version: Literal["2025-01-05"] = kwargs.pop("version", _headers.pop("x-ms-version", "2025-01-05"))
+    accept = _headers.pop("Accept", "application/json")
+
+    # Construct URL
+    _url = kwargs.pop("template_url", "{url}")
+    path_format_arguments = {
+        "url": _SERIALIZER.url("url", url, "str", skip_quote=True),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    _params["action"] = _SERIALIZER.query("action", action, "str")
+    if timeout is not None:
+        _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0)
+    if position is not None:
+        _params["position"] = _SERIALIZER.query("position", position, "int")
+    if retain_uncommitted_data is not None:
+        _params["retainUncommittedData"] = _SERIALIZER.query("retain_uncommitted_data", retain_uncommitted_data, "bool")
+    if close is not None:
+        _params["close"] = _SERIALIZER.query("close", close, "bool")
+
+    # Construct headers
+    if content_length is not None:
+        _headers["Content-Length"] = _SERIALIZER.header("content_length", content_length, "int", minimum=0)
+    if content_md5 is not None:
+        _headers["x-ms-content-md5"] = _SERIALIZER.header("content_md5", content_md5, "bytearray")
+    if lease_id is not None:
+        _headers["x-ms-lease-id"] = _SERIALIZER.header("lease_id", lease_id, "str")
+    if lease_action is not None:
+        _headers["x-ms-lease-action"] = _SERIALIZER.header("lease_action", lease_action, "str")
+    if lease_duration is not None:
+        _headers["x-ms-lease-duration"] = _SERIALIZER.header("lease_duration", lease_duration, "int")
+    if proposed_lease_id is not None:
+        _headers["x-ms-proposed-lease-id"] = _SERIALIZER.header("proposed_lease_id", proposed_lease_id, "str")
+    if cache_control is not None:
+        _headers["x-ms-cache-control"] = _SERIALIZER.header("cache_control", cache_control, "str")
+    if content_type_parameter is not None:
+        _headers["x-ms-content-type"] = _SERIALIZER.header("content_type_parameter", content_type_parameter, "str")
+    if content_disposition is not None:
+        _headers["x-ms-content-disposition"] = _SERIALIZER.header("content_disposition", content_disposition, "str")
+    if content_encoding is not None:
+        _headers["x-ms-content-encoding"] = _SERIALIZER.header("content_encoding", content_encoding, "str")
+    if content_language is not None:
+        _headers["x-ms-content-language"] = _SERIALIZER.header("content_language", content_language, "str")
+    if if_match is not None:
+        _headers["If-Match"] = _SERIALIZER.header("if_match", if_match, "str")
+    if if_none_match is not None:
+        _headers["If-None-Match"] = _SERIALIZER.header("if_none_match", if_none_match, "str")
+    if if_modified_since is not None:
+        _headers["If-Modified-Since"] = _SERIALIZER.header("if_modified_since", if_modified_since, "rfc-1123")
+    if if_unmodified_since is not None:
+        _headers["If-Unmodified-Since"] = _SERIALIZER.header("if_unmodified_since", if_unmodified_since, "rfc-1123")
+    if request_id_parameter is not None:
+        _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str")
+    _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str")
+    if encryption_key is not None:
+        _headers["x-ms-encryption-key"] = _SERIALIZER.header("encryption_key", encryption_key, "str")
+    if encryption_key_sha256 is not None:
+        _headers["x-ms-encryption-key-sha256"] = _SERIALIZER.header(
+            "encryption_key_sha256", encryption_key_sha256, "str"
+        )
+    if encryption_algorithm is not None:
+        _headers["x-ms-encryption-algorithm"] = _SERIALIZER.header("encryption_algorithm", encryption_algorithm, "str")
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="PATCH", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_append_data_request(
+    url: str,
+    *,
+    content: IO[bytes],
+    position: Optional[int] = None,
+    timeout: Optional[int] = None,
+    content_length: Optional[int] = None,
+    transactional_content_hash: Optional[bytes] = None,
+    transactional_content_crc64: Optional[bytes] = None,
+    lease_id: Optional[str] = None,
+    lease_action: Optional[Union[str, _models.LeaseAction]] = None,
+    lease_duration: Optional[int] = None,
+    proposed_lease_id: Optional[str] = None,
+    request_id_parameter: Optional[str] = None,
+    encryption_key: Optional[str] = None,
+    encryption_key_sha256: Optional[str] = None,
+    encryption_algorithm: Literal["AES256"] = "AES256",
+    flush: Optional[bool] = None,
+    structured_body_type: Optional[str] = None,
+    structured_content_length: Optional[int] = None,
+    **kwargs: Any
+) -> HttpRequest:
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    action: Literal["append"] = kwargs.pop("action", _params.pop("action", "append"))
+    content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+    version: Literal["2025-01-05"] = kwargs.pop("version", _headers.pop("x-ms-version", "2025-01-05"))
+    accept = _headers.pop("Accept", "application/json")
+
+    # Construct URL
+    _url = kwargs.pop("template_url", "{url}")
+    path_format_arguments = {
+        "url": _SERIALIZER.url("url", url, "str", skip_quote=True),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    _params["action"] = _SERIALIZER.query("action", action, "str")
+    if position is not None:
+        _params["position"] = _SERIALIZER.query("position", position, "int")
+    if timeout is not None:
+        _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0)
+    if flush is not None:
+        _params["flush"] = _SERIALIZER.query("flush", flush, "bool")
+
+    # Construct headers
+    if content_length is not None:
+        _headers["Content-Length"] = _SERIALIZER.header("content_length", content_length, "int", minimum=0)
+    if transactional_content_hash is not None:
+        _headers["Content-MD5"] = _SERIALIZER.header(
+            "transactional_content_hash", transactional_content_hash, "bytearray"
+        )
+    if transactional_content_crc64 is not None:
+        _headers["x-ms-content-crc64"] = _SERIALIZER.header(
+            "transactional_content_crc64", transactional_content_crc64, "bytearray"
+        )
+    if lease_id is not None:
+        _headers["x-ms-lease-id"] = _SERIALIZER.header("lease_id", lease_id, "str")
+    if lease_action is not None:
+        _headers["x-ms-lease-action"] = _SERIALIZER.header("lease_action", lease_action, "str")
+    if lease_duration is not None:
+        _headers["x-ms-lease-duration"] = _SERIALIZER.header("lease_duration", lease_duration, "int")
+    if proposed_lease_id is not None:
+        _headers["x-ms-proposed-lease-id"] = _SERIALIZER.header("proposed_lease_id", proposed_lease_id, "str")
+    if request_id_parameter is not None:
+        _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str")
+    _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str")
+    if encryption_key is not None:
+        _headers["x-ms-encryption-key"] = _SERIALIZER.header("encryption_key", encryption_key, "str")
+    if encryption_key_sha256 is not None:
+        _headers["x-ms-encryption-key-sha256"] = _SERIALIZER.header(
+            "encryption_key_sha256", encryption_key_sha256, "str"
+        )
+    if encryption_algorithm is not None:
+        _headers["x-ms-encryption-algorithm"] = _SERIALIZER.header("encryption_algorithm", encryption_algorithm, "str")
+    if structured_body_type is not None:
+        _headers["x-ms-structured-body"] = _SERIALIZER.header("structured_body_type", structured_body_type, "str")
+    if structured_content_length is not None:
+        _headers["x-ms-structured-content-length"] = _SERIALIZER.header(
+            "structured_content_length", structured_content_length, "int"
+        )
+    if content_type is not None:
+        _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="PATCH", url=_url, params=_params, headers=_headers, content=content, **kwargs)
+
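+# A minimal sketch (placeholder FILE_URL, preconfigured ``pipeline``) of the
+# two-phase write implemented by the two builders above: appends stage data at a
+# byte position, and a final flush at position == total length commits the file.
+# Flush itself sends no body, hence content_length=0.
+#
+#   import io
+#   data = b"hello, data lake"
+#   append = build_append_data_request(
+#       url=FILE_URL, content=io.BytesIO(data), position=0, content_length=len(data))
+#   pipeline.run(append)
+#   flush = build_flush_data_request(url=FILE_URL, position=len(data), content_length=0)
+#   pipeline.run(flush)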
+
+def build_set_expiry_request(
+    url: str,
+    *,
+    expiry_options: Union[str, _models.PathExpiryOptions],
+    timeout: Optional[int] = None,
+    request_id_parameter: Optional[str] = None,
+    expires_on: Optional[str] = None,
+    **kwargs: Any
+) -> HttpRequest:
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    comp: Literal["expiry"] = kwargs.pop("comp", _params.pop("comp", "expiry"))
+    version: Literal["2025-01-05"] = kwargs.pop("version", _headers.pop("x-ms-version", "2025-01-05"))
+    accept = _headers.pop("Accept", "application/json")
+
+    # Construct URL
+    _url = kwargs.pop("template_url", "{url}")
+    path_format_arguments = {
+        "url": _SERIALIZER.url("url", url, "str", skip_quote=True),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    _params["comp"] = _SERIALIZER.query("comp", comp, "str")
+    if timeout is not None:
+        _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0)
+
+    # Construct headers
+    _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str")
+    if request_id_parameter is not None:
+        _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str")
+    _headers["x-ms-expiry-option"] = _SERIALIZER.header("expiry_options", expiry_options, "str")
+    if expires_on is not None:
+        _headers["x-ms-expiry-time"] = _SERIALIZER.header("expires_on", expires_on, "str")
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs)
+
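+# ``expires_on`` is interpreted according to expiry_options: an RFC 1123 datetime
+# for "Absolute", a duration in milliseconds for the "RelativeTo*" modes; it must
+# be omitted for "NeverExpire".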
+
+def build_undelete_request(
+    url: str,
+    *,
+    timeout: Optional[int] = None,
+    undelete_source: Optional[str] = None,
+    request_id_parameter: Optional[str] = None,
+    **kwargs: Any
+) -> HttpRequest:
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    comp: Literal["undelete"] = kwargs.pop("comp", _params.pop("comp", "undelete"))
+    version: Literal["2025-01-05"] = kwargs.pop("version", _headers.pop("x-ms-version", "2025-01-05"))
+    accept = _headers.pop("Accept", "application/json")
+
+    # Construct URL
+    _url = kwargs.pop("template_url", "{url}")
+    path_format_arguments = {
+        "url": _SERIALIZER.url("url", url, "str", skip_quote=True),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    _params["comp"] = _SERIALIZER.query("comp", comp, "str")
+    if timeout is not None:
+        _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0)
+
+    # Construct headers
+    if undelete_source is not None:
+        _headers["x-ms-undelete-source"] = _SERIALIZER.header("undelete_source", undelete_source, "str")
+    _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str")
+    if request_id_parameter is not None:
+        _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str")
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+class PathOperations:
+    """
+    .. warning::
+        **DO NOT** instantiate this class directly.
+
+        Instead, you should access the following operations through
+        :class:`~azure.storage.filedatalake.AzureDataLakeStorageRESTAPI`'s
+        :attr:`path` attribute.
+    """
+
+    models = _models
+
+    def __init__(self, *args, **kwargs):
+        input_args = list(args)
+        self._client: PipelineClient = input_args.pop(0) if input_args else kwargs.pop("client")
+        self._config: AzureDataLakeStorageRESTAPIConfiguration = (
+            input_args.pop(0) if input_args else kwargs.pop("config")
+        )
+        self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer")
+        self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer")
+
+    @distributed_trace
+    def create(  # pylint: disable=inconsistent-return-statements
+        self,
+        request_id_parameter: Optional[str] = None,
+        timeout: Optional[int] = None,
+        resource: Optional[Union[str, _models.PathResourceType]] = None,
+        continuation: Optional[str] = None,
+        mode: Optional[Union[str, _models.PathRenameMode]] = None,
+        rename_source: Optional[str] = None,
+        source_lease_id: Optional[str] = None,
+        properties: Optional[str] = None,
+        permissions: Optional[str] = None,
+        umask: Optional[str] = None,
+        owner: Optional[str] = None,
+        group: Optional[str] = None,
+        acl: Optional[str] = None,
+        proposed_lease_id: Optional[str] = None,
+        lease_duration: Optional[int] = None,
+        expiry_options: Optional[Union[str, _models.PathExpiryOptions]] = None,
+        expires_on: Optional[str] = None,
+        encryption_context: Optional[str] = None,
+        path_http_headers: Optional[_models.PathHTTPHeaders] = None,
+        lease_access_conditions: Optional[_models.LeaseAccessConditions] = None,
+        modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None,
+        source_modified_access_conditions: Optional[_models.SourceModifiedAccessConditions] = None,
+        cpk_info: Optional[_models.CpkInfo] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """Create File | Create Directory | Rename File | Rename Directory.
+
+        Create or rename a file or directory.  By default, the destination is overwritten; if the
+        destination already exists and has a lease, the lease is broken.  This operation supports
+        conditional HTTP requests.  For more information, see `Specifying Conditional Headers for Blob
+        Service Operations
+        <https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations>`_.
+        To fail if the destination already exists, use a conditional request with If-None-Match: "*".
+
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param resource: Required only for Create File and Create Directory. The value must be "file"
+         or "directory". Known values are: "directory" and "file". Default value is None.
+        :type resource: str or ~azure.storage.filedatalake.models.PathResourceType
+        :param continuation: Optional.  When renaming a directory, the number of paths that are
+         renamed with each invocation is limited.  If the number of paths to be renamed exceeds this
+         limit, a continuation token is returned in this response header.  When a continuation token
+         is returned in the response, it must be specified in a subsequent invocation of the rename
+         operation to continue renaming the directory. Default value is None.
+        :param mode: Optional. Valid only when namespace is enabled. This parameter determines the
+         behavior of the rename operation. The value must be "legacy" or "posix", and the default value
+         will be "posix". Known values are: "legacy" and "posix". Default value is None.
+        :type mode: str or ~azure.storage.filedatalake.models.PathRenameMode
+        :param rename_source: An optional file or directory to be renamed.  The value must have the
+         following format: "/{filesystem}/{path}".  If "x-ms-properties" is specified, the properties
+         will overwrite the existing properties; otherwise, the existing properties will be preserved.
+         This value must be a URL percent-encoded string. Note that the string may only contain ASCII
+         characters in the ISO-8859-1 character set. Default value is None.
+        :type rename_source: str
+        :param source_lease_id: A lease ID for the source path. If specified, the source path must have
+         an active lease and the lease ID must match. Default value is None.
+        :type source_lease_id: str
+        :param properties: Optional. User-defined properties to be stored with the file or directory,
+         in the format of a comma-separated list of name and value pairs "n1=v1, n2=v2, ...", where
+         each value is a base64 encoded string. Note that the string may only contain ASCII characters
+         in the ISO-8859-1 character set.  If the path exists, any properties not included in the list
+         will be removed.  All properties are removed if the header is omitted.  To merge new and
+         existing properties, first get all existing properties and the current E-Tag, then make a
+         conditional request with the E-Tag and include values for all properties. Default value is
+         None.
+        :type properties: str
+        :param permissions: Optional and only valid if Hierarchical Namespace is enabled for the
+         account. Sets POSIX access permissions for the file owner, the file owning group, and others.
+         Each class may be granted read, write, or execute permission.  The sticky bit is also
+         supported.  Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are supported.
+         Default value is None.
+        :type permissions: str
+        :param umask: Optional and only valid if Hierarchical Namespace is enabled for the account.
+         When creating a file or directory and the parent folder does not have a default ACL, the umask
+         restricts the permissions of the file or directory to be created.  The resulting permission
+         is given by p & ^u (p bitwise-ANDed with the negation of u), where p is the permission and u
+         is the umask.  For example, if p
+         is 0777 and u is 0057, then the resulting permission is 0720.  The default permission is 0777
+         for a directory and 0666 for a file.  The default umask is 0027.  The umask must be specified
+         in 4-digit octal notation (e.g. 0766). Default value is None.
+        :type umask: str
+        :param owner: Optional. The owner of the blob or directory. Default value is None.
+        :type owner: str
+        :param group: Optional. The owning group of the blob or directory. Default value is None.
+        :type group: str
+        :param acl: Sets POSIX access control rights on files and directories. The value is a
+         comma-separated list of access control entries. Each access control entry (ACE) consists of a
+         scope, a type, a user or group identifier, and permissions in the format
+         "[scope:][type]:[id]:[permissions]". Default value is None.
+        :type acl: str
+        :param proposed_lease_id: Proposed lease ID, in a GUID string format. The Blob service returns
+         400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid
+         Constructor (String) for a list of valid GUID string formats. Default value is None.
+        :type proposed_lease_id: str
+        :param lease_duration: The lease duration is required to acquire a lease, and specifies the
+         duration of the lease in seconds.  The lease duration must be between 15 and 60 seconds or -1
+         for infinite lease. Default value is None.
+        :type lease_duration: int
+        :param expiry_options: Optional; required when an expiry time is being set. Indicates mode of
+         the expiry time. Known values are: "NeverExpire", "RelativeToCreation", "RelativeToNow", and
+         "Absolute". Default value is None.
+        :type expiry_options: str or ~azure.storage.filedatalake.models.PathExpiryOptions
+        :param expires_on: The time at which the file should expire. Default value is None.
+        :type expires_on: str
+        :param encryption_context: Specifies the encryption context to set on the file. Default value
+         is None.
+        :type encryption_context: str
+        :param path_http_headers: Parameter group. Default value is None.
+        :type path_http_headers: ~azure.storage.filedatalake.models.PathHTTPHeaders
+        :param lease_access_conditions: Parameter group. Default value is None.
+        :type lease_access_conditions: ~azure.storage.filedatalake.models.LeaseAccessConditions
+        :param modified_access_conditions: Parameter group. Default value is None.
+        :type modified_access_conditions: ~azure.storage.filedatalake.models.ModifiedAccessConditions
+        :param source_modified_access_conditions: Parameter group. Default value is None.
+        :type source_modified_access_conditions:
+         ~azure.storage.filedatalake.models.SourceModifiedAccessConditions
+        :param cpk_info: Parameter group. Default value is None.
+        :type cpk_info: ~azure.storage.filedatalake.models.CpkInfo
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = kwargs.pop("params", {}) or {}
+
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _cache_control = None
+        _content_encoding = None
+        _content_language = None
+        _content_disposition = None
+        _content_type_parameter = None
+        _lease_id = None
+        _if_match = None
+        _if_none_match = None
+        _if_modified_since = None
+        _if_unmodified_since = None
+        _source_if_match = None
+        _source_if_none_match = None
+        _source_if_modified_since = None
+        _source_if_unmodified_since = None
+        _encryption_key = None
+        _encryption_key_sha256 = None
+        _encryption_algorithm = None
+        if path_http_headers is not None:
+            _cache_control = path_http_headers.cache_control
+            _content_disposition = path_http_headers.content_disposition
+            _content_encoding = path_http_headers.content_encoding
+            _content_language = path_http_headers.content_language
+            _content_type_parameter = path_http_headers.content_type
+        if lease_access_conditions is not None:
+            _lease_id = lease_access_conditions.lease_id
+        if modified_access_conditions is not None:
+            _if_match = modified_access_conditions.if_match
+            _if_modified_since = modified_access_conditions.if_modified_since
+            _if_none_match = modified_access_conditions.if_none_match
+            _if_unmodified_since = modified_access_conditions.if_unmodified_since
+        if source_modified_access_conditions is not None:
+            _source_if_match = source_modified_access_conditions.source_if_match
+            _source_if_modified_since = source_modified_access_conditions.source_if_modified_since
+            _source_if_none_match = source_modified_access_conditions.source_if_none_match
+            _source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since
+        if cpk_info is not None:
+            _encryption_algorithm = cpk_info.encryption_algorithm
+            _encryption_key = cpk_info.encryption_key
+            _encryption_key_sha256 = cpk_info.encryption_key_sha256
+
+        _request = build_create_request(
+            url=self._config.url,
+            request_id_parameter=request_id_parameter,
+            timeout=timeout,
+            resource=resource,
+            continuation=continuation,
+            mode=mode,
+            cache_control=_cache_control,
+            content_encoding=_content_encoding,
+            content_language=_content_language,
+            content_disposition=_content_disposition,
+            content_type_parameter=_content_type_parameter,
+            rename_source=rename_source,
+            lease_id=_lease_id,
+            source_lease_id=source_lease_id,
+            properties=properties,
+            permissions=permissions,
+            umask=umask,
+            if_match=_if_match,
+            if_none_match=_if_none_match,
+            if_modified_since=_if_modified_since,
+            if_unmodified_since=_if_unmodified_since,
+            source_if_match=_source_if_match,
+            source_if_none_match=_source_if_none_match,
+            source_if_modified_since=_source_if_modified_since,
+            source_if_unmodified_since=_source_if_unmodified_since,
+            encryption_key=_encryption_key,
+            encryption_key_sha256=_encryption_key_sha256,
+            encryption_algorithm=_encryption_algorithm,  # type: ignore
+            owner=owner,
+            group=group,
+            acl=acl,
+            proposed_lease_id=proposed_lease_id,
+            lease_duration=lease_duration,
+            expiry_options=expiry_options,
+            expires_on=expires_on,
+            encryption_context=encryption_context,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [201]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["x-ms-continuation"] = self._deserialize("str", response.headers.get("x-ms-continuation"))
+        response_headers["Content-Length"] = self._deserialize("int", response.headers.get("Content-Length"))
+        response_headers["x-ms-request-server-encrypted"] = self._deserialize(
+            "bool", response.headers.get("x-ms-request-server-encrypted")
+        )
+        response_headers["x-ms-encryption-key-sha256"] = self._deserialize(
+            "str", response.headers.get("x-ms-encryption-key-sha256")
+        )
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
+    @distributed_trace
+    def update(
+        self,
+        action: Union[str, _models.PathUpdateAction],
+        mode: Union[str, _models.PathSetAccessControlRecursiveMode],
+        body: IO[bytes],
+        request_id_parameter: Optional[str] = None,
+        timeout: Optional[int] = None,
+        max_records: Optional[int] = None,
+        continuation: Optional[str] = None,
+        force_flag: Optional[bool] = None,
+        position: Optional[int] = None,
+        retain_uncommitted_data: Optional[bool] = None,
+        close: Optional[bool] = None,
+        content_length: Optional[int] = None,
+        properties: Optional[str] = None,
+        owner: Optional[str] = None,
+        group: Optional[str] = None,
+        permissions: Optional[str] = None,
+        acl: Optional[str] = None,
+        structured_body_type: Optional[str] = None,
+        structured_content_length: Optional[int] = None,
+        path_http_headers: Optional[_models.PathHTTPHeaders] = None,
+        lease_access_conditions: Optional[_models.LeaseAccessConditions] = None,
+        modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None,
+        **kwargs: Any
+    ) -> Optional[_models.SetAccessControlRecursiveResponse]:
+        # pylint: disable=line-too-long
+        """Append Data | Flush Data | Set Properties | Set Access Control.
+
+        Uploads data to be appended to a file, flushes (writes) previously uploaded data to a file,
+        sets properties for a file or directory, or sets access control for a file or directory. Data
+        can only be appended to a file. Concurrent writes to the same file using multiple clients are
+        not supported. This operation supports conditional HTTP requests. For more information, see
+        `Specifying Conditional Headers for Blob Service Operations
+        <https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations>`_.
+
+        :param action: The action must be "append" to upload data to be appended to a file, "flush" to
+         flush previously uploaded data to a file, "setProperties" to set the properties of a file or
+         directory, "setAccessControl" to set the owner, group, permissions, or access control list for
+         a file or directory, or "setAccessControlRecursive" to set the access control list for a
+         directory recursively. Note that Hierarchical Namespace must be enabled for the account in
+         order to use access control.  Also note that the Access Control List (ACL) includes permissions
+         for the owner, owning group, and others, so the x-ms-permissions and x-ms-acl request headers
+         are mutually exclusive. Known values are: "append", "flush", "setProperties",
+         "setAccessControl", and "setAccessControlRecursive". Required.
+        :type action: str or ~azure.storage.filedatalake.models.PathUpdateAction
+        :param mode: Mode "set" sets POSIX access control rights on files and directories, "modify"
+         modifies one or more POSIX access control rights that pre-exist on files and directories,
+         "remove" removes one or more POSIX access control rights that were present earlier on files
+         and directories. Known values are: "set", "modify", and "remove". Required.
+        :type mode: str or ~azure.storage.filedatalake.models.PathSetAccessControlRecursiveMode
+        :param body: Initial data. Required.
+        :type body: IO[bytes]
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param max_records: Optional. Valid for "SetAccessControlRecursive" operation. It specifies the
+         maximum number of files or directories on which the acl change will be applied. If omitted or
+         greater than 2,000, the request will process up to 2,000 items. Default value is None.
+        :type max_records: int
+        :param continuation: Optional. The number of paths processed with each invocation is limited.
+         If the number of paths to be processed exceeds this limit, a continuation token is returned in
+         the response header x-ms-continuation. When a continuation token is returned in the response,
+         it must be percent-encoded and specified in a subsequent invocation of the
+         setAccessControlRecursive operation. Default value is None.
+        :type continuation: str
+        :param force_flag: Optional. Valid for "SetAccessControlRecursive" operation. If set to false,
+         the operation will terminate quickly on encountering user errors (4XX). If true, the operation
+         will ignore user errors and proceed with the operation on other sub-entities of the directory.
+         Continuation token will only be returned when forceFlag is true in case of user errors. If not
+         set, it defaults to false. Default value is None.
+        :type force_flag: bool
+        :param position: This parameter allows the caller to upload data in parallel and control the
+         order in which it is appended to the file.  It is required when uploading data to be appended
+         to the file and when flushing previously uploaded data to the file.  The value must be the
+         position where the data is to be appended.  Uploaded data is not immediately flushed, or
+         written, to the file.  To flush, the previously uploaded data must be contiguous, the position
+         parameter must be specified and equal to the length of the file after all data has been
+         written, and there must not be a request entity body included with the request. Default value
+         is None.
+        :type position: int
+        :param retain_uncommitted_data: Valid only for flush operations.  If "true", uncommitted data
+         is retained after the flush operation completes; otherwise, the uncommitted data is deleted
+         after the flush operation.  The default is false.  Data at offsets less than the specified
+         position are written to the file when flush succeeds, but this optional parameter allows data
+         after the flush position to be retained for a future flush operation. Default value is None.
+        :type retain_uncommitted_data: bool
+        :param close: Azure Storage Events allow applications to receive notifications when files
+         change. When Azure Storage Events are enabled, a file changed event is raised. This event has a
+         property indicating whether this is the final change to distinguish the difference between an
+         intermediate flush to a file stream and the final close of a file stream. The close query
+         parameter is valid only when the action is "flush" and change notifications are enabled. If the
+         value of close is "true" and the flush operation completes successfully, the service raises a
+         file change notification with a property indicating that this is the final update (the file
+         stream has been closed). If "false" a change notification is raised indicating the file has
+         changed. The default is false. This query parameter is set to true by the Hadoop ABFS driver to
+         indicate that the file stream has been closed. Default value is None.
+        :type close: bool
+        :param content_length: Required for "Append Data" and "Flush Data".  Must be 0 for "Flush
+         Data".  Must be the length of the request content in bytes for "Append Data". Default value is
+         None.
+        :type content_length: int
+        :param properties: Optional. User-defined properties to be stored with the file or directory,
+         in the
+         format of a comma-separated list of name and value pairs "n1=v1, n2=v2, ...", where each value
+         is a base64 encoded string. Note that the string may only contain ASCII characters in the
+         ISO-8859-1 character set.  If the file or directory exists, any properties not included in
+         the list
+         will be removed.  All properties are removed if the header is omitted.  To merge new and
+         existing properties, first get all existing properties and the current E-Tag, then make a
+         conditional request with the E-Tag and include values for all properties. Default value is
+         None.
+        :type properties: str
+        :param owner: Optional. The owner of the blob or directory. Default value is None.
+        :type owner: str
+        :param group: Optional. The owning group of the blob or directory. Default value is None.
+        :type group: str
+        :param permissions: Optional and only valid if Hierarchical Namespace is enabled for the
+         account. Sets POSIX access permissions for the file owner, the file owning group, and others.
+         Each class may be granted read, write, or execute permission.  The sticky bit is also
+         supported.  Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are supported.
+         Default value is None.
+        :type permissions: str
+        :param acl: Sets POSIX access control rights on files and directories. The value is a
+         comma-separated list of access control entries. Each access control entry (ACE) consists of a
+         scope, a type, a user or group identifier, and permissions in the format
+         "[scope:][type]:[id]:[permissions]". Default value is None.
+        :type acl: str
+        :param structured_body_type: Required if the request body is a structured message. Specifies
+         the message schema version and properties. Default value is None.
+        :type structured_body_type: str
+        :param structured_content_length: Required if the request body is a structured message.
+         Specifies the length of the blob/file content inside the message body. Will always be smaller
+         than Content-Length. Default value is None.
+        :type structured_content_length: int
+        :param path_http_headers: Parameter group. Default value is None.
+        :type path_http_headers: ~azure.storage.filedatalake.models.PathHTTPHeaders
+        :param lease_access_conditions: Parameter group. Default value is None.
+        :type lease_access_conditions: ~azure.storage.filedatalake.models.LeaseAccessConditions
+        :param modified_access_conditions: Parameter group. Default value is None.
+        :type modified_access_conditions: ~azure.storage.filedatalake.models.ModifiedAccessConditions
+        :return: SetAccessControlRecursiveResponse or None or the result of cls(response)
+        :rtype: ~azure.storage.filedatalake.models.SetAccessControlRecursiveResponse or None
+        :raises ~azure.core.exceptions.HttpResponseError:
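+
+        A minimal append-then-flush sketch, using the higher-level ``DataLakeFileClient``
+        from ``azure.storage.filedatalake`` that wraps this operation; the account URL,
+        credential, and path names below are placeholders:
+
+        .. code-block:: python
+
+            from azure.storage.filedatalake import DataLakeFileClient
+
+            # Placeholder endpoint, credential, and names; substitute real values.
+            file_client = DataLakeFileClient(
+                account_url="https://myaccount.dfs.core.windows.net",
+                file_system_name="my-filesystem",
+                file_path="dir/data.txt",
+                credential="<account-key>",
+            )
+            data = b"hello, lake"
+            # "append" stages bytes at the given position; nothing is committed yet.
+            file_client.append_data(data, offset=0, length=len(data))
+            # "flush" commits the staged data; the position must equal the final length.
+            file_client.flush_data(len(data))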
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+        _params = kwargs.pop("params", {}) or {}
+
+        content_type: str = kwargs.pop("content_type", _headers.pop("Content-Type", "application/octet-stream"))
+        cls: ClsType[Optional[_models.SetAccessControlRecursiveResponse]] = kwargs.pop("cls", None)
+
+        _content_md5 = None
+        _lease_id = None
+        _cache_control = None
+        _content_type_parameter = None
+        _content_disposition = None
+        _content_encoding = None
+        _content_language = None
+        _if_match = None
+        _if_none_match = None
+        _if_modified_since = None
+        _if_unmodified_since = None
+        if path_http_headers is not None:
+            _cache_control = path_http_headers.cache_control
+            _content_disposition = path_http_headers.content_disposition
+            _content_encoding = path_http_headers.content_encoding
+            _content_language = path_http_headers.content_language
+            _content_md5 = path_http_headers.content_md5
+            _content_type_parameter = path_http_headers.content_type
+        if lease_access_conditions is not None:
+            _lease_id = lease_access_conditions.lease_id
+        if modified_access_conditions is not None:
+            _if_match = modified_access_conditions.if_match
+            _if_modified_since = modified_access_conditions.if_modified_since
+            _if_none_match = modified_access_conditions.if_none_match
+            _if_unmodified_since = modified_access_conditions.if_unmodified_since
+        _content = body
+
+        _request = build_update_request(
+            url=self._config.url,
+            action=action,
+            mode=mode,
+            request_id_parameter=request_id_parameter,
+            timeout=timeout,
+            max_records=max_records,
+            continuation=continuation,
+            force_flag=force_flag,
+            position=position,
+            retain_uncommitted_data=retain_uncommitted_data,
+            close=close,
+            content_length=content_length,
+            content_md5=_content_md5,
+            lease_id=_lease_id,
+            cache_control=_cache_control,
+            content_type_parameter=_content_type_parameter,
+            content_disposition=_content_disposition,
+            content_encoding=_content_encoding,
+            content_language=_content_language,
+            properties=properties,
+            owner=owner,
+            group=group,
+            permissions=permissions,
+            acl=acl,
+            if_match=_if_match,
+            if_none_match=_if_none_match,
+            if_modified_since=_if_modified_since,
+            if_unmodified_since=_if_unmodified_since,
+            structured_body_type=structured_body_type,
+            structured_content_length=structured_content_length,
+            content_type=content_type,
+            version=self._config.version,
+            content=_content,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200, 202]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        deserialized = None
+        response_headers = {}
+        if response.status_code == 200:
+            response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+            response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+            response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+            response_headers["Accept-Ranges"] = self._deserialize("str", response.headers.get("Accept-Ranges"))
+            response_headers["Cache-Control"] = self._deserialize("str", response.headers.get("Cache-Control"))
+            response_headers["Content-Disposition"] = self._deserialize(
+                "str", response.headers.get("Content-Disposition")
+            )
+            response_headers["Content-Encoding"] = self._deserialize("str", response.headers.get("Content-Encoding"))
+            response_headers["Content-Language"] = self._deserialize("str", response.headers.get("Content-Language"))
+            response_headers["Content-Length"] = self._deserialize("int", response.headers.get("Content-Length"))
+            response_headers["Content-Range"] = self._deserialize("str", response.headers.get("Content-Range"))
+            response_headers["Content-Type"] = self._deserialize("str", response.headers.get("Content-Type"))
+            response_headers["Content-MD5"] = self._deserialize("str", response.headers.get("Content-MD5"))
+            response_headers["x-ms-properties"] = self._deserialize("str", response.headers.get("x-ms-properties"))
+            response_headers["x-ms-continuation"] = self._deserialize("str", response.headers.get("x-ms-continuation"))
+            response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+            response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+
+            deserialized = self._deserialize("SetAccessControlRecursiveResponse", pipeline_response.http_response)
+
+        if response.status_code == 202:
+            response_headers["Content-MD5"] = self._deserialize("str", response.headers.get("Content-MD5"))
+            response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+            response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+            response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+            response_headers["x-ms-structured-body"] = self._deserialize(
+                "str", response.headers.get("x-ms-structured-body")
+            )
+
+        if cls:
+            return cls(pipeline_response, deserialized, response_headers)  # type: ignore
+
+        return deserialized  # type: ignore
+
+    @distributed_trace
+    def lease(  # pylint: disable=inconsistent-return-statements
+        self,
+        x_ms_lease_action: Union[str, _models.PathLeaseAction],
+        request_id_parameter: Optional[str] = None,
+        timeout: Optional[int] = None,
+        x_ms_lease_break_period: Optional[int] = None,
+        proposed_lease_id: Optional[str] = None,
+        lease_access_conditions: Optional[_models.LeaseAccessConditions] = None,
+        modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """Lease Path.
+
+        Create and manage a lease to restrict write and delete access to the path. This operation
+        supports conditional HTTP requests.  For more information, see `Specifying Conditional Headers
+        for Blob Service Operations
+        <https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations>`_.
+
+        :param x_ms_lease_action: There are five lease actions: "acquire", "break", "change", "renew",
+         and "release". Use "acquire" and specify the "x-ms-proposed-lease-id" and "x-ms-lease-duration"
+         to acquire a new lease. Use "break" to break an existing lease. When a lease is broken, the
+         lease break period is allowed to elapse, during which time no lease operation except break and
+         release can be performed on the file. When a lease is successfully broken, the response
+         indicates the interval in seconds until a new lease can be acquired. Use "change" and specify
+         the current lease ID in "x-ms-lease-id" and the new lease ID in "x-ms-proposed-lease-id" to
+         change the lease ID of an active lease. Use "renew" and specify the "x-ms-lease-id" to renew an
+         existing lease. Use "release" and specify the "x-ms-lease-id" to release a lease. Known values
+         are: "acquire", "break", "change", "renew", and "release". Required.
+        :type x_ms_lease_action: str or ~azure.storage.filedatalake.models.PathLeaseAction
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param x_ms_lease_break_period: The lease break period duration is optional to break a lease,
+         and specifies the break period of the lease in seconds.  The lease break duration must be
+         between 0 and 60 seconds. Default value is None.
+        :type x_ms_lease_break_period: int
+        :param proposed_lease_id: Proposed lease ID, in a GUID string format. The Blob service returns
+         400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid
+         Constructor (String) for a list of valid GUID string formats. Default value is None.
+        :type proposed_lease_id: str
+        :param lease_access_conditions: Parameter group. Default value is None.
+        :type lease_access_conditions: ~azure.storage.filedatalake.models.LeaseAccessConditions
+        :param modified_access_conditions: Parameter group. Default value is None.
+        :type modified_access_conditions: ~azure.storage.filedatalake.models.ModifiedAccessConditions
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
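+
+        A minimal acquire/release sketch, using the higher-level ``DataLakeFileClient``
+        that wraps this operation; client construction and the ``lease`` keyword on the
+        write calls follow the public SDK, and all names are placeholders:
+
+        .. code-block:: python
+
+            from azure.storage.filedatalake import DataLakeFileClient
+
+            # Placeholder endpoint, credential, and names; substitute real values.
+            file_client = DataLakeFileClient(
+                account_url="https://myaccount.dfs.core.windows.net",
+                file_system_name="my-filesystem",
+                file_path="dir/data.txt",
+                credential="<account-key>",
+            )
+            # Acquire a 15-second lease; writes now require the lease id.
+            lease = file_client.acquire_lease(lease_duration=15)
+            try:
+                file_client.append_data(b"guarded", offset=0, length=7, lease=lease)
+                file_client.flush_data(7, lease=lease)
+            finally:
+                lease.release()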
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = kwargs.pop("params", {}) or {}
+
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _lease_id = None
+        _if_match = None
+        _if_none_match = None
+        _if_modified_since = None
+        _if_unmodified_since = None
+        if lease_access_conditions is not None:
+            _lease_id = lease_access_conditions.lease_id
+        if modified_access_conditions is not None:
+            _if_match = modified_access_conditions.if_match
+            _if_modified_since = modified_access_conditions.if_modified_since
+            _if_none_match = modified_access_conditions.if_none_match
+            _if_unmodified_since = modified_access_conditions.if_unmodified_since
+
+        _request = build_lease_request(
+            url=self._config.url,
+            x_ms_lease_action=x_ms_lease_action,
+            request_id_parameter=request_id_parameter,
+            timeout=timeout,
+            x_ms_lease_break_period=x_ms_lease_break_period,
+            lease_id=_lease_id,
+            proposed_lease_id=proposed_lease_id,
+            if_match=_if_match,
+            if_none_match=_if_none_match,
+            if_modified_since=_if_modified_since,
+            if_unmodified_since=_if_unmodified_since,
+            x_ms_lease_duration=self._config.x_ms_lease_duration,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200, 201, 202]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        if response.status_code == 200:
+            response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+            response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+            response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+            response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+            response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+            response_headers["x-ms-lease-id"] = self._deserialize("str", response.headers.get("x-ms-lease-id"))
+
+        if response.status_code == 201:
+            response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+            response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+            response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+            response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+            response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+            response_headers["x-ms-lease-id"] = self._deserialize("str", response.headers.get("x-ms-lease-id"))
+
+        if response.status_code == 202:
+            response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+            response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+            response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+            response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+            response_headers["x-ms-lease-time"] = self._deserialize("str", response.headers.get("x-ms-lease-time"))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
+    @distributed_trace
+    def read(
+        self,
+        request_id_parameter: Optional[str] = None,
+        timeout: Optional[int] = None,
+        range: Optional[str] = None,
+        x_ms_range_get_content_md5: Optional[bool] = None,
+        lease_access_conditions: Optional[_models.LeaseAccessConditions] = None,
+        modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None,
+        cpk_info: Optional[_models.CpkInfo] = None,
+        **kwargs: Any
+    ) -> Iterator[bytes]:
+        # pylint: disable=line-too-long
+        """Read File.
+
+        Read the contents of a file.  For read operations, range requests are supported. This operation
+        supports conditional HTTP requests.  For more information, see `Specifying Conditional Headers
+        for Blob Service Operations
+        <https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations>`_.
+
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param range: The HTTP Range request header specifies one or more byte ranges of the resource
+         to be retrieved. Default value is None.
+        :type range: str
+        :param x_ms_range_get_content_md5: Optional. When this header is set to "true" and specified
+         together with the Range header, the service returns the MD5 hash for the range, as long as the
+         range is less than or equal to 4 MB in size. If this header is specified without the Range
+         header, the service returns status code 400 (Bad Request). If this header is set to true when
+         the range exceeds 4 MB in size, the service returns status code 400 (Bad Request). Default
+         value is None.
+        :type x_ms_range_get_content_md5: bool
+        :param lease_access_conditions: Parameter group. Default value is None.
+        :type lease_access_conditions: ~azure.storage.filedatalake.models.LeaseAccessConditions
+        :param modified_access_conditions: Parameter group. Default value is None.
+        :type modified_access_conditions: ~azure.storage.filedatalake.models.ModifiedAccessConditions
+        :param cpk_info: Parameter group. Default value is None.
+        :type cpk_info: ~azure.storage.filedatalake.models.CpkInfo
+        :return: Iterator[bytes] or the result of cls(response)
+        :rtype: Iterator[bytes]
+        :raises ~azure.core.exceptions.HttpResponseError:
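+
+        A minimal full and ranged read sketch, using the higher-level
+        ``DataLakeFileClient`` that wraps this operation; client construction is a
+        placeholder:
+
+        .. code-block:: python
+
+            from azure.storage.filedatalake import DataLakeFileClient
+
+            # Placeholder endpoint, credential, and names; substitute real values.
+            file_client = DataLakeFileClient(
+                account_url="https://myaccount.dfs.core.windows.net",
+                file_system_name="my-filesystem",
+                file_path="dir/data.txt",
+                credential="<account-key>",
+            )
+            # Full read: downloads the whole file.
+            everything = file_client.download_file().readall()
+            # Ranged read: only the first 1024 bytes (maps to the HTTP Range header).
+            head = file_client.download_file(offset=0, length=1024).readall()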
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = kwargs.pop("params", {}) or {}
+
+        cls: ClsType[Iterator[bytes]] = kwargs.pop("cls", None)
+
+        _lease_id = None
+        _if_match = None
+        _if_none_match = None
+        _if_modified_since = None
+        _if_unmodified_since = None
+        _encryption_key = None
+        _encryption_key_sha256 = None
+        _encryption_algorithm = None
+        if lease_access_conditions is not None:
+            _lease_id = lease_access_conditions.lease_id
+        if modified_access_conditions is not None:
+            _if_match = modified_access_conditions.if_match
+            _if_modified_since = modified_access_conditions.if_modified_since
+            _if_none_match = modified_access_conditions.if_none_match
+            _if_unmodified_since = modified_access_conditions.if_unmodified_since
+        if cpk_info is not None:
+            _encryption_algorithm = cpk_info.encryption_algorithm
+            _encryption_key = cpk_info.encryption_key
+            _encryption_key_sha256 = cpk_info.encryption_key_sha256
+
+        _request = build_read_request(
+            url=self._config.url,
+            request_id_parameter=request_id_parameter,
+            timeout=timeout,
+            range=range,
+            lease_id=_lease_id,
+            x_ms_range_get_content_md5=x_ms_range_get_content_md5,
+            if_match=_if_match,
+            if_none_match=_if_none_match,
+            if_modified_since=_if_modified_since,
+            if_unmodified_since=_if_unmodified_since,
+            encryption_key=_encryption_key,
+            encryption_key_sha256=_encryption_key_sha256,
+            encryption_algorithm=_encryption_algorithm,  # type: ignore
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _decompress = kwargs.pop("decompress", True)
+        _stream = True
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200, 206]:
+            try:
+                response.read()  # Load the body in memory and close the socket
+            except (StreamConsumedError, StreamClosedError):
+                pass
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        if response.status_code == 200:
+            response_headers["Accept-Ranges"] = self._deserialize("str", response.headers.get("Accept-Ranges"))
+            response_headers["Cache-Control"] = self._deserialize("str", response.headers.get("Cache-Control"))
+            response_headers["Content-Disposition"] = self._deserialize(
+                "str", response.headers.get("Content-Disposition")
+            )
+            response_headers["Content-Encoding"] = self._deserialize("str", response.headers.get("Content-Encoding"))
+            response_headers["Content-Language"] = self._deserialize("str", response.headers.get("Content-Language"))
+            response_headers["Content-Length"] = self._deserialize("int", response.headers.get("Content-Length"))
+            response_headers["Content-Range"] = self._deserialize("str", response.headers.get("Content-Range"))
+            response_headers["Content-Type"] = self._deserialize("str", response.headers.get("Content-Type"))
+            response_headers["Content-MD5"] = self._deserialize("str", response.headers.get("Content-MD5"))
+            response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+            response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+            response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+            response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+            response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+            response_headers["x-ms-resource-type"] = self._deserialize(
+                "str", response.headers.get("x-ms-resource-type")
+            )
+            response_headers["x-ms-properties"] = self._deserialize("str", response.headers.get("x-ms-properties"))
+            response_headers["x-ms-lease-duration"] = self._deserialize(
+                "str", response.headers.get("x-ms-lease-duration")
+            )
+            response_headers["x-ms-lease-state"] = self._deserialize("str", response.headers.get("x-ms-lease-state"))
+            response_headers["x-ms-lease-status"] = self._deserialize("str", response.headers.get("x-ms-lease-status"))
+            response_headers["x-ms-request-server-encrypted"] = self._deserialize(
+                "bool", response.headers.get("x-ms-request-server-encrypted")
+            )
+            response_headers["x-ms-encryption-key-sha256"] = self._deserialize(
+                "str", response.headers.get("x-ms-encryption-key-sha256")
+            )
+
+        if response.status_code == 206:
+            response_headers["Accept-Ranges"] = self._deserialize("str", response.headers.get("Accept-Ranges"))
+            response_headers["Cache-Control"] = self._deserialize("str", response.headers.get("Cache-Control"))
+            response_headers["Content-Disposition"] = self._deserialize(
+                "str", response.headers.get("Content-Disposition")
+            )
+            response_headers["Content-Encoding"] = self._deserialize("str", response.headers.get("Content-Encoding"))
+            response_headers["Content-Language"] = self._deserialize("str", response.headers.get("Content-Language"))
+            response_headers["Content-Length"] = self._deserialize("int", response.headers.get("Content-Length"))
+            response_headers["Content-Range"] = self._deserialize("str", response.headers.get("Content-Range"))
+            response_headers["Content-Type"] = self._deserialize("str", response.headers.get("Content-Type"))
+            response_headers["Content-MD5"] = self._deserialize("str", response.headers.get("Content-MD5"))
+            response_headers["x-ms-content-md5"] = self._deserialize("str", response.headers.get("x-ms-content-md5"))
+            response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+            response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+            response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+            response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+            response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+            response_headers["x-ms-resource-type"] = self._deserialize(
+                "str", response.headers.get("x-ms-resource-type")
+            )
+            response_headers["x-ms-properties"] = self._deserialize("str", response.headers.get("x-ms-properties"))
+            response_headers["x-ms-lease-duration"] = self._deserialize(
+                "str", response.headers.get("x-ms-lease-duration")
+            )
+            response_headers["x-ms-lease-state"] = self._deserialize("str", response.headers.get("x-ms-lease-state"))
+            response_headers["x-ms-lease-status"] = self._deserialize("str", response.headers.get("x-ms-lease-status"))
+            response_headers["x-ms-request-server-encrypted"] = self._deserialize(
+                "bool", response.headers.get("x-ms-request-server-encrypted")
+            )
+            response_headers["x-ms-encryption-key-sha256"] = self._deserialize(
+                "str", response.headers.get("x-ms-encryption-key-sha256")
+            )
+
+        deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
+
+        if cls:
+            return cls(pipeline_response, deserialized, response_headers)  # type: ignore
+
+        return deserialized  # type: ignore
+
+    @distributed_trace
+    def get_properties(  # pylint: disable=inconsistent-return-statements
+        self,
+        request_id_parameter: Optional[str] = None,
+        timeout: Optional[int] = None,
+        action: Optional[Union[str, _models.PathGetPropertiesAction]] = None,
+        upn: Optional[bool] = None,
+        lease_access_conditions: Optional[_models.LeaseAccessConditions] = None,
+        modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """Get Properties | Get Status | Get Access Control List.
+
+        Get Properties returns all system and user defined properties for a path. Get Status returns
+        all system defined properties for a path. Get Access Control List returns the access control
+        list for a path. This operation supports conditional HTTP requests.  For more information, see
+        `Specifying Conditional Headers for Blob Service Operations
+        <https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations>`_.
+
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param action: Optional. If the value is "getStatus" only the system defined properties for the
+         path are returned. If the value is "getAccessControl" the access control list is returned in
+         the response headers (Hierarchical Namespace must be enabled for the account), otherwise the
+         properties are returned. Known values are: "getAccessControl" and "getStatus". Default value is
+         None.
+        :type action: str or ~azure.storage.filedatalake.models.PathGetPropertiesAction
+        :param upn: Optional. Valid only when Hierarchical Namespace is enabled for the account. If
+         "true", the user identity values returned in the x-ms-owner, x-ms-group, and x-ms-acl response
+         headers will be transformed from Azure Active Directory Object IDs to User Principal Names.  If
+         "false", the values will be returned as Azure Active Directory Object IDs. The default value is
+         false. Note that group and application Object IDs are not translated because they do not have
+         unique friendly names. Default value is None.
+        :type upn: bool
+        :param lease_access_conditions: Parameter group. Default value is None.
+        :type lease_access_conditions: ~azure.storage.filedatalake.models.LeaseAccessConditions
+        :param modified_access_conditions: Parameter group. Default value is None.
+        :type modified_access_conditions: ~azure.storage.filedatalake.models.ModifiedAccessConditions
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
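+
+        A minimal sketch of the property and access-control variants, using the
+        higher-level ``DataLakeFileClient`` that wraps this operation; client
+        construction is a placeholder, and ``get_access_control`` is assumed to return
+        the parsed response headers as a dict, per the public SDK:
+
+        .. code-block:: python
+
+            from azure.storage.filedatalake import DataLakeFileClient
+
+            # Placeholder endpoint, credential, and names; substitute real values.
+            file_client = DataLakeFileClient(
+                account_url="https://myaccount.dfs.core.windows.net",
+                file_system_name="my-filesystem",
+                file_path="dir/data.txt",
+                credential="<account-key>",
+            )
+            # System and user-defined properties.
+            props = file_client.get_file_properties()
+            # Access control list; upn=True returns User Principal Names instead of
+            # Azure Active Directory Object IDs (requires Hierarchical Namespace).
+            acl = file_client.get_access_control(upn=True)
+            print(props.size, acl.get("acl"))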
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = kwargs.pop("params", {}) or {}
+
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _lease_id = None
+        _if_match = None
+        _if_none_match = None
+        _if_modified_since = None
+        _if_unmodified_since = None
+        if lease_access_conditions is not None:
+            _lease_id = lease_access_conditions.lease_id
+        if modified_access_conditions is not None:
+            _if_match = modified_access_conditions.if_match
+            _if_modified_since = modified_access_conditions.if_modified_since
+            _if_none_match = modified_access_conditions.if_none_match
+            _if_unmodified_since = modified_access_conditions.if_unmodified_since
+
+        _request = build_get_properties_request(
+            url=self._config.url,
+            request_id_parameter=request_id_parameter,
+            timeout=timeout,
+            action=action,
+            upn=upn,
+            lease_id=_lease_id,
+            if_match=_if_match,
+            if_none_match=_if_none_match,
+            if_modified_since=_if_modified_since,
+            if_unmodified_since=_if_unmodified_since,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["Accept-Ranges"] = self._deserialize("str", response.headers.get("Accept-Ranges"))
+        response_headers["Cache-Control"] = self._deserialize("str", response.headers.get("Cache-Control"))
+        response_headers["Content-Disposition"] = self._deserialize("str", response.headers.get("Content-Disposition"))
+        response_headers["Content-Encoding"] = self._deserialize("str", response.headers.get("Content-Encoding"))
+        response_headers["Content-Language"] = self._deserialize("str", response.headers.get("Content-Language"))
+        response_headers["Content-Length"] = self._deserialize("int", response.headers.get("Content-Length"))
+        response_headers["Content-Range"] = self._deserialize("str", response.headers.get("Content-Range"))
+        response_headers["Content-Type"] = self._deserialize("str", response.headers.get("Content-Type"))
+        response_headers["Content-MD5"] = self._deserialize("str", response.headers.get("Content-MD5"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["x-ms-resource-type"] = self._deserialize("str", response.headers.get("x-ms-resource-type"))
+        response_headers["x-ms-properties"] = self._deserialize("str", response.headers.get("x-ms-properties"))
+        response_headers["x-ms-owner"] = self._deserialize("str", response.headers.get("x-ms-owner"))
+        response_headers["x-ms-group"] = self._deserialize("str", response.headers.get("x-ms-group"))
+        response_headers["x-ms-permissions"] = self._deserialize("str", response.headers.get("x-ms-permissions"))
+        response_headers["x-ms-acl"] = self._deserialize("str", response.headers.get("x-ms-acl"))
+        response_headers["x-ms-lease-duration"] = self._deserialize("str", response.headers.get("x-ms-lease-duration"))
+        response_headers["x-ms-lease-state"] = self._deserialize("str", response.headers.get("x-ms-lease-state"))
+        response_headers["x-ms-lease-status"] = self._deserialize("str", response.headers.get("x-ms-lease-status"))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
+    @distributed_trace
+    def delete(  # pylint: disable=inconsistent-return-statements
+        self,
+        request_id_parameter: Optional[str] = None,
+        timeout: Optional[int] = None,
+        recursive: Optional[bool] = None,
+        continuation: Optional[str] = None,
+        paginated: Optional[bool] = None,
+        lease_access_conditions: Optional[_models.LeaseAccessConditions] = None,
+        modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """Delete File | Delete Directory.
+
+        Delete the file or directory. This operation supports conditional HTTP requests.  For more
+        information, see `Specifying Conditional Headers for Blob Service Operations
+        <https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations>`_.
+
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param recursive: Required and valid only when the resource is a directory. If "true", all
+         paths beneath the directory will be deleted. If "false" and the directory is non-empty, an
+         error occurs. Default value is None.
+        :type recursive: bool
+        :param continuation: Optional.  When deleting a directory, the number of paths that are deleted
+         with each invocation is limited.  If the number of paths to be deleted exceeds this limit, a
+         continuation token is returned in this response header.  When a continuation token is returned
+         in the response, it must be specified in a subsequent invocation of the delete operation to
+         continue deleting the directory. Default value is None.
+        :type continuation: str
+        :param paginated: If true, the delete is paginated. Pagination applies to the recursive ACL
+         checks (a POSIX requirement enforced by the server); the delete itself is performed as an
+         atomic operation once the ACL checks are complete. If false or omitted, the default
+         behavior applies, which may time out on very large directories because of the recursive
+         ACL checks. This parameter was introduced for backward compatibility. Default value is
+         None.
+        :type paginated: bool
+        :param lease_access_conditions: Parameter group. Default value is None.
+        :type lease_access_conditions: ~azure.storage.filedatalake.models.LeaseAccessConditions
+        :param modified_access_conditions: Parameter group. Default value is None.
+        :type modified_access_conditions: ~azure.storage.filedatalake.models.ModifiedAccessConditions
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
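+
+        A minimal sketch, using the higher-level file and directory clients that wrap
+        this operation; client construction and names are placeholders:
+
+        .. code-block:: python
+
+            from azure.storage.filedatalake import DataLakeServiceClient
+
+            # Placeholder endpoint and credential; substitute real values.
+            service = DataLakeServiceClient(
+                account_url="https://myaccount.dfs.core.windows.net",
+                credential="<account-key>",
+            )
+            fs = service.get_file_system_client("my-filesystem")
+            # Delete a single file.
+            fs.get_file_client("dir/data.txt").delete_file()
+            # Delete a directory and everything beneath it (recursive delete).
+            fs.get_directory_client("dir").delete_directory()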
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = kwargs.pop("params", {}) or {}
+
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _lease_id = None
+        _if_match = None
+        _if_none_match = None
+        _if_modified_since = None
+        _if_unmodified_since = None
+        if lease_access_conditions is not None:
+            _lease_id = lease_access_conditions.lease_id
+        if modified_access_conditions is not None:
+            _if_match = modified_access_conditions.if_match
+            _if_modified_since = modified_access_conditions.if_modified_since
+            _if_none_match = modified_access_conditions.if_none_match
+            _if_unmodified_since = modified_access_conditions.if_unmodified_since
+
+        _request = build_delete_request(
+            url=self._config.url,
+            request_id_parameter=request_id_parameter,
+            timeout=timeout,
+            recursive=recursive,
+            continuation=continuation,
+            lease_id=_lease_id,
+            if_match=_if_match,
+            if_none_match=_if_none_match,
+            if_modified_since=_if_modified_since,
+            if_unmodified_since=_if_unmodified_since,
+            paginated=paginated,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200, 202]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        if response.status_code == 200:
+            response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+            response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+            response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+            response_headers["x-ms-continuation"] = self._deserialize("str", response.headers.get("x-ms-continuation"))
+            response_headers["x-ms-deletion-id"] = self._deserialize("str", response.headers.get("x-ms-deletion-id"))
+
+        if response.status_code == 202:
+            response_headers["Date"] = self._deserialize("str", response.headers.get("Date"))
+            response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+            response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+            response_headers["x-ms-continuation"] = self._deserialize("str", response.headers.get("x-ms-continuation"))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
+    @distributed_trace
+    def set_access_control(  # pylint: disable=inconsistent-return-statements
+        self,
+        timeout: Optional[int] = None,
+        owner: Optional[str] = None,
+        group: Optional[str] = None,
+        permissions: Optional[str] = None,
+        acl: Optional[str] = None,
+        request_id_parameter: Optional[str] = None,
+        lease_access_conditions: Optional[_models.LeaseAccessConditions] = None,
+        modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """Set the owner, group, permissions, or access control list for a path.
+
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param owner: Optional. The owner of the blob or directory. Default value is None.
+        :type owner: str
+        :param group: Optional. The owning group of the blob or directory. Default value is None.
+        :type group: str
+        :param permissions: Optional and only valid if Hierarchical Namespace is enabled for the
+         account. Sets POSIX access permissions for the file owner, the file owning group, and others.
+         Each class may be granted read, write, or execute permission.  The sticky bit is also
+         supported.  Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are supported.
+         Default value is None.
+        :type permissions: str
+        :param acl: Sets POSIX access control rights on files and directories. The value is a
+         comma-separated list of access control entries. Each access control entry (ACE) consists of a
+         scope, a type, a user or group identifier, and permissions in the format
+         "[scope:][type]:[id]:[permissions]". Default value is None.
+        :type acl: str
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param lease_access_conditions: Parameter group. Default value is None.
+        :type lease_access_conditions: ~azure.storage.filedatalake.models.LeaseAccessConditions
+        :param modified_access_conditions: Parameter group. Default value is None.
+        :type modified_access_conditions: ~azure.storage.filedatalake.models.ModifiedAccessConditions
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        action: Literal["setAccessControl"] = kwargs.pop("action", _params.pop("action", "setAccessControl"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _lease_id = None
+        _if_match = None
+        _if_none_match = None
+        _if_modified_since = None
+        _if_unmodified_since = None
+        if lease_access_conditions is not None:
+            _lease_id = lease_access_conditions.lease_id
+        if modified_access_conditions is not None:
+            _if_match = modified_access_conditions.if_match
+            _if_modified_since = modified_access_conditions.if_modified_since
+            _if_none_match = modified_access_conditions.if_none_match
+            _if_unmodified_since = modified_access_conditions.if_unmodified_since
+
+        _request = build_set_access_control_request(
+            url=self._config.url,
+            timeout=timeout,
+            lease_id=_lease_id,
+            owner=owner,
+            group=group,
+            permissions=permissions,
+            acl=acl,
+            if_match=_if_match,
+            if_none_match=_if_none_match,
+            if_modified_since=_if_modified_since,
+            if_unmodified_since=_if_unmodified_since,
+            request_id_parameter=request_id_parameter,
+            action=action,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
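+    # A hedged sketch via the public surface (names illustrative): callers
+    # normally reach this operation through DataLakePathClient rather than
+    # PathOperations directly.
+    #
+    #   from azure.storage.filedatalake import DataLakeDirectoryClient
+    #   client = DataLakeDirectoryClient(
+    #       "https://myaccount.dfs.core.windows.net", "myfs", "mydir",
+    #       credential="<account-key>")
+    #   client.set_access_control(owner="user1", permissions="rwxr-x---")
+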
+    @distributed_trace
+    def set_access_control_recursive(
+        self,
+        mode: Union[str, _models.PathSetAccessControlRecursiveMode],
+        timeout: Optional[int] = None,
+        continuation: Optional[str] = None,
+        force_flag: Optional[bool] = None,
+        max_records: Optional[int] = None,
+        acl: Optional[str] = None,
+        request_id_parameter: Optional[str] = None,
+        **kwargs: Any
+    ) -> _models.SetAccessControlRecursiveResponse:
+        # pylint: disable=line-too-long
+        """Set the access control list for a path and sub-paths.
+
+        :param mode: Mode "set" sets POSIX access control rights on files and directories, "modify"
+         modifies one or more POSIX access control rights  that pre-exist on files and directories,
+         "remove" removes one or more POSIX access control rights  that were present earlier on files
+         and directories. Known values are: "set", "modify", and "remove". Required.
+        :type mode: str or ~azure.storage.filedatalake.models.PathSetAccessControlRecursiveMode
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param continuation: Optional.  The number of paths processed with each invocation is
+         limited.  If the number of paths to be processed exceeds this limit, a continuation token is
+         returned in the x-ms-continuation response header.  When a continuation token is returned, it
+         must be specified in a subsequent invocation of the setAccessControlRecursive operation to
+         continue the operation on the directory. Default value is None.
+        :type continuation: str
+        :param force_flag: Optional. Valid for "SetAccessControlRecursive" operation. If set to false,
+         the operation will terminate quickly on encountering user errors (4XX). If true, the operation
+         will ignore user errors and proceed with the operation on other sub-entities of the directory.
+         A continuation token is only returned when forceFlag is true and user errors are encountered.
+         If not set, the service default is false. Default value is None.
+        :type force_flag: bool
+        :param max_records: Optional. It specifies the maximum number of files or directories on which
+         the acl change will be applied. If omitted or greater than 2,000, the request will process up
+         to 2,000 items. Default value is None.
+        :type max_records: int
+        :param acl: Sets POSIX access control rights on files and directories. The value is a
+         comma-separated list of access control entries. Each access control entry (ACE) consists of a
+         scope, a type, a user or group identifier, and permissions in the format
+         "[scope:][type]:[id]:[permissions]". Default value is None.
+        :type acl: str
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :return: SetAccessControlRecursiveResponse or the result of cls(response)
+        :rtype: ~azure.storage.filedatalake.models.SetAccessControlRecursiveResponse
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        action: Literal["setAccessControlRecursive"] = kwargs.pop(
+            "action", _params.pop("action", "setAccessControlRecursive")
+        )
+        cls: ClsType[_models.SetAccessControlRecursiveResponse] = kwargs.pop("cls", None)
+
+        _request = build_set_access_control_recursive_request(
+            url=self._config.url,
+            mode=mode,
+            timeout=timeout,
+            continuation=continuation,
+            force_flag=force_flag,
+            max_records=max_records,
+            acl=acl,
+            request_id_parameter=request_id_parameter,
+            action=action,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-continuation"] = self._deserialize("str", response.headers.get("x-ms-continuation"))
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+
+        deserialized = self._deserialize("SetAccessControlRecursiveResponse", pipeline_response.http_response)
+
+        if cls:
+            return cls(pipeline_response, deserialized, response_headers)  # type: ignore
+
+        return deserialized  # type: ignore
+
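+    # A hedged sketch (assumed client): the public
+    # DataLakeDirectoryClient.set_access_control_recursive drives this operation
+    # page by page, following x-ms-continuation until the tree is processed.
+    #
+    #   result = client.set_access_control_recursive(
+    #       acl="user::rwx,group::r-x,other::---")
+    #   print(result.counters.files_successful, result.counters.failure_count)
+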
+    @distributed_trace
+    def flush_data(  # pylint: disable=inconsistent-return-statements
+        self,
+        timeout: Optional[int] = None,
+        position: Optional[int] = None,
+        retain_uncommitted_data: Optional[bool] = None,
+        close: Optional[bool] = None,
+        content_length: Optional[int] = None,
+        lease_action: Optional[Union[str, _models.LeaseAction]] = None,
+        lease_duration: Optional[int] = None,
+        proposed_lease_id: Optional[str] = None,
+        request_id_parameter: Optional[str] = None,
+        path_http_headers: Optional[_models.PathHTTPHeaders] = None,
+        lease_access_conditions: Optional[_models.LeaseAccessConditions] = None,
+        modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None,
+        cpk_info: Optional[_models.CpkInfo] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """Set the owner, group, permissions, or access control list for a path.
+
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param position: This parameter allows the caller to upload data in parallel and control the
+         order in which it is appended to the file.  It is required when uploading data to be appended
+         to the file and when flushing previously uploaded data to the file.  The value must be the
+         position where the data is to be appended.  Uploaded data is not immediately flushed, or
+         written, to the file.  To flush, the previously uploaded data must be contiguous, the position
+         parameter must be specified and equal to the length of the file after all data has been
+         written, and there must not be a request entity body included with the request. Default value
+         is None.
+        :type position: int
+        :param retain_uncommitted_data: Valid only for flush operations.  If "true", uncommitted data
+         is retained after the flush operation completes; otherwise, the uncommitted data is deleted
+         after the flush operation.  The default is false.  Data at offsets less than the specified
+         position are written to the file when flush succeeds, but this optional parameter allows data
+         after the flush position to be retained for a future flush operation. Default value is None.
+        :type retain_uncommitted_data: bool
+        :param close: Azure Storage Events allow applications to receive notifications when files
+         change. When Azure Storage Events are enabled, a file changed event is raised. This event has a
+         property indicating whether this is the final change to distinguish the difference between an
+         intermediate flush to a file stream and the final close of a file stream. The close query
+         parameter is valid only when the action is "flush" and change notifications are enabled. If the
+         value of close is "true" and the flush operation completes successfully, the service raises a
+         file change notification with a property indicating that this is the final update (the file
+         stream has been closed). If "false" a change notification is raised indicating the file has
+         changed. The default is false. This query parameter is set to true by the Hadoop ABFS driver to
+         indicate that the file stream has been closed. Default value is None.
+        :type close: bool
+        :param content_length: Required for "Append Data" and "Flush Data".  Must be 0 for "Flush
+         Data".  Must be the length of the request content in bytes for "Append Data". Default value is
+         None.
+        :type content_length: int
+        :param lease_action: Optional. If "acquire" it will acquire the lease. If "auto-renew" it will
+         renew the lease. If "release" it will release the lease only on flush. If "acquire-release" it
+         will acquire the lease, complete the operation, and then release the lease. Known values
+         are: "acquire", "auto-renew", "release", and "acquire-release". Default value is None.
+        :type lease_action: str or ~azure.storage.filedatalake.models.LeaseAction
+        :param lease_duration: The lease duration is required to acquire a lease, and specifies the
+         duration of the lease in seconds.  The lease duration must be between 15 and 60 seconds or -1
+         for infinite lease. Default value is None.
+        :type lease_duration: int
+        :param proposed_lease_id: Proposed lease ID, in a GUID string format. The Blob service returns
+         400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid
+         Constructor (String) for a list of valid GUID string formats. Default value is None.
+        :type proposed_lease_id: str
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param path_http_headers: Parameter group. Default value is None.
+        :type path_http_headers: ~azure.storage.filedatalake.models.PathHTTPHeaders
+        :param lease_access_conditions: Parameter group. Default value is None.
+        :type lease_access_conditions: ~azure.storage.filedatalake.models.LeaseAccessConditions
+        :param modified_access_conditions: Parameter group. Default value is None.
+        :type modified_access_conditions: ~azure.storage.filedatalake.models.ModifiedAccessConditions
+        :param cpk_info: Parameter group. Default value is None.
+        :type cpk_info: ~azure.storage.filedatalake.models.CpkInfo
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        action: Literal["flush"] = kwargs.pop("action", _params.pop("action", "flush"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _content_md5 = None
+        _lease_id = None
+        _cache_control = None
+        _content_type_parameter = None
+        _content_disposition = None
+        _content_encoding = None
+        _content_language = None
+        _if_match = None
+        _if_none_match = None
+        _if_modified_since = None
+        _if_unmodified_since = None
+        _encryption_key = None
+        _encryption_key_sha256 = None
+        _encryption_algorithm = None
+        if path_http_headers is not None:
+            _cache_control = path_http_headers.cache_control
+            _content_disposition = path_http_headers.content_disposition
+            _content_encoding = path_http_headers.content_encoding
+            _content_language = path_http_headers.content_language
+            _content_md5 = path_http_headers.content_md5
+            _content_type_parameter = path_http_headers.content_type
+        if lease_access_conditions is not None:
+            _lease_id = lease_access_conditions.lease_id
+        if modified_access_conditions is not None:
+            _if_match = modified_access_conditions.if_match
+            _if_modified_since = modified_access_conditions.if_modified_since
+            _if_none_match = modified_access_conditions.if_none_match
+            _if_unmodified_since = modified_access_conditions.if_unmodified_since
+        if cpk_info is not None:
+            _encryption_algorithm = cpk_info.encryption_algorithm
+            _encryption_key = cpk_info.encryption_key
+            _encryption_key_sha256 = cpk_info.encryption_key_sha256
+
+        _request = build_flush_data_request(
+            url=self._config.url,
+            timeout=timeout,
+            position=position,
+            retain_uncommitted_data=retain_uncommitted_data,
+            close=close,
+            content_length=content_length,
+            content_md5=_content_md5,
+            lease_id=_lease_id,
+            lease_action=lease_action,
+            lease_duration=lease_duration,
+            proposed_lease_id=proposed_lease_id,
+            cache_control=_cache_control,
+            content_type_parameter=_content_type_parameter,
+            content_disposition=_content_disposition,
+            content_encoding=_content_encoding,
+            content_language=_content_language,
+            if_match=_if_match,
+            if_none_match=_if_none_match,
+            if_modified_since=_if_modified_since,
+            if_unmodified_since=_if_unmodified_since,
+            request_id_parameter=request_id_parameter,
+            encryption_key=_encryption_key,
+            encryption_key_sha256=_encryption_key_sha256,
+            encryption_algorithm=_encryption_algorithm,  # type: ignore
+            action=action,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["Content-Length"] = self._deserialize("int", response.headers.get("Content-Length"))
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["x-ms-request-server-encrypted"] = self._deserialize(
+            "bool", response.headers.get("x-ms-request-server-encrypted")
+        )
+        response_headers["x-ms-encryption-key-sha256"] = self._deserialize(
+            "str", response.headers.get("x-ms-encryption-key-sha256")
+        )
+        response_headers["x-ms-lease-renewed"] = self._deserialize("bool", response.headers.get("x-ms-lease-renewed"))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
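+    # A hedged sketch of the usual append-then-flush sequence through the public
+    # DataLakeFileClient (file_client and sizes are illustrative); the flush
+    # position must equal the file length once all appends have landed.
+    #
+    #   file_client.append_data(b"abc", offset=0, length=3)
+    #   file_client.flush_data(3)
+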
+    @distributed_trace
+    def append_data(  # pylint: disable=inconsistent-return-statements
+        self,
+        body: IO[bytes],
+        position: Optional[int] = None,
+        timeout: Optional[int] = None,
+        content_length: Optional[int] = None,
+        transactional_content_crc64: Optional[bytes] = None,
+        lease_action: Optional[Union[str, _models.LeaseAction]] = None,
+        lease_duration: Optional[int] = None,
+        proposed_lease_id: Optional[str] = None,
+        request_id_parameter: Optional[str] = None,
+        flush: Optional[bool] = None,
+        structured_body_type: Optional[str] = None,
+        structured_content_length: Optional[int] = None,
+        path_http_headers: Optional[_models.PathHTTPHeaders] = None,
+        lease_access_conditions: Optional[_models.LeaseAccessConditions] = None,
+        cpk_info: Optional[_models.CpkInfo] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """Append data to the file.
+
+        :param body: Initial data. Required.
+        :type body: IO[bytes]
+        :param position: This parameter allows the caller to upload data in parallel and control the
+         order in which it is appended to the file.  It is required when uploading data to be appended
+         to the file and when flushing previously uploaded data to the file.  The value must be the
+         position where the data is to be appended.  Uploaded data is not immediately flushed, or
+         written, to the file.  To flush, the previously uploaded data must be contiguous, the position
+         parameter must be specified and equal to the length of the file after all data has been
+         written, and there must not be a request entity body included with the request. Default value
+         is None.
+        :type position: int
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param content_length: Required for "Append Data" and "Flush Data".  Must be 0 for "Flush
+         Data".  Must be the length of the request content in bytes for "Append Data". Default value is
+         None.
+        :type content_length: int
+        :param transactional_content_crc64: Specify the transactional crc64 for the body, to be
+         validated by the service. Default value is None.
+        :type transactional_content_crc64: bytes
+        :param lease_action: Optional. If "acquire" it will acquire the lease. If "auto-renew" it will
+         renew the lease. If "release" it will release the lease only on flush. If "acquire-release" it
+         will acquire the lease, complete the operation, and then release the lease. Known values
+         are: "acquire", "auto-renew", "release", and "acquire-release". Default value is None.
+        :type lease_action: str or ~azure.storage.filedatalake.models.LeaseAction
+        :param lease_duration: The lease duration is required to acquire a lease, and specifies the
+         duration of the lease in seconds.  The lease duration must be between 15 and 60 seconds or -1
+         for infinite lease. Default value is None.
+        :type lease_duration: int
+        :param proposed_lease_id: Proposed lease ID, in a GUID string format. The Blob service returns
+         400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid
+         Constructor (String) for a list of valid GUID string formats. Default value is None.
+        :type proposed_lease_id: str
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param flush: If file should be flushed after the append. Default value is None.
+        :type flush: bool
+        :param structured_body_type: Required if the request body is a structured message. Specifies
+         the message schema version and properties. Default value is None.
+        :type structured_body_type: str
+        :param structured_content_length: Required if the request body is a structured message.
+         Specifies the length of the blob/file content inside the message body. Will always be smaller
+         than Content-Length. Default value is None.
+        :type structured_content_length: int
+        :param path_http_headers: Parameter group. Default value is None.
+        :type path_http_headers: ~azure.storage.filedatalake.models.PathHTTPHeaders
+        :param lease_access_conditions: Parameter group. Default value is None.
+        :type lease_access_conditions: ~azure.storage.filedatalake.models.LeaseAccessConditions
+        :param cpk_info: Parameter group. Default value is None.
+        :type cpk_info: ~azure.storage.filedatalake.models.CpkInfo
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        action: Literal["append"] = kwargs.pop("action", _params.pop("action", "append"))
+        content_type: str = kwargs.pop("content_type", _headers.pop("Content-Type", "application/json"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _transactional_content_hash = None
+        _lease_id = None
+        _encryption_key = None
+        _encryption_key_sha256 = None
+        _encryption_algorithm = None
+        if path_http_headers is not None:
+            _transactional_content_hash = path_http_headers.transactional_content_hash
+        if lease_access_conditions is not None:
+            _lease_id = lease_access_conditions.lease_id
+        if cpk_info is not None:
+            _encryption_algorithm = cpk_info.encryption_algorithm
+            _encryption_key = cpk_info.encryption_key
+            _encryption_key_sha256 = cpk_info.encryption_key_sha256
+        _content = body
+
+        _request = build_append_data_request(
+            url=self._config.url,
+            position=position,
+            timeout=timeout,
+            content_length=content_length,
+            transactional_content_hash=_transactional_content_hash,
+            transactional_content_crc64=transactional_content_crc64,
+            lease_id=_lease_id,
+            lease_action=lease_action,
+            lease_duration=lease_duration,
+            proposed_lease_id=proposed_lease_id,
+            request_id_parameter=request_id_parameter,
+            encryption_key=_encryption_key,
+            encryption_key_sha256=_encryption_key_sha256,
+            encryption_algorithm=_encryption_algorithm,  # type: ignore
+            flush=flush,
+            structured_body_type=structured_body_type,
+            structured_content_length=structured_content_length,
+            action=action,
+            content_type=content_type,
+            version=self._config.version,
+            content=_content,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [202]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Content-MD5"] = self._deserialize("bytearray", response.headers.get("Content-MD5"))
+        response_headers["x-ms-content-crc64"] = self._deserialize(
+            "bytearray", response.headers.get("x-ms-content-crc64")
+        )
+        response_headers["x-ms-request-server-encrypted"] = self._deserialize(
+            "bool", response.headers.get("x-ms-request-server-encrypted")
+        )
+        response_headers["x-ms-encryption-key-sha256"] = self._deserialize(
+            "str", response.headers.get("x-ms-encryption-key-sha256")
+        )
+        response_headers["x-ms-lease-renewed"] = self._deserialize("bool", response.headers.get("x-ms-lease-renewed"))
+        response_headers["x-ms-structured-body"] = self._deserialize(
+            "str", response.headers.get("x-ms-structured-body")
+        )
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
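+    # A hedged sketch (assumed names): chunked upload through the public
+    # DataLakeFileClient, which issues one append per chunk and a final flush.
+    #
+    #   offset = 0
+    #   for chunk in iter(lambda: stream.read(4 * 1024 * 1024), b""):
+    #       file_client.append_data(chunk, offset=offset, length=len(chunk))
+    #       offset += len(chunk)
+    #   file_client.flush_data(offset)
+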
+    @distributed_trace
+    def set_expiry(  # pylint: disable=inconsistent-return-statements
+        self,
+        expiry_options: Union[str, _models.PathExpiryOptions],
+        timeout: Optional[int] = None,
+        request_id_parameter: Optional[str] = None,
+        expires_on: Optional[str] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """Sets the time a blob will expire and be deleted.
+
+        :param expiry_options: Indicates mode of the expiry time. Known values are: "NeverExpire",
+         "RelativeToCreation", "RelativeToNow", and "Absolute". Required.
+        :type expiry_options: str or ~azure.storage.filedatalake.models.PathExpiryOptions
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param expires_on: The time to set the blob to expire. Default value is None.
+        :type expires_on: str
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        comp: Literal["expiry"] = kwargs.pop("comp", _params.pop("comp", "expiry"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _request = build_set_expiry_request(
+            url=self._config.url,
+            expiry_options=expiry_options,
+            timeout=timeout,
+            request_id_parameter=request_id_parameter,
+            expires_on=expires_on,
+            comp=comp,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
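+    # A hedged sketch via the public surface (assumed client): DataLakeFileClient
+    # exposes this as set_file_expiry; for "RelativeToNow" the expires_on value
+    # is an offset in milliseconds.
+    #
+    #   file_client.set_file_expiry("RelativeToNow", expires_on=86400000)
+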
+    @distributed_trace
+    def undelete(  # pylint: disable=inconsistent-return-statements
+        self,
+        timeout: Optional[int] = None,
+        undelete_source: Optional[str] = None,
+        request_id_parameter: Optional[str] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """Undelete a path that was previously soft deleted.
+
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param undelete_source: Only for hierarchical namespace enabled accounts. Optional. The path of
+         the soft deleted blob to undelete. Default value is None.
+        :type undelete_source: str
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        comp: Literal["undelete"] = kwargs.pop("comp", _params.pop("comp", "undelete"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _request = build_undelete_request(
+            url=self._config.url,
+            timeout=timeout,
+            undelete_source=undelete_source,
+            request_id_parameter=request_id_parameter,
+            comp=comp,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-resource-type"] = self._deserialize("str", response.headers.get("x-ms-resource-type"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
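+
+    # A hedged sketch (assumed client): soft-deleted paths are normally restored
+    # through FileSystemClient, which supplies the undelete source from a prior
+    # list_deleted_paths() result.
+    #
+    #   deleted = next(iter(file_system_client.list_deleted_paths()))
+    #   file_system_client.undelete_path(deleted.name, deleted.deletion_id)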
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/operations/_service_operations.py b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/operations/_service_operations.py
new file mode 100644
index 00000000..f0baeb32
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/operations/_service_operations.py
@@ -0,0 +1,208 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+import sys
+from typing import Any, Callable, Dict, Iterable, Literal, Optional, TypeVar
+
+from azure.core import PipelineClient
+from azure.core.exceptions import (
+    ClientAuthenticationError,
+    HttpResponseError,
+    ResourceExistsError,
+    ResourceNotFoundError,
+    ResourceNotModifiedError,
+    map_error,
+)
+from azure.core.paging import ItemPaged
+from azure.core.pipeline import PipelineResponse
+from azure.core.rest import HttpRequest, HttpResponse
+from azure.core.tracing.decorator import distributed_trace
+from azure.core.utils import case_insensitive_dict
+
+from .. import models as _models
+from .._configuration import AzureDataLakeStorageRESTAPIConfiguration
+from .._serialization import Deserializer, Serializer
+
+if sys.version_info >= (3, 9):
+    from collections.abc import MutableMapping
+else:
+    from typing import MutableMapping  # type: ignore
+T = TypeVar("T")
+ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
+
+_SERIALIZER = Serializer()
+_SERIALIZER.client_side_validation = False
+
+
+def build_list_file_systems_request(
+    url: str,
+    *,
+    prefix: Optional[str] = None,
+    continuation: Optional[str] = None,
+    max_results: Optional[int] = None,
+    request_id_parameter: Optional[str] = None,
+    timeout: Optional[int] = None,
+    **kwargs: Any
+) -> HttpRequest:
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    resource: Literal["account"] = kwargs.pop("resource", _params.pop("resource", "account"))
+    version: Literal["2025-01-05"] = kwargs.pop("version", _headers.pop("x-ms-version", "2025-01-05"))
+    accept = _headers.pop("Accept", "application/json")
+
+    # Construct URL
+    _url = kwargs.pop("template_url", "{url}")
+    path_format_arguments = {
+        "url": _SERIALIZER.url("url", url, "str", skip_quote=True),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    _params["resource"] = _SERIALIZER.query("resource", resource, "str")
+    if prefix is not None:
+        _params["prefix"] = _SERIALIZER.query("prefix", prefix, "str")
+    if continuation is not None:
+        _params["continuation"] = _SERIALIZER.query("continuation", continuation, "str")
+    if max_results is not None:
+        _params["maxResults"] = _SERIALIZER.query("max_results", max_results, "int", minimum=1)
+    if timeout is not None:
+        _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0)
+
+    # Construct headers
+    if request_id_parameter is not None:
+        _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str")
+    _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str")
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
+
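+# A small illustrative check (assumed values) of what this builder emits: a GET
+# against the account URL with resource=account in the query string and the
+# x-ms-version header pinned above.
+#
+#   req = build_list_file_systems_request(
+#       "https://myaccount.dfs.core.windows.net", prefix="logs", max_results=100)
+#   assert req.method == "GET" and "resource=account" in req.url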
+
+class ServiceOperations:
+    """
+    .. warning::
+        **DO NOT** instantiate this class directly.
+
+        Instead, you should access the following operations through
+        :class:`~azure.storage.filedatalake.AzureDataLakeStorageRESTAPI`'s
+        :attr:`service` attribute.
+    """
+
+    models = _models
+
+    def __init__(self, *args, **kwargs):
+        input_args = list(args)
+        self._client: PipelineClient = input_args.pop(0) if input_args else kwargs.pop("client")
+        self._config: AzureDataLakeStorageRESTAPIConfiguration = (
+            input_args.pop(0) if input_args else kwargs.pop("config")
+        )
+        self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer")
+        self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer")
+
+    @distributed_trace
+    def list_file_systems(
+        self,
+        prefix: Optional[str] = None,
+        continuation: Optional[str] = None,
+        max_results: Optional[int] = None,
+        request_id_parameter: Optional[str] = None,
+        timeout: Optional[int] = None,
+        **kwargs: Any
+    ) -> Iterable["_models.FileSystem"]:
+        # pylint: disable=line-too-long
+        """List FileSystems.
+
+        List filesystems and their properties in given account.
+
+        :param prefix: Filters results to filesystems within the specified prefix. Default value is
+         None.
+        :type prefix: str
+        :param continuation: Optional.  When the number of filesystems to be listed exceeds the
+         maximum, a continuation token is returned in the x-ms-continuation response header.  When a
+         continuation token is returned, it must be specified in a subsequent invocation of the list
+         operation to continue listing the filesystems. Default value is None.
+        :type continuation: str
+        :param max_results: An optional value that specifies the maximum number of items to return. If
+         omitted or greater than 5,000, the response will include up to 5,000 items. Default value is
+         None.
+        :type max_results: int
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :return: An iterator like instance of either FileSystem or the result of cls(response)
+        :rtype: ~azure.core.paging.ItemPaged[~azure.storage.filedatalake.models.FileSystem]
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        resource: Literal["account"] = kwargs.pop("resource", _params.pop("resource", "account"))
+        cls: ClsType[_models.FileSystemList] = kwargs.pop("cls", None)
+
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        def prepare_request(next_link=None):
+            if not next_link:
+
+                _request = build_list_file_systems_request(
+                    url=self._config.url,
+                    prefix=prefix,
+                    continuation=continuation,
+                    max_results=max_results,
+                    request_id_parameter=request_id_parameter,
+                    timeout=timeout,
+                    resource=resource,
+                    version=self._config.version,
+                    headers=_headers,
+                    params=_params,
+                )
+                _request.url = self._client.format_url(_request.url)
+
+            else:
+                _request = HttpRequest("GET", next_link)
+                _request.url = self._client.format_url(_request.url)
+                _request.method = "GET"
+            return _request
+
+        def extract_data(pipeline_response):
+            deserialized = self._deserialize("FileSystemList", pipeline_response)
+            list_of_elem = deserialized.filesystems
+            if cls:
+                list_of_elem = cls(list_of_elem)  # type: ignore
+            return None, iter(list_of_elem)
+
+        def get_next(next_link=None):
+            _request = prepare_request(next_link)
+
+            _stream = False
+            pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+                _request, stream=_stream, **kwargs
+            )
+            response = pipeline_response.http_response
+
+            if response.status_code not in [200]:
+                map_error(status_code=response.status_code, response=response, error_map=error_map)
+                error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+                raise HttpResponseError(response=response, model=error)
+
+            return pipeline_response
+
+        return ItemPaged(get_next, extract_data)
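+
+    # A hedged consumption sketch (assumed client): the ItemPaged returned above
+    # hides the continuation handling implemented in get_next/extract_data.
+    #
+    #   for fs in client.service.list_file_systems(prefix="logs"):
+    #       print(fs.name)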
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/py.typed b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/py.typed
new file mode 100644
index 00000000..e5aff4f8
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_generated/py.typed
@@ -0,0 +1 @@
+# Marker file for PEP 561.
\ No newline at end of file
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_list_paths_helper.py b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_list_paths_helper.py
new file mode 100644
index 00000000..1120b973
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_list_paths_helper.py
@@ -0,0 +1,173 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+
+from azure.core.paging import PageIterator
+from azure.core.exceptions import HttpResponseError
+from ._deserialize import (
+    process_storage_error,
+    get_deleted_path_properties_from_generated_code,
+    return_headers_and_deserialized_path_list,
+)
+from ._generated.models import BlobItemInternal, Path, BlobPrefix as GenBlobPrefix
+from ._shared.models import DictMixin
+from ._shared.response_handlers import return_context_and_deserialized
+from ._models import PathProperties
+
+
+class DeletedPathPropertiesPaged(PageIterator):
+    """An Iterable of deleted path properties.
+
+    :ivar str service_endpoint: The service URL.
+    :ivar str prefix: A path name prefix being used to filter the list.
+    :ivar str marker: The continuation token of the current page of results.
+    :ivar int results_per_page: The maximum number of results retrieved per API call.
+    :ivar str continuation_token: The continuation token to retrieve the next page of results.
+    :ivar str location_mode: The location mode being used to list results. The available
+        options include "primary" and "secondary".
+    :ivar current_page: The current page of listed results.
+    :vartype current_page: list(~azure.storage.filedatalake.DeletedPathProperties)
+    :ivar str container: The container that the paths are listed from.
+    :ivar str delimiter: A delimiting character used for hierarchy listing.
+
+    :param callable command: Function to retrieve the next page of items.
+    """
+    def __init__(
+            self, command,
+            container=None,
+            prefix=None,
+            results_per_page=None,
+            continuation_token=None,
+            delimiter=None,
+            location_mode=None):
+        super(DeletedPathPropertiesPaged, self).__init__(
+            get_next=self._get_next_cb,
+            extract_data=self._extract_data_cb,
+            continuation_token=continuation_token or ""
+        )
+        self._command = command
+        self.service_endpoint = None
+        self.prefix = prefix
+        self.marker = None
+        self.results_per_page = results_per_page
+        self.container = container
+        self.delimiter = delimiter
+        self.current_page = None
+        self.location_mode = location_mode
+
+    def _get_next_cb(self, continuation_token):
+        try:
+            return self._command(
+                prefix=self.prefix,
+                marker=continuation_token or None,
+                max_results=self.results_per_page,
+                cls=return_context_and_deserialized,
+                use_location=self.location_mode)
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    def _extract_data_cb(self, get_next_return):
+        self.location_mode, self._response = get_next_return
+        self.service_endpoint = self._response.service_endpoint
+        self.prefix = self._response.prefix
+        self.marker = self._response.marker
+        self.results_per_page = self._response.max_results
+        self.container = self._response.container_name
+        self.current_page = self._response.segment.blob_prefixes + self._response.segment.blob_items
+        self.current_page = [self._build_item(item) for item in self.current_page]
+        self.delimiter = self._response.delimiter
+
+        return self._response.next_marker or None, self.current_page
+
+    def _build_item(self, item):
+        if isinstance(item, BlobItemInternal):
+            file_props = get_deleted_path_properties_from_generated_code(item)
+            file_props.file_system = self.container
+            return file_props
+        if isinstance(item, GenBlobPrefix):
+            return DirectoryPrefix(
+                container=self.container,
+                prefix=item.name,
+                results_per_page=self.results_per_page,
+                location_mode=self.location_mode)
+        return item
+
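+# A hedged sketch (assumed client): this pager backs
+# FileSystemClient.list_deleted_paths(), which wires in the listing command and
+# page size.
+#
+#   for p in file_system_client.list_deleted_paths(results_per_page=50):
+#       print(p.name, p.deletion_id)
+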
+
+class DirectoryPrefix(DictMixin):
+    """Directory prefix.
+
+    :ivar str name: Name of the deleted directory.
+    :ivar int results_per_page: The maximum number of results retrieved per API call.
+    :ivar str location_mode: The location mode being used to list results. The available
+        options include "primary" and "secondary".
+    :ivar str file_system: The file system that the deleted paths are listed from.
+    :ivar str delimiter: A delimiting character used for hierarchy listing.
+    """
+    def __init__(self, **kwargs):
+        self.name = kwargs.get('prefix')
+        self.results_per_page = kwargs.get('results_per_page')
+        self.file_system = kwargs.get('container')
+        self.delimiter = kwargs.get('delimiter')
+        self.location_mode = kwargs.get('location_mode')
+
+
+class PathPropertiesPaged(PageIterator):
+    """An Iterable of Path properties.
+
+    :ivar str path: Filters the results to return only paths under the specified path.
+    :ivar int results_per_page: The maximum number of results retrieved per API call.
+    :ivar str continuation_token: The continuation token to retrieve the next page of results.
+    :ivar list(~azure.storage.filedatalake.PathProperties) current_page: The current page of listed results.
+
+    :param callable command: Function to retrieve the next page of items.
+    :param str path: Filters the results to return only paths under the specified path.
+    :param int max_results: The maximum number of paths to retrieve per call.
+    :param str continuation_token: An opaque continuation token.
+    """
+    def __init__(
+            self, command,
+            recursive,
+            path=None,
+            max_results=None,
+            continuation_token=None,
+            upn=None):
+        super(PathPropertiesPaged, self).__init__(
+            get_next=self._get_next_cb,
+            extract_data=self._extract_data_cb,
+            continuation_token=continuation_token or ""
+        )
+        self._command = command
+        self.recursive = recursive
+        self.results_per_page = max_results
+        self.path = path
+        self.upn = upn
+        self.current_page = None
+        self.path_list = None
+
+    def _get_next_cb(self, continuation_token):
+        try:
+            return self._command(
+                self.recursive,
+                continuation=continuation_token or None,
+                path=self.path,
+                max_results=self.results_per_page,
+                upn=self.upn,
+                cls=return_headers_and_deserialized_path_list)
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    def _extract_data_cb(self, get_next_return):
+        self.path_list, self._response = get_next_return
+        self.current_page = [self._build_item(item) for item in self.path_list]
+
+        return self._response['continuation'] or None, self.current_page
+
+    @staticmethod
+    def _build_item(item):
+        if isinstance(item, PathProperties):
+            return item
+        if isinstance(item, Path):
+            path = PathProperties._from_generated(item)  # pylint: disable=protected-access
+            return path
+        return item
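+
+
+def _example_iterate_paths(fs_client):
+    # Illustrative sketch only (not part of the SDK surface): get_paths on an
+    # assumed, pre-constructed DataLakeFileSystemClient drives the
+    # PathPropertiesPaged iterator above and yields PathProperties items.
+    for path in fs_client.get_paths(path="raw", recursive=True):
+        kind = "dir" if path.is_directory else "file"
+        print(kind, path.name, path.content_length)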
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_models.py b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_models.py
new file mode 100644
index 00000000..d078d1e4
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_models.py
@@ -0,0 +1,1158 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+# pylint: disable=too-few-public-methods, too-many-instance-attributes
+# pylint: disable=super-init-not-called, too-many-lines
+from enum import Enum
+
+from azure.core import CaseInsensitiveEnumMeta
+from azure.storage.blob import LeaseProperties as BlobLeaseProperties
+from azure.storage.blob import AccountSasPermissions as BlobAccountSasPermissions
+from azure.storage.blob import ResourceTypes as BlobResourceTypes
+from azure.storage.blob import UserDelegationKey as BlobUserDelegationKey
+from azure.storage.blob import ContentSettings as BlobContentSettings
+from azure.storage.blob import AccessPolicy as BlobAccessPolicy
+from azure.storage.blob import DelimitedTextDialect as BlobDelimitedTextDialect
+from azure.storage.blob import DelimitedJsonDialect as BlobDelimitedJSON
+from azure.storage.blob import ArrowDialect as BlobArrowDialect
+from azure.storage.blob import ContainerEncryptionScope as BlobContainerEncryptionScope
+from azure.storage.blob import CustomerProvidedEncryptionKey as BlobCustomerProvidedEncryptionKey
+from azure.storage.blob._models import ContainerPropertiesPaged
+from azure.storage.blob._generated.models import Logging as GenLogging, Metrics as GenMetrics, \
+    RetentionPolicy as GenRetentionPolicy, StaticWebsite as GenStaticWebsite, CorsRule as GenCorsRule
+
+from ._shared.models import DictMixin
+from ._shared.parser import _filetime_to_datetime, _rfc_1123_to_datetime
+
+
+class FileSystemProperties(DictMixin):
+    """File System properties class.
+
+    :ivar str name:
+        Name of the filesystem.
+    :ivar ~datetime.datetime last_modified:
+        A datetime object representing the last time the file system was modified.
+    :ivar str etag:
+        The ETag contains a value that you can use to perform operations
+        conditionally.
+    :ivar ~azure.storage.filedatalake.LeaseProperties lease:
+        Stores all the lease information for the file system.
+    :ivar str public_access: Specifies whether data in the file system may be accessed
+        publicly and the level of access.
+    :ivar bool has_immutability_policy:
+        Represents whether the file system has an immutability policy.
+    :ivar bool has_legal_hold:
+        Represents whether the file system has a legal hold.
+    :ivar dict metadata: A dict with name-value pairs to associate with the
+        file system as metadata.
+    :ivar ~azure.storage.filedatalake.EncryptionScopeOptions encryption_scope:
+        The default encryption scope configuration for the file system.
+    :ivar bool deleted:
+        Whether this file system was deleted.
+    :ivar str deleted_version:
+        The version of a deleted file system.
+
+    Returned ``FileSystemProperties`` instances expose these values through a
+    dictionary interface, for example: ``file_system_props["last_modified"]``.
+    Additionally, the file system name is available as ``file_system_props["name"]``.
+    """
+
+    def __init__(self, **kwargs):
+        self.name = None
+        self.last_modified = None
+        self.etag = None
+        self.lease = None
+        self.public_access = None
+        self.has_immutability_policy = None
+        self.has_legal_hold = None
+        self.metadata = None
+        self.deleted = None
+        self.deleted_version = None
+        self.encryption_scope = None
+        default_encryption_scope = kwargs.get('x-ms-default-encryption-scope')
+        if default_encryption_scope:
+            self.encryption_scope = EncryptionScopeOptions(
+                default_encryption_scope=default_encryption_scope,
+                prevent_encryption_scope_override=kwargs.get('x-ms-deny-encryption-scope-override', False)
+            )
+
+    @classmethod
+    def _from_generated(cls, generated):
+        props = cls()
+        props.name = generated.name
+        props.last_modified = generated.properties.last_modified
+        props.deleted = generated.deleted
+        props.deleted_version = generated.version
+        props.etag = generated.properties.etag
+        props.lease = LeaseProperties._from_generated(generated)  # pylint: disable=protected-access
+        props.public_access = PublicAccess._from_generated(  # pylint: disable=protected-access
+            generated.properties.public_access)
+        props.has_immutability_policy = generated.properties.has_immutability_policy
+        props.has_legal_hold = generated.properties.has_legal_hold
+        props.metadata = generated.metadata
+        props.encryption_scope = EncryptionScopeOptions._from_generated(generated)  # pylint: disable=protected-access
+        return props
+
+    @classmethod
+    def _convert_from_container_props(cls, container_properties):
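+        # Re-type the existing blob ContainerProperties instance in place by
+        # swapping its __class__; this keeps all parsed attribute data without
+        # copying, then normalizes the public access and lease types below.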
+        container_properties.__class__ = cls
+        container_properties.public_access = PublicAccess._from_generated(  # pylint: disable=protected-access
+            container_properties.public_access)
+        container_properties.lease.__class__ = LeaseProperties
+        return container_properties
+
+
+class FileSystemPropertiesPaged(ContainerPropertiesPaged):
+    """An Iterable of File System properties.
+
+    :ivar str service_endpoint: The service URL.
+    :ivar str prefix: A file system name prefix being used to filter the list.
+    :ivar str marker: The continuation token of the current page of results.
+    :ivar int results_per_page: The maximum number of results retrieved per API call.
+    :ivar str continuation_token: The continuation token to retrieve the next page of results.
+    :ivar str location_mode: The location mode being used to list results. The available
+        options include "primary" and "secondary".
+    :ivar current_page: The current page of listed results.
+    :vartype current_page: list(~azure.storage.filedatalake.FileSystemProperties)
+
+    :param callable command: Function to retrieve the next page of items.
+    :param str prefix: Filters the results to return only file systems whose names
+        begin with the specified prefix.
+    :param int results_per_page: The maximum number of file system names to retrieve per
+        call.
+    :param str continuation_token: An opaque continuation token.
+    """
+
+    def __init__(self, *args, **kwargs):
+        super(FileSystemPropertiesPaged, self).__init__(
+            *args,
+            **kwargs
+        )
+
+    @staticmethod
+    def _build_item(item):
+        return FileSystemProperties._from_generated(item)  # pylint: disable=protected-access
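+
+
+def _example_list_file_systems(service_client):
+    # Illustrative sketch only: list_file_systems on an assumed
+    # DataLakeServiceClient returns an iterable of FileSystemProperties,
+    # paged for you by FileSystemPropertiesPaged above.
+    for fs in service_client.list_file_systems(name_starts_with="analytics"):
+        print(fs.name, fs.last_modified, fs.public_access)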
+
+
+class DirectoryProperties(DictMixin):
+    """
+    :ivar str name: name of the directory
+    :ivar str owner: The owner of the file or directory.
+    :ivar str group: The owning group of the file or directory.
+    :ivar str permissions: The permissions that are set for user, group, and other on the file or directory.
+        Each individual permission is in [r,w,x,-]{3} format.
+    :ivar str acl: The POSIX ACL permissions of the file or directory.
+    :ivar str etag: The ETag contains a value that you can use to perform operations
+        conditionally.
+    :ivar bool deleted: Whether the current directory is marked as deleted.
+    :ivar dict metadata: Name-value pairs associated with the directory as metadata.
+    :ivar str encryption_scope:
+        A predefined encryption scope used to encrypt the data on the service. An encryption
+        scope can be created using the Management API and referenced here by name. If a default
+        encryption scope has been defined at the file system, this value will override it if the
+        file system level scope is configured to allow overrides. Otherwise an error will be raised.
+    :ivar ~azure.storage.filedatalake.LeaseProperties lease:
+        Stores all the lease information for the directory.
+    :ivar ~datetime.datetime last_modified:
+        A datetime object representing the last time the directory was modified.
+    :ivar ~datetime.datetime creation_time:
+        Indicates when the directory was created, in UTC.
+    :ivar int remaining_retention_days: The number of days that the directory will be retained
+        before being permanently deleted by the service.
+    :ivar ~azure.storage.filedatalake.ContentSettings content_settings: The content settings of the directory.
+    """
+
+    def __init__(self, **kwargs):
+        self.name = kwargs.get('name')
+        self.etag = kwargs.get('ETag')
+        self.deleted = False
+        self.metadata = kwargs.get('metadata')
+        self.lease = LeaseProperties(**kwargs)
+        self.last_modified = kwargs.get('Last-Modified')
+        self.creation_time = kwargs.get('x-ms-creation-time')
+        self.deleted_time = None
+        self.remaining_retention_days = None
+        self.encryption_scope = kwargs.get('x-ms-encryption-scope')
+
+        # These values are passed in directly rather than parsed from response headers.
+        self.owner = kwargs.get('owner', None)
+        self.group = kwargs.get('group', None)
+        self.permissions = kwargs.get('permissions', None)
+        self.acl = kwargs.get('acl', None)
+
+
+class FileProperties(DictMixin):
+    """
+    :ivar str name: name of the file
+    :ivar str owner: The owner of the file or directory.
+    :ivar str group: The owning group of the file or directory.
+    :ivar str permissions: The permissions that are set for user, group, and other on the file or directory.
+        Each individual permission is in [r,w,x,-]{3} format.
+    :ivar str acl: The POSIX ACL permissions of the file or directory.
+    :ivar str etag: The ETag contains a value that you can use to perform operations
+        conditionally.
+    :ivar bool deleted: Whether the current file is marked as deleted.
+    :ivar dict metadata: Name-value pairs associated with the file as metadata.
+    :ivar str encryption_scope:
+        A predefined encryption scope used to encrypt the data on the service. An encryption
+        scope can be created using the Management API and referenced here by name. If a default
+        encryption scope has been defined at the file system, this value will override it if the
+        file system level scope is configured to allow overrides. Otherwise an error will be raised.
+    :ivar ~azure.storage.filedatalake.LeaseProperties lease:
+        Stores all the lease information for the file.
+    :ivar ~datetime.datetime last_modified:
+        A datetime object representing the last time the file was modified.
+    :ivar ~datetime.datetime creation_time:
+        Indicates when the file was created, in UTC.
+    :ivar int size: size of the file
+    :ivar int remaining_retention_days: The number of days that the file will be retained
+        before being permanently deleted by the service.
+    :ivar str encryption_context: Specifies the encryption context to set on the file.
+    :ivar ~azure.storage.filedatalake.ContentSettings content_settings: The content settings of the file.
+    """
+
+    def __init__(self, **kwargs):
+        self.name = kwargs.get('name')
+        self.etag = kwargs.get('ETag')
+        self.deleted = False
+        self.metadata = kwargs.get('metadata')
+        self.lease = LeaseProperties(**kwargs)
+        self.last_modified = kwargs.get('Last-Modified')
+        self.creation_time = kwargs.get('x-ms-creation-time')
+        self.size = kwargs.get('Content-Length')
+        self.deleted_time = None
+        self.expiry_time = kwargs.get("x-ms-expiry-time")
+        self.remaining_retention_days = None
+        self.content_settings = ContentSettings(**kwargs)
+        self.encryption_scope = kwargs.get('x-ms-encryption-scope')
+
+        # These values are passed in directly rather than parsed from response headers.
+        self.encryption_context = kwargs.get('encryption_context')
+        self.owner = kwargs.get('owner', None)
+        self.group = kwargs.get('group', None)
+        self.permissions = kwargs.get('permissions', None)
+        self.acl = kwargs.get('acl', None)
+
+
+class PathProperties(DictMixin):
+    """Path properties listed by get_paths api.
+
+    :ivar str name: The full path for a file or directory.
+    :ivar str owner: The owner of the file or directory.
+    :ivar str group: The owning group of the file or directory.
+    :ivar str permissions: The permissions that are set for user, group, and other on the file or directory.
+        Each individual permission is in [r,w,x,-]{3} format.
+    :ivar datetime last_modified:  A datetime object representing the last time the directory/file was modified.
+    :ivar bool is_directory: Is the path a directory or not.
+    :ivar str etag: The ETag contains a value that you can use to perform operations
+        conditionally.
+    :ivar int content_length: The size of file if the path is a file.
+    :ivar datetime creation_time: The creation time of the file/directory.
+    :ivar datetime expiry_time: The expiry time of the file/directory.
+    :ivar str encryption_scope:
+        A predefined encryption scope used to encrypt the data on the service. An encryption
+        scope can be created using the Management API and referenced here by name. If a default
+        encryption scope has been defined at the file system, this value will override it if the
+        file system level scope is configured to allow overrides. Otherwise an error will be raised.
+    :ivar str encryption_context: Specifies the encryption context to set on the file.
+    """
+
+    def __init__(self, **kwargs):
+        self.name = kwargs.pop('name', None)
+        self.owner = kwargs.get('owner', None)
+        self.group = kwargs.get('group', None)
+        self.permissions = kwargs.get('permissions', None)
+        self.last_modified = kwargs.get('last_modified', None)
+        self.is_directory = kwargs.get('is_directory', False)
+        self.etag = kwargs.get('etag', None)
+        self.content_length = kwargs.get('content_length', None)
+        self.creation_time = kwargs.get('creation_time', None)
+        self.expiry_time = kwargs.get('expiry_time', None)
+        self.encryption_scope = kwargs.get('x-ms-encryption-scope', None)
+        self.encryption_context = kwargs.get('x-ms-encryption-context', None)
+
+    @classmethod
+    def _from_generated(cls, generated):
+        path_prop = PathProperties()
+        path_prop.name = generated.name
+        path_prop.owner = generated.owner
+        path_prop.group = generated.group
+        path_prop.permissions = generated.permissions
+        path_prop.last_modified = _rfc_1123_to_datetime(generated.last_modified)
+        path_prop.is_directory = bool(generated.is_directory)
+        path_prop.etag = generated.additional_properties.get('etag')
+        path_prop.content_length = generated.content_length
+        path_prop.creation_time = _filetime_to_datetime(generated.creation_time)
+        path_prop.expiry_time = _filetime_to_datetime(generated.expiry_time)
+        path_prop.encryption_scope = generated.encryption_scope
+        path_prop.encryption_context = generated.encryption_context
+        return path_prop
+
+
+class LeaseProperties(BlobLeaseProperties):
+    """DataLake Lease Properties.
+
+    :ivar str status:
+        The lease status of the file. Possible values: locked|unlocked
+    :ivar str state:
+        Lease state of the file. Possible values: available|leased|expired|breaking|broken
+    :ivar str duration:
+        When a file is leased, specifies whether the lease is of infinite or fixed duration.
+    """
+
+
+class ContentSettings(BlobContentSettings):
+    """The content settings of a file or directory.
+
+    :ivar str content_type:
+        The content type specified for the file or directory. If no content type was
+        specified, the default content type is application/octet-stream.
+    :ivar str content_encoding:
+        If the content_encoding has previously been set
+        for the file, that value is stored.
+    :ivar str content_language:
+        If the content_language has previously been set
+        for the file, that value is stored.
+    :ivar str content_disposition:
+        content_disposition conveys additional information about how to
+        process the response payload, and also can be used to attach
+        additional metadata. If content_disposition has previously been set
+        for the file, that value is stored.
+    :ivar str cache_control:
+        If the cache_control has previously been set for
+        the file, that value is stored.
+    :ivar bytearray content_md5:
+        If the content_md5 has been set for the file, this response
+        header is stored so that the client can check for message content
+        integrity.
+    :keyword str content_type:
+        The content type specified for the file or directory. If no content type was
+        specified, the default content type is application/octet-stream.
+    :keyword str content_encoding:
+        If the content_encoding has previously been set
+        for the file, that value is stored.
+    :keyword str content_language:
+        If the content_language has previously been set
+        for the file, that value is stored.
+    :keyword str content_disposition:
+        content_disposition conveys additional information about how to
+        process the response payload, and also can be used to attach
+        additional metadata. If content_disposition has previously been set
+        for the file, that value is stored.
+    :keyword str cache_control:
+        If the cache_control has previously been set for
+        the file, that value is stored.
+    :keyword bytearray content_md5:
+        If the content_md5 has been set for the file, this response
+        header is stored so that the client can check for message content
+        integrity.
+    """
+
+    def __init__(
+            self, **kwargs):
+        super(ContentSettings, self).__init__(
+            **kwargs
+        )
+
+
+class AccountSasPermissions(BlobAccountSasPermissions):
+    """AccountSasPermissions class to be used with the
+    :func:`~azure.storage.filedatalake.generate_account_sas` function.
+    """
+
+    def __init__(self, read=False, write=False, delete=False, list=False,  # pylint: disable=redefined-builtin
+                 create=False):
+        super(AccountSasPermissions, self).__init__(
+            read=read, create=create, write=write, list=list,
+            delete=delete
+        )
+
+
+class FileSystemSasPermissions(object):
+    """FileSystemSasPermissions class to be used with the
+    :func:`~azure.storage.filedatalake.generate_file_system_sas` function.
+
+    :param bool read:
+        Read the content, properties, metadata etc.
+    :param bool write:
+        Create or write content, properties, metadata. Lease the file system.
+    :param bool delete:
+        Delete the file system.
+    :param bool list:
+        List paths in the file system.
+    :keyword bool add:
+        Append data to a file in the directory.
+    :keyword bool create:
+        Write a new file, snapshot a file, or copy a file to a new file.
+    :keyword bool move:
+        Move any file in the directory to a new location.
+        Note the move operation can optionally be restricted to the child file or directory owner or
+        the parent directory owner if the saoid parameter is included in the token and the sticky bit is set
+        on the parent directory.
+    :keyword bool execute:
+        Get the status (system defined properties) and ACL of any file in the directory.
+        If the caller is the owner, set access control on any file in the directory.
+    :keyword bool manage_ownership:
+        Allows the user to set owner, owning group, or act as the owner when renaming or deleting a file or directory
+        within a folder that has the sticky bit set.
+    :keyword bool manage_access_control:
+         Allows the user to set permissions and POSIX ACLs on files and directories.
+    """
+
+    def __init__(self, read=False, write=False, delete=False, list=False,  # pylint: disable=redefined-builtin
+                 **kwargs):
+        self.read = read
+        self.add = kwargs.pop('add', None)
+        self.create = kwargs.pop('create', None)
+        self.write = write
+        self.delete = delete
+        self.list = list
+        self.move = kwargs.pop('move', None)
+        self.execute = kwargs.pop('execute', None)
+        self.manage_ownership = kwargs.pop('manage_ownership', None)
+        self.manage_access_control = kwargs.pop('manage_access_control', None)
+        self._str = (('r' if self.read else '') +
+                     ('a' if self.add else '') +
+                     ('c' if self.create else '') +
+                     ('w' if self.write else '') +
+                     ('d' if self.delete else '') +
+                     ('l' if self.list else '') +
+                     ('m' if self.move else '') +
+                     ('e' if self.execute else '') +
+                     ('o' if self.manage_ownership else '') +
+                     ('p' if self.manage_access_control else ''))
+
+    def __str__(self):
+        return self._str
+
+    @classmethod
+    def from_string(cls, permission):
+        """Create a FileSystemSasPermissions from a string.
+
+        To specify read, write, or delete permissions, you need only include
+        the first letter of the word in the string. For example, for read and
+        write permissions, you would provide the string "rw".
+
+        :param str permission: The string which dictates the read, add, create,
+            write, or delete permissions.
+        :return: A FileSystemSasPermissions object
+        :rtype: ~azure.storage.filedatalake.FileSystemSasPermissions
+        """
+        p_read = 'r' in permission
+        p_add = 'a' in permission
+        p_create = 'c' in permission
+        p_write = 'w' in permission
+        p_delete = 'd' in permission
+        p_list = 'l' in permission
+        p_move = 'm' in permission
+        p_execute = 'e' in permission
+        p_manage_ownership = 'o' in permission
+        p_manage_access_control = 'p' in permission
+
+        parsed = cls(read=p_read, write=p_write, delete=p_delete,
+                     list=p_list, add=p_add, create=p_create, move=p_move,
+                     execute=p_execute, manage_ownership=p_manage_ownership,
+                     manage_access_control=p_manage_access_control)
+        return parsed
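+
+
+def _example_file_system_sas(account_name, account_key, file_system_name):
+    # Illustrative sketch only: round-trip permissions through from_string and
+    # pass them to generate_file_system_sas, the module-level SAS helper in
+    # this package. Argument values here are placeholders.
+    from datetime import datetime, timedelta
+    from azure.storage.filedatalake import generate_file_system_sas
+    perms = FileSystemSasPermissions.from_string("rwdl")
+    assert str(perms) == "rwdl"  # __str__ re-serializes in canonical order
+    return generate_file_system_sas(
+        account_name, file_system_name, account_key,
+        permission=perms,
+        expiry=datetime.utcnow() + timedelta(hours=1))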
+
+
+class DirectorySasPermissions(object):
+    """DirectorySasPermissions class to be used with the
+    :func:`~azure.storage.filedatalake.generate_directory_sas` function.
+
+    :param bool read:
+        Read the content, properties, metadata etc.
+    :param bool create:
+        Create a new directory
+    :param bool write:
+        Create or write content, properties, metadata. Lease the directory.
+    :param bool delete:
+        Delete the directory.
+    :keyword bool add:
+        Append data to a file in the directory.
+    :keyword bool list:
+        List any files in the directory. Implies Execute.
+    :keyword bool move:
+        Move any file in the directory to a new location.
+        Note the move operation can optionally be restricted to the child file or directory owner or
+        the parent directory owner if the saoid parameter is included in the token and the sticky bit is set
+        on the parent directory.
+    :keyword bool execute:
+        Get the status (system defined properties) and ACL of any file in the directory.
+        If the caller is the owner, set access control on any file in the directory.
+    :keyword bool manage_ownership:
+        Allows the user to set owner, owning group, or act as the owner when renaming or deleting a file or directory
+        within a folder that has the sticky bit set.
+    :keyword bool manage_access_control:
+         Allows the user to set permissions and POSIX ACLs on files and directories.
+    """
+
+    def __init__(self, read=False, create=False, write=False,
+                 delete=False, **kwargs):
+        self.read = read
+        self.add = kwargs.pop('add', None)
+        self.create = create
+        self.write = write
+        self.delete = delete
+        self.list = kwargs.pop('list', None)
+        self.move = kwargs.pop('move', None)
+        self.execute = kwargs.pop('execute', None)
+        self.manage_ownership = kwargs.pop('manage_ownership', None)
+        self.manage_access_control = kwargs.pop('manage_access_control', None)
+        self._str = (('r' if self.read else '') +
+                     ('a' if self.add else '') +
+                     ('c' if self.create else '') +
+                     ('w' if self.write else '') +
+                     ('d' if self.delete else '') +
+                     ('l' if self.list else '') +
+                     ('m' if self.move else '') +
+                     ('e' if self.execute else '') +
+                     ('o' if self.manage_ownership else '') +
+                     ('p' if self.manage_access_control else ''))
+
+    def __str__(self):
+        return self._str
+
+    @classmethod
+    def from_string(cls, permission):
+        """Create a DirectorySasPermissions from a string.
+
+        To specify read, create, write, or delete permissions, you need only
+        include the first letter of the word in the string. For example, for
+        read and write permissions, you would provide the string "rw".
+
+        :param str permission: The string which dictates the read, add, create,
+            write, or delete permissions.
+        :return: A DirectorySasPermissions object
+        :rtype: ~azure.storage.filedatalake.DirectorySasPermissions
+        """
+        p_read = 'r' in permission
+        p_add = 'a' in permission
+        p_create = 'c' in permission
+        p_write = 'w' in permission
+        p_delete = 'd' in permission
+        p_list = 'l' in permission
+        p_move = 'm' in permission
+        p_execute = 'e' in permission
+        p_manage_ownership = 'o' in permission
+        p_manage_access_control = 'p' in permission
+
+        parsed = cls(read=p_read, create=p_create, write=p_write, delete=p_delete, add=p_add,
+                     list=p_list, move=p_move, execute=p_execute, manage_ownership=p_manage_ownership,
+                     manage_access_control=p_manage_access_control)
+        return parsed
+
+
+class FileSasPermissions(object):
+    """FileSasPermissions class to be used with the
+    :func:`~azure.storage.filedatalake.generate_file_sas` function.
+
+    :param bool read:
+        Read the content, properties, metadata etc. Use the file as
+        the source of a read operation.
+    :param bool create:
+        Write a new file.
+    :param bool write:
+        Create or write content, properties, metadata. Lease the file.
+    :param bool delete:
+        Delete the file.
+    :keyword bool add:
+        Append data to the file.
+    :keyword bool move:
+        Move any file in the directory to a new location.
+        Note the move operation can optionally be restricted to the child file or directory owner or
+        the parent directory owner if the saoid parameter is included in the token and the sticky bit is set
+        on the parent directory.
+    :keyword bool execute:
+        Get the status (system defined properties) and ACL of any file in the directory.
+        If the caller is the owner, set access control on any file in the directory.
+    :keyword bool manage_ownership:
+        Allows the user to set owner, owning group, or act as the owner when renaming or deleting a file or directory
+        within a folder that has the sticky bit set.
+    :keyword bool manage_access_control:
+         Allows the user to set permissions and POSIX ACLs on files and directories.
+    """
+
+    def __init__(self, read=False, create=False, write=False, delete=False, **kwargs):
+        self.read = read
+        self.add = kwargs.pop('add', None)
+        self.create = create
+        self.write = write
+        self.delete = delete
+        self.move = kwargs.pop('move', None)
+        self.execute = kwargs.pop('execute', None)
+        self.manage_ownership = kwargs.pop('manage_ownership', None)
+        self.manage_access_control = kwargs.pop('manage_access_control', None)
+        self._str = (('r' if self.read else '') +
+                     ('a' if self.add else '') +
+                     ('c' if self.create else '') +
+                     ('w' if self.write else '') +
+                     ('d' if self.delete else '') +
+                     ('m' if self.move else '') +
+                     ('e' if self.execute else '') +
+                     ('o' if self.manage_ownership else '') +
+                     ('p' if self.manage_access_control else ''))
+
+    def __str__(self):
+        return self._str
+
+    @classmethod
+    def from_string(cls, permission):
+        """Create a FileSasPermissions from a string.
+
+        To specify read, write, or delete permissions, you need only include
+        the first letter of the word in the string. For example, for read and
+        write permissions, you would provide the string "rw".
+
+        :param str permission: The string which dictates the read, add, create,
+            write, or delete permissions.
+        :return: A FileSasPermissions object
+        :rtype: ~azure.storage.filedatalake.FileSasPermissions
+        """
+        p_read = 'r' in permission
+        p_add = 'a' in permission
+        p_create = 'c' in permission
+        p_write = 'w' in permission
+        p_delete = 'd' in permission
+        p_move = 'm' in permission
+        p_execute = 'e' in permission
+        p_manage_ownership = 'o' in permission
+        p_manage_access_control = 'p' in permission
+
+        parsed = cls(read=p_read, create=p_create, write=p_write, delete=p_delete, add=p_add,
+                     move=p_move, execute=p_execute, manage_ownership=p_manage_ownership,
+                     manage_access_control=p_manage_access_control)
+        return parsed
+
+
+class AccessPolicy(BlobAccessPolicy):
+    """Access Policy class used by the set and get access policy methods in each service.
+
+    A stored access policy can specify the start time, expiry time, and
+    permissions for the Shared Access Signatures with which it's associated.
+    Depending on how you want to control access to your resource, you can
+    specify all of these parameters within the stored access policy, and omit
+    them from the URL for the Shared Access Signature. Doing so permits you to
+    modify the associated signature's behavior at any time, as well as to revoke
+    it. Or you can specify one or more of the access policy parameters within
+    the stored access policy, and the others on the URL. Finally, you can
+    specify all of the parameters on the URL. In this case, you can use the
+    stored access policy to revoke the signature, but not to modify its behavior.
+
+    Together the Shared Access Signature and the stored access policy must
+    include all fields required to authenticate the signature. If any required
+    fields are missing, the request will fail. Likewise, if a field is specified
+    both in the Shared Access Signature URL and in the stored access policy, the
+    request will fail with status code 400 (Bad Request).
+
+    :param permission:
+        The permissions associated with the shared access signature. The
+        user is restricted to operations allowed by the permissions.
+        Required unless an id is given referencing a stored access policy
+        which contains this field. This field must be omitted if it has been
+        specified in an associated stored access policy.
+    :type permission: str or ~azure.storage.datalake.FileSystemSasPermissions
+    :param expiry:
+        The time at which the shared access signature becomes invalid.
+        Required unless an id is given referencing a stored access policy
+        which contains this field. This field must be omitted if it has
+        been specified in an associated stored access policy. Azure will always
+        convert values to UTC. If a date is passed in without timezone info, it
+        is assumed to be UTC.
+    :type expiry: ~datetime.datetime or str
+    :keyword start:
+        The time at which the shared access signature becomes valid. If
+        omitted, start time for this call is assumed to be the time when the
+        storage service receives the request. The provided datetime will always
+        be interpreted as UTC.
+    :paramtype start: ~datetime.datetime or str
+    """
+
+    def __init__(self, permission=None, expiry=None, **kwargs):
+        super(AccessPolicy, self).__init__(
+            permission=permission, expiry=expiry, start=kwargs.pop('start', None)
+        )
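+
+
+def _example_stored_access_policy(fs_client):
+    # Illustrative sketch only: attach a stored access policy to a file system
+    # on an assumed DataLakeFileSystemClient via set_file_system_access_policy.
+    from datetime import datetime, timedelta
+    policy = AccessPolicy(
+        permission=FileSystemSasPermissions(read=True, list=True),
+        expiry=datetime.utcnow() + timedelta(days=7),
+        start=datetime.utcnow())
+    fs_client.set_file_system_access_policy(signed_identifiers={"read-only": policy})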
+
+
+class ResourceTypes(BlobResourceTypes):
+    """
+    Specifies the resource types that are accessible with the account SAS.
+
+    :param bool service:
+        Access to service-level APIs (e.g., List File Systems).
+    :param bool file_system:
+        Access to file_system-level APIs (e.g., Create/Delete file system,
+        List Directories/Files)
+    :param bool object:
+        Access to object-level APIs for files (e.g., Create File).
+    """
+
+    def __init__(self, service=False, file_system=False, object=False  # pylint: disable=redefined-builtin
+                 ):
+        super(ResourceTypes, self).__init__(service=service, container=file_system, object=object)
+
+
+class UserDelegationKey(BlobUserDelegationKey):
+    """
+    Represents a user delegation key, provided to the user by Azure Storage
+    based on their Azure Active Directory access token.
+
+    The fields are saved as simple strings since the user does not have to interact with this object;
+    to generate an identity SAS, the user can simply pass it to the right API.
+
+    :ivar str signed_oid:
+        Object ID of this token.
+    :ivar str signed_tid:
+        Tenant ID of the tenant that issued this token.
+    :ivar str signed_start:
+        The datetime this token becomes valid.
+    :ivar str signed_expiry:
+        The datetime this token expires.
+    :ivar str signed_service:
+        What service this key is valid for.
+    :ivar str signed_version:
+        The version identifier of the REST service that created this token.
+    :ivar str value:
+        The user delegation key.
+    """
+
+    @classmethod
+    def _from_generated(cls, generated):
+        delegation_key = cls()
+        delegation_key.signed_oid = generated.signed_oid
+        delegation_key.signed_tid = generated.signed_tid
+        delegation_key.signed_start = generated.signed_start
+        delegation_key.signed_expiry = generated.signed_expiry
+        delegation_key.signed_service = generated.signed_service
+        delegation_key.signed_version = generated.signed_version
+        delegation_key.value = generated.value
+        return delegation_key
+
+
+class PublicAccess(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+    """
+    Specifies whether data in the file system may be accessed publicly and the level of access.
+    """
+
+    FILE = 'blob'
+    """
+    Specifies public read access for files. File data within this file system can be read
+    via anonymous request, but file system data is not available. Clients cannot enumerate
+    files within the file system via anonymous request.
+    """
+
+    FILESYSTEM = 'container'
+    """
+    Specifies full public read access for file system and file data. Clients can enumerate
+    files within the file system via anonymous request, but cannot enumerate file systems
+    within the storage account.
+    """
+
+    @classmethod
+    def _from_generated(cls, public_access):
+        if public_access == "blob":  # pylint:disable=no-else-return
+            return cls.File
+        elif public_access == "container":
+            return cls.FileSystem
+
+        return None
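+
+
+def _example_public_access_lookup():
+    # Minimal sketch of the case-insensitive enum behavior assumed from
+    # azure.core's CaseInsensitiveEnumMeta: value lookup is exact, while
+    # name lookup via [] or attribute access is case-insensitive.
+    assert PublicAccess("blob") is PublicAccess.FILE
+    assert PublicAccess["filesystem"] is PublicAccess.FILESYSTEM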
+
+
+class LocationMode(object):
+    """
+    Specifies the location the request should be sent to. This mode only applies
+    for RA-GRS accounts which allow secondary read access. All other account types
+    must use PRIMARY.
+    """
+
+    PRIMARY = 'primary'  #: Requests should be sent to the primary location.
+    SECONDARY = 'secondary'  #: Requests should be sent to the secondary location, if possible.
+
+
+class DelimitedJsonDialect(BlobDelimitedJSON):
+    """Defines the input or output JSON serialization for a datalake query.
+
+    :keyword str delimiter: The line separator character, default value is '\\\\n'.
+    """
+
+
+class DelimitedTextDialect(BlobDelimitedTextDialect):
+    """Defines the input or output delimited (CSV) serialization for a datalake query request.
+
+    :keyword str delimiter:
+        Column separator, defaults to ','.
+    :keyword str quotechar:
+        Field quote, defaults to '"'.
+    :keyword str lineterminator:
+        Record separator, defaults to '\\\\n'.
+    :keyword str escapechar:
+        Escape char, defaults to empty.
+    :keyword bool has_header:
+        Whether the blob data includes headers in the first line. The default value is False, meaning that the
+        data will be returned inclusive of the first line. If set to True, the data will be returned exclusive
+        of the first line.
+    """
+
+
+class ArrowDialect(BlobArrowDialect):
+    """field of an arrow schema.
+
+    All required parameters must be populated in order to send to Azure.
+
+    :param str type: Required.
+    :keyword str name: The name of the field.
+    :keyword int precision: The precision of the field.
+    :keyword int scale: The scale of the field.
+    """
+
+
+class CustomerProvidedEncryptionKey(BlobCustomerProvidedEncryptionKey):
+    """
+    All data in Azure Storage is encrypted at-rest using an account-level encryption key.
+    In versions 2021-06-08 and newer, you can manage the key used to encrypt file contents
+    and application metadata per-file by providing an AES-256 encryption key in requests to the storage service.
+
+    When you use a customer-provided key, Azure Storage does not manage or persist your key.
+    When writing data to a file, the provided key is used to encrypt your data before writing it to disk.
+    A SHA-256 hash of the encryption key is written alongside the file contents,
+    and is used to verify that all subsequent operations against the file use the same encryption key.
+    This hash cannot be used to retrieve the encryption key or decrypt the contents of the file.
+    When reading a file, the provided key is used to decrypt your data after reading it from disk.
+    In both cases, the provided encryption key is securely discarded
+    as soon as the encryption or decryption process completes.
+
+    :param str key_value:
+        Base64-encoded AES-256 encryption key value.
+    :param str key_hash:
+        Base64-encoded SHA256 of the encryption key.
+    :ivar str algorithm:
+        Specifies the algorithm to use when encrypting data using the given key. Must be AES256.
+    """
+
+class EncryptionScopeOptions(BlobContainerEncryptionScope):
+    """The default encryption scope configuration for a file system.
+
+    This scope is used implicitly for all future writes within the file system,
+    but can be overridden per blob operation.
+
+    .. versionadded:: 12.9.0
+
+    :param str default_encryption_scope:
+        Specifies the default encryption scope to set on the file system and use for
+        all future writes.
+    :param bool prevent_encryption_scope_override:
+        If true, prevents any request from specifying a different encryption scope than the scope
+        set on the file system. Default value is false.
+    """
+
+class QuickQueryDialect(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+    """Specifies the quick query input/output dialect."""
+
+    DELIMITEDTEXT = 'DelimitedTextDialect'
+    DELIMITEDJSON = 'DelimitedJsonDialect'
+    PARQUET = 'ParquetDialect'
+
+
+class ArrowType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+    """Specifies an Arrow field type."""
+
+    INT64 = "int64"
+    BOOL = "bool"
+    TIMESTAMP_MS = "timestamp[ms]"
+    STRING = "string"
+    DOUBLE = "double"
+    DECIMAL = 'decimal'
+
+
+class DataLakeFileQueryError(object):
+    """The error happened during quick query operation.
+
+    :ivar str error:
+        The name of the error.
+    :ivar bool is_fatal:
+        If true, this error prevents further query processing. More result data may be returned,
+        but there is no guarantee that all of the original data will be processed.
+        If false, this error does not prevent further query processing.
+    :ivar str description:
+        A description of the error.
+    :ivar int position:
+        The blob offset at which the error occurred.
+    """
+
+    def __init__(self, error=None, is_fatal=False, description=None, position=None):
+        self.error = error
+        self.is_fatal = is_fatal
+        self.description = description
+        self.position = position
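+
+
+def _example_query_with_error_handler(file_client):
+    # Illustrative sketch only: query_file on an assumed DataLakeFileClient
+    # reports fatal and non-fatal problems through an on_error callback that
+    # receives DataLakeFileQueryError instances.
+    def on_error(error):
+        severity = "fatal" if error.is_fatal else "non-fatal"
+        print(f"{severity} error {error.error} at byte {error.position}: {error.description}")
+
+    reader = file_client.query_file("SELECT * from DataLakeStorage", on_error=on_error)
+    return reader.readall()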
+
+
+class AccessControlChangeCounters(DictMixin):
+    """
+    AccessControlChangeCounters contains counts of operations that change Access Control Lists recursively.
+
+    :ivar int directories_successful:
+        Number of directories where Access Control List has been updated successfully.
+    :ivar int files_successful:
+        Number of files where Access Control List has been updated successfully.
+    :ivar int failure_count:
+        Number of paths where Access Control List update has failed.
+    """
+
+    def __init__(self, directories_successful, files_successful, failure_count):
+        self.directories_successful = directories_successful
+        self.files_successful = files_successful
+        self.failure_count = failure_count
+
+
+class AccessControlChangeResult(DictMixin):
+    """
+    AccessControlChangeResult contains result of operations that change Access Control Lists recursively.
+
+    :ivar ~azure.storage.filedatalake.AccessControlChangeCounters counters:
+        Contains counts of paths changed from start of the operation.
+    :ivar str continuation:
+        Optional continuation token.
+        Value is present when operation is split into multiple batches and can be used to resume progress.
+    """
+
+    def __init__(self, counters, continuation):
+        self.counters = counters
+        self.continuation = continuation
+
+
+class AccessControlChangeFailure(DictMixin):
+    """
+    Represents an entry that failed to update Access Control List.
+
+    :ivar str name:
+        Name of the entry.
+    :ivar bool is_directory:
+        Indicates whether the entry is a directory.
+    :ivar str error_message:
+        Indicates the reason why the entry failed to update.
+    """
+
+    def __init__(self, name, is_directory, error_message):
+        self.name = name
+        self.is_directory = is_directory
+        self.error_message = error_message
+
+
+class AccessControlChanges(DictMixin):
+    """
+    AccessControlChanges contains batch and cumulative counts of operations
+    that change Access Control Lists recursively.
+    Additionally, it exposes the path entries that failed to update while these operations were in progress.
+
+    :ivar ~azure.storage.filedatalake.AccessControlChangeCounters batch_counters:
+        Contains counts of paths changed within single batch.
+    :ivar ~azure.storage.filedatalake.AccessControlChangeCounters aggregate_counters:
+        Contains counts of paths changed from start of the operation.
+    :ivar list(~azure.storage.filedatalake.AccessControlChangeFailure) batch_failures:
+        List of path entries that failed to update Access Control List within single batch.
+    :ivar str continuation:
+        An opaque continuation token that may be used to resume the operations in case of failures.
+    """
+
+    def __init__(self, batch_counters, aggregate_counters, batch_failures, continuation):
+        self.batch_counters = batch_counters
+        self.aggregate_counters = aggregate_counters
+        self.batch_failures = batch_failures
+        self.continuation = continuation
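+
+
+def _example_recursive_acl_progress(directory_client):
+    # Illustrative sketch only: update_access_control_recursive on an assumed
+    # DataLakeDirectoryClient reports AccessControlChanges batches through
+    # progress_hook and returns an AccessControlChangeResult.
+    def progress(changes):
+        print("batch ok:", changes.batch_counters.files_successful,
+              "failed so far:", changes.aggregate_counters.failure_count)
+        for failure in changes.batch_failures:
+            print("failed entry:", failure.name, failure.error_message)
+
+    result = directory_client.update_access_control_recursive(
+        acl="user::rwx,group::r-x,other::---", progress_hook=progress)
+    return result.counters, result.continuation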
+
+
+class DeletedPathProperties(DictMixin):
+    """
+    Properties populated for a deleted path.
+
+    :ivar str name:
+        The name of the file in the path.
+    :ivar ~datetime.datetime deleted_time:
+        A datetime object representing the time at which the path was deleted.
+    :ivar int remaining_retention_days:
+        The number of days that the path will be retained before being permanently deleted by the service.
+    :ivar str deletion_id:
+        The id associated with the deleted path.
+    """
+    def __init__(self, **kwargs):
+        self.name = kwargs.get('name')
+        self.deleted_time = None
+        self.remaining_retention_days = None
+        self.deletion_id = None
+
+
+class AnalyticsLogging(GenLogging):
+    """Azure Analytics Logging settings.
+
+    :keyword str version:
+        The version of Storage Analytics to configure. The default value is 1.0.
+    :keyword bool delete:
+        Indicates whether all delete requests should be logged. The default value is `False`.
+    :keyword bool read:
+        Indicates whether all read requests should be logged. The default value is `False`.
+    :keyword bool write:
+        Indicates whether all write requests should be logged. The default value is `False`.
+    :keyword ~azure.storage.filedatalake.RetentionPolicy retention_policy:
+        Determines how long the associated data should persist. If not specified the retention
+        policy will be disabled by default.
+    """
+
+    def __init__(self, **kwargs):
+        self.version = kwargs.get('version', '1.0')
+        self.delete = kwargs.get('delete', False)
+        self.read = kwargs.get('read', False)
+        self.write = kwargs.get('write', False)
+        self.retention_policy = kwargs.get('retention_policy') or RetentionPolicy()
+
+    @classmethod
+    def _from_generated(cls, generated):
+        if not generated:
+            return cls()
+        return cls(
+            version=generated.version,
+            delete=generated.delete,
+            read=generated.read,
+            write=generated.write,
+            retention_policy=RetentionPolicy._from_generated(generated.retention_policy)  # pylint: disable=protected-access
+        )
+
+
+class Metrics(GenMetrics):
+    """A summary of request statistics grouped by API in hour or minute aggregates.
+
+    :keyword str version:
+        The version of Storage Analytics to configure. The default value is 1.0.
+    :keyword bool enabled:
+        Indicates whether metrics are enabled for the Datalake service.
+        The default value is `False`.
+    :keyword bool include_apis:
+        Indicates whether metrics should generate summary statistics for called API operations.
+    :keyword ~azure.storage.filedatalake.RetentionPolicy retention_policy:
+        Determines how long the associated data should persist. If not specified the retention
+        policy will be disabled by default.
+    """
+
+    def __init__(self, **kwargs):
+        self.version = kwargs.get('version', '1.0')
+        self.enabled = kwargs.get('enabled', False)
+        self.include_apis = kwargs.get('include_apis')
+        self.retention_policy = kwargs.get('retention_policy') or RetentionPolicy()
+
+    @classmethod
+    def _from_generated(cls, generated):
+        if not generated:
+            return cls()
+        return cls(
+            version=generated.version,
+            enabled=generated.enabled,
+            include_apis=generated.include_apis,
+            retention_policy=RetentionPolicy._from_generated(generated.retention_policy)  # pylint: disable=protected-access
+        )
+
+
+class RetentionPolicy(GenRetentionPolicy):
+    """The retention policy which determines how long the associated data should
+    persist.
+
+    :param bool enabled:
+        Indicates whether a retention policy is enabled for the storage service.
+        The default value is False.
+    :param int days:
+        Indicates the number of days that metrics or logging or
+        soft-deleted data should be retained. All data older than this value will
+        be deleted. If enabled=True, the number of days must be specified.
+    """
+
+    def __init__(self, enabled=False, days=None):
+        super(RetentionPolicy, self).__init__(enabled=enabled, days=days, allow_permanent_delete=None)
+        if self.enabled and (self.days is None):
+            raise ValueError("If policy is enabled, 'days' must be specified.")
+
+    @classmethod
+    def _from_generated(cls, generated):
+        if not generated:
+            return cls()
+        return cls(
+            enabled=generated.enabled,
+            days=generated.days,
+        )
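+
+
+def _example_configure_analytics(service_client):
+    # Illustrative sketch only: wire the settings classes above into
+    # set_service_properties on an assumed DataLakeServiceClient; the keyword
+    # names mirror the blob service API and are assumptions for this example.
+    retention = RetentionPolicy(enabled=True, days=7)  # enabled requires days
+    service_client.set_service_properties(
+        analytics_logging=AnalyticsLogging(read=True, write=True, retention_policy=retention),
+        hour_metrics=Metrics(enabled=True, include_apis=True, retention_policy=retention))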
+
+
+class StaticWebsite(GenStaticWebsite):
+    """The properties that enable an account to host a static website.
+
+    :keyword bool enabled:
+        Indicates whether this account is hosting a static website.
+        The default value is `False`.
+    :keyword str index_document:
+        The default name of the index page under each directory.
+    :keyword str error_document404_path:
+        The absolute path of the custom 404 page.
+    :keyword str default_index_document_path:
+        Absolute path of the default index page.
+    """
+
+    def __init__(self, **kwargs):
+        self.enabled = kwargs.get('enabled', False)
+        if self.enabled:
+            self.index_document = kwargs.get('index_document')
+            self.error_document404_path = kwargs.get('error_document404_path')
+            self.default_index_document_path = kwargs.get('default_index_document_path')
+        else:
+            self.index_document = None
+            self.error_document404_path = None
+            self.default_index_document_path = None
+
+    @classmethod
+    def _from_generated(cls, generated):
+        if not generated:
+            return cls()
+        return cls(
+            enabled=generated.enabled,
+            index_document=generated.index_document,
+            error_document404_path=generated.error_document404_path,
+            default_index_document_path=generated.default_index_document_path
+        )
+
+
+class CorsRule(GenCorsRule):
+    """CORS is an HTTP feature that enables a web application running under one
+    domain to access resources in another domain. Web browsers implement a
+    security restriction known as same-origin policy that prevents a web page
+    from calling APIs in a different domain; CORS provides a secure way to
+    allow one domain (the origin domain) to call APIs in another domain.
+
+    :param list(str) allowed_origins:
+        A list of origin domains that will be allowed via CORS, or "*" to allow
+        all domains. The list must contain at least one entry. Limited to 64
+        origin domains. Each allowed origin can have up to 256 characters.
+    :param list(str) allowed_methods:
+        A list of HTTP methods that are allowed to be executed by the origin.
+        The list must contain at least one entry. For Azure Storage,
+        permitted methods are DELETE, GET, HEAD, MERGE, POST, OPTIONS or PUT.
+    :keyword list(str) allowed_headers:
+        Defaults to an empty list. A list of headers allowed to be part of
+        the cross-origin request. Limited to 64 defined headers and two prefixed
+        headers. Each header can be up to 256 characters.
+    :keyword list(str) exposed_headers:
+        Defaults to an empty list. A list of response headers to expose to CORS
+        clients. Limited to 64 defined headers and two prefixed headers. Each
+        header can be up to 256 characters.
+    :keyword int max_age_in_seconds:
+        The number of seconds that the client/browser should cache a
+        preflight response.
+    """
+
+    def __init__(self, allowed_origins, allowed_methods, **kwargs):
+        self.allowed_origins = ','.join(allowed_origins)
+        self.allowed_methods = ','.join(allowed_methods)
+        self.allowed_headers = ','.join(kwargs.get('allowed_headers', []))
+        self.exposed_headers = ','.join(kwargs.get('exposed_headers', []))
+        self.max_age_in_seconds = kwargs.get('max_age_in_seconds', 0)
+
+    @classmethod
+    def _from_generated(cls, generated):
+        return cls(
+            [generated.allowed_origins],
+            [generated.allowed_methods],
+            allowed_headers=[generated.allowed_headers],
+            exposed_headers=[generated.exposed_headers],
+            max_age_in_seconds=generated.max_age_in_seconds,
+        )
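+
+
+def _example_cors_rule():
+    # Minimal sketch of constructing a CorsRule as defined above; note the
+    # constructor joins the lists into the comma-separated wire format.
+    rule = CorsRule(
+        ["https://example.com"], ["GET", "PUT"],
+        allowed_headers=["x-ms-meta-*"], max_age_in_seconds=3600)
+    assert rule.allowed_methods == "GET,PUT"
+    return rule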
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_path_client.py b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_path_client.py
new file mode 100644
index 00000000..5130ef44
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_path_client.py
@@ -0,0 +1,1118 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+# pylint: disable=too-many-lines, docstring-keyword-should-match-keyword-only
+
+import re
+from datetime import datetime
+from typing import (
+    Any, Dict, Optional, Tuple, Union,
+    TYPE_CHECKING
+)
+from urllib.parse import urlparse, quote
+
+from azure.core.exceptions import AzureError, HttpResponseError
+from azure.core.tracing.decorator import distributed_trace
+from azure.storage.blob import BlobClient
+from ._data_lake_lease import DataLakeLeaseClient
+from ._deserialize import process_storage_error
+from ._generated import AzureDataLakeStorageRESTAPI
+from ._models import LocationMode, DirectoryProperties, AccessControlChangeResult, AccessControlChanges, \
+    AccessControlChangeCounters, AccessControlChangeFailure
+from ._serialize import (
+    add_metadata_headers,
+    compare_api_versions,
+    convert_datetime_to_rfc1123,
+    convert_dfs_url_to_blob_url,
+    get_access_conditions,
+    get_api_version,
+    get_cpk_info,
+    get_lease_id,
+    get_mod_conditions,
+    get_path_http_headers,
+    get_source_mod_conditions,
+)
+from ._shared.base_client import StorageAccountHostsMixin, parse_query
+from ._shared.response_handlers import return_response_headers, return_headers_and_deserialized
+
+if TYPE_CHECKING:
+    from azure.core.credentials import AzureNamedKeyCredential, AzureSasCredential, TokenCredential
+    from ._models import ContentSettings, FileProperties
+
+
+class PathClient(StorageAccountHostsMixin):
+    """A base client for interacting with a DataLake file/directory, even if the file/directory may not
+    yet exist.
+
+    :param str account_url:
+        The URI to the storage account.
+    :param str file_system_name:
+        The file system for the directory or files.
+    :param str path_name:
+        The whole file path, used to interact with a specific file,
+        e.g. "{directory}/{subdirectory}/{file}".
+    :param credential:
+        The credentials with which to authenticate. This is optional if the
+        account URL already has a SAS token. The value can be a SAS token string,
+        an instance of an AzureSasCredential or AzureNamedKeyCredential from azure.core.credentials,
+        an account shared access key, or an instance of a TokenCredential class from azure.identity.
+        If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential
+        - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError.
+        If using an instance of AzureNamedKeyCredential, "name" should be the storage account name, and "key"
+        should be the storage account key.
+    :type credential:
+        ~azure.core.credentials.AzureNamedKeyCredential or
+        ~azure.core.credentials.AzureSasCredential or
+        ~azure.core.credentials.TokenCredential or
+        str or dict[str, str] or None
+    :keyword str api_version:
+        The Storage API version to use for requests. Default value is the most recent service version that is
+        compatible with the current SDK. Setting to an older version may result in reduced feature compatibility.
+    :keyword str audience: The audience to use when requesting tokens for Azure Active Directory
+        authentication. Only has an effect when credential is of type TokenCredential. The value could be
+        https://storage.azure.com/ (default) or https://<account>.blob.core.windows.net.
+    """
+    def __init__(
+            self, account_url: str,
+            file_system_name: str,
+            path_name: str,
+            credential: Optional[Union[str, Dict[str, str], "AzureNamedKeyCredential", "AzureSasCredential", "TokenCredential"]] = None,  # pylint: disable=line-too-long
+            **kwargs: Any
+        ) -> None:
+        try:
+            if not account_url.lower().startswith('http'):
+                account_url = "https://" + account_url
+        except AttributeError as exc:
+            raise ValueError("Account URL must be a string.") from exc
+        parsed_url = urlparse(account_url.rstrip('/'))
+
+        # remove the preceding/trailing delimiter from the path components
+        file_system_name = file_system_name.strip('/')
+
+        # the name of root directory is /
+        if path_name != '/':
+            path_name = path_name.strip('/')
+
+        if not (file_system_name and path_name):
+            raise ValueError("Please specify a file system name and file path.")
+        if not parsed_url.netloc:
+            raise ValueError(f"Invalid URL: {account_url}")
+
+        blob_account_url = convert_dfs_url_to_blob_url(account_url)
+        self._blob_account_url = blob_account_url
+
+        datalake_hosts = kwargs.pop('_hosts', None)
+        blob_hosts = None
+        if datalake_hosts:
+            blob_primary_account_url = convert_dfs_url_to_blob_url(datalake_hosts[LocationMode.PRIMARY])
+            blob_hosts = {LocationMode.PRIMARY: blob_primary_account_url, LocationMode.SECONDARY: ""}
+        self._blob_client = BlobClient(blob_account_url, file_system_name, path_name,
+                                       credential=credential, _hosts=blob_hosts, **kwargs)
+
+        _, sas_token = parse_query(parsed_url.query)
+        self.file_system_name = file_system_name
+        self.path_name = path_name
+
+        self._query_str, self._raw_credential = self._format_query_string(sas_token, credential)
+
+        super(PathClient, self).__init__(parsed_url, service='dfs', credential=self._raw_credential,
+                                         _hosts=datalake_hosts, **kwargs)
+        # ADLS doesn't support secondary endpoint, make sure it's empty
+        self._hosts[LocationMode.SECONDARY] = ""
+        self._api_version = get_api_version(kwargs)
+        self._client = self._build_generated_client(self.url)
+        self._datalake_client_for_blob_operation = self._build_generated_client(self._blob_client.url)
+
+    def _build_generated_client(self, url: str) -> AzureDataLakeStorageRESTAPI:
+        client = AzureDataLakeStorageRESTAPI(
+            url,
+            base_url=url,
+            file_system=self.file_system_name,
+            path=self.path_name,
+            pipeline=self._pipeline
+        )
+        client._config.version = self._api_version  # pylint: disable=protected-access
+        return client
+
+    def __exit__(self, *args):
+        self._blob_client.close()
+        self._datalake_client_for_blob_operation.close()
+        super(PathClient, self).__exit__(*args)
+
+    def close(self):
+        # type: () -> None
+        """ This method is to close the sockets opened by the client.
+        It need not be used when using with a context manager.
+        """
+        self.__exit__()
+
+    def _format_url(self, hostname):
+        file_system_name = self.file_system_name
+        if isinstance(file_system_name, str):
+            file_system_name = file_system_name.encode('UTF-8')
+        return (f"{self.scheme}://{hostname}/{quote(file_system_name)}/"
+                f"{quote(self.path_name, safe='~')}{self._query_str}")
+
+    def _create_path_options(self, resource_type,
+                             content_settings=None,  # type: Optional[ContentSettings]
+                             metadata=None,  # type: Optional[Dict[str, str]]
+                             **kwargs):
+        # type: (...) -> Dict[str, Any]
+        access_conditions = get_access_conditions(kwargs.pop('lease', None))
+        mod_conditions = get_mod_conditions(kwargs)
+
+        path_http_headers = None
+        if content_settings:
+            path_http_headers = get_path_http_headers(content_settings)
+
+        cpk_info = get_cpk_info(self.scheme, kwargs)
+
+        expires_on = kwargs.pop('expires_on', None)
+        if expires_on:
+            try:
+                expires_on = convert_datetime_to_rfc1123(expires_on)
+                kwargs['expiry_options'] = 'Absolute'
+            except AttributeError:
+                expires_on = str(expires_on)
+                kwargs['expiry_options'] = 'RelativeToNow'
+
+        options = {
+            'resource': resource_type,
+            'properties': add_metadata_headers(metadata),
+            'permissions': kwargs.pop('permissions', None),
+            'umask': kwargs.pop('umask', None),
+            'owner': kwargs.pop('owner', None),
+            'group': kwargs.pop('group', None),
+            'acl': kwargs.pop('acl', None),
+            'proposed_lease_id': kwargs.pop('lease_id', None),
+            'lease_duration': kwargs.pop('lease_duration', None),
+            'expiry_options': kwargs.pop('expiry_options', None),
+            'expires_on': expires_on,
+            'path_http_headers': path_http_headers,
+            'lease_access_conditions': access_conditions,
+            'modified_access_conditions': mod_conditions,
+            'cpk_info': cpk_info,
+            'timeout': kwargs.pop('timeout', None),
+            'encryption_context': kwargs.pop('encryption_context', None),
+            'cls': return_response_headers}
+        options.update(kwargs)
+        return options
+
+    def _create(self, resource_type, content_settings=None, metadata=None, **kwargs):
+        # type: (...) -> Dict[str, Union[str, datetime]]
+        """
+        Create directory or file
+
+        :param resource_type:
+            Required for Create File and Create Directory.
+            The value must be "file" or "directory". Possible values include:
+            'directory', 'file'
+        :type resource_type: str
+        :param ~azure.storage.filedatalake.ContentSettings content_settings:
+            ContentSettings object used to set path properties.
+        :param metadata:
+            Name-value pairs associated with the file/directory as metadata.
+        :type metadata: dict(str, str)
+        :keyword lease:
+            Required if the file/directory has an active lease. Value can be a LeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
+        :keyword str umask:
+            Optional and only valid if Hierarchical Namespace is enabled for the account.
+            When creating a file or directory and the parent folder does not have a default ACL,
+            the umask restricts the permissions of the file or directory to be created.
+            The resulting permission is given by p & ^u, where p is the permission and u is the umask.
+            For example, if p is 0777 and u is 0057, then the resulting permission is 0720.
+            The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027.
+            The umask must be specified in 4-digit octal notation (e.g. 0766).
+        :keyword str owner:
+            The owner of the file or directory.
+        :keyword str group:
+            The owning group of the file or directory.
+        :keyword str acl:
+            Sets POSIX access control rights on files and directories. The value is a
+            comma-separated list of access control entries. Each access control entry (ACE) consists of a
+            scope, a type, a user or group identifier, and permissions in the format
+            "[scope:][type]:[id]:[permissions]".
+        :keyword str lease_id:
+            Proposed lease ID, in a GUID string format. The DataLake service returns
+            400 (Invalid request) if the proposed lease ID is not in the correct format.
+        :keyword int lease_duration:
+            Specifies the duration of the lease, in seconds, or negative one
+            (-1) for a lease that never expires. A non-infinite lease can be
+            between 15 and 60 seconds. A lease duration cannot be changed
+            using renew or change.
+        :keyword expires_on:
+            The time to set the file to expiry.
+            If the type of expires_on is an int, expiration time will be set
+            as the number of milliseconds elapsed from creation time.
+            If the type of expires_on is datetime, expiration time will be set
+            absolute to the time provided. If no time zone info is provided, this
+            will be interpreted as UTC.
+        :paramtype expires_on: datetime or int
+        :keyword permissions:
+            Optional and only valid if Hierarchical Namespace
+            is enabled for the account. Sets POSIX access permissions for the file
+            owner, the file owning group, and others. Each class may be granted
+            read, write, or execute permission.  The sticky bit is also supported.
+            Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are
+            supported.
+        :type permissions: str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword ~azure.storage.filedatalake.CustomerProvidedEncryptionKey cpk:
+            Encrypts the data on the service-side with the given key.
+            Use of customer-provided keys must be done over HTTPS.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :keyword str encryption_context:
+            Specifies the encryption context to set on the file.
+        :return: A dictionary of response headers.
+        :rtype: dict[str, str] or dict[str, ~datetime.datetime]
+        """
+        lease_id = kwargs.get('lease_id', None)
+        lease_duration = kwargs.get('lease_duration', None)
+        # lease_id and lease_duration are only valid as a pair
+        if bool(lease_id) != bool(lease_duration):
+            raise ValueError("Please specify both a lease_id and a lease_duration.")
+        options = self._create_path_options(
+            resource_type,
+            content_settings=content_settings,
+            metadata=metadata,
+            **kwargs)
+        try:
+            return self._client.path.create(**options)
+        except HttpResponseError as error:
+            process_storage_error(error)
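+
+    # A minimal sketch of how this helper is reached (illustrative; it assumes
+    # the public DataLakeDirectoryClient subclass from this package, and the
+    # account details below are placeholders):
+    #
+    #   from azure.storage.filedatalake import DataLakeDirectoryClient
+    #   client = DataLakeDirectoryClient(
+    #       "https://myaccount.dfs.core.windows.net", "my-file-system",
+    #       "my-directory", credential="<account-key>")
+    #   headers = client.create_directory(permissions='0777', umask='0027')
+    #   # `headers` is the response-header dict returned by `_create` above.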
+
+    @staticmethod
+    def _delete_path_options(paginated: Optional[bool], **kwargs) -> Dict[str, Any]:
+        access_conditions = get_access_conditions(kwargs.pop('lease', None))
+        mod_conditions = get_mod_conditions(kwargs)
+
+        options = {
+            'paginated': paginated,
+            'lease_access_conditions': access_conditions,
+            'modified_access_conditions': mod_conditions,
+            'cls': return_response_headers,
+            'timeout': kwargs.pop('timeout', None)}
+        options.update(kwargs)
+        return options
+
+    def _delete(self, **kwargs):
+        # type: (**Any) -> Dict[str, Union[str, datetime]]
+        """
+        Marks the specified path for deletion.
+
+        :keyword lease:
+            Required if the file/directory has an active lease. Value can be a LeaseClient object
+            or the lease ID as a string.
+        :type lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :returns: A dictionary containing information about the deleted path.
+        :rtype: dict[str, Any]
+        """
+        # Perform paginated delete only if using OAuth, deleting a directory, and api version is 2023-08-03 or later
+        # The pagination is only for ACL checks; the final request remains the atomic delete operation
+        paginated = None
+        if (compare_api_versions(self.api_version, '2023-08-03') >= 0 and
+            hasattr(self.credential, 'get_token') and
+            kwargs.get('recursive')):  # Directory delete will always specify recursive
+            paginated = True
+
+        options = self._delete_path_options(paginated, **kwargs)
+        try:
+            response_headers = self._client.path.delete(**options)
+            # Loop until continuation token is None for paginated delete
+            while response_headers['continuation']:
+                response_headers = self._client.path.delete(
+                    continuation=response_headers['continuation'],
+                    **options)
+
+            return response_headers
+        except HttpResponseError as error:
+            process_storage_error(error)
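+
+    # A hedged sketch of a conditional delete through a public subclass (the
+    # `client` here is assumed to be a DataLakeDirectoryClient built elsewhere):
+    #
+    #   from azure.core import MatchConditions
+    #   props = client.get_directory_properties()
+    #   client.delete_directory(
+    #       etag=props.etag, match_condition=MatchConditions.IfNotModified)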
+
+    @staticmethod
+    def _set_access_control_options(owner=None, group=None, permissions=None, acl=None, **kwargs):
+        # type: (...) -> Dict[str, Any]
+
+        access_conditions = get_access_conditions(kwargs.pop('lease', None))
+        mod_conditions = get_mod_conditions(kwargs)
+
+        options = {
+            'owner': owner,
+            'group': group,
+            'permissions': permissions,
+            'acl': acl,
+            'lease_access_conditions': access_conditions,
+            'modified_access_conditions': mod_conditions,
+            'timeout': kwargs.pop('timeout', None),
+            'cls': return_response_headers}
+        options.update(kwargs)
+        return options
+
+    @distributed_trace
+    def set_access_control(self, owner=None,  # type: Optional[str]
+                           group=None,  # type: Optional[str]
+                           permissions=None,  # type: Optional[str]
+                           acl=None,  # type: Optional[str]
+                           **kwargs):
+        # type: (...) -> Dict[str, Union[str, datetime]]
+        """
+        Set the owner, group, permissions, or access control list for a path.
+
+        :param owner:
+            Optional. The owner of the file or directory.
+        :type owner: str
+        :param group:
+            Optional. The owning group of the file or directory.
+        :type group: str
+        :param permissions:
+            Optional and only valid if Hierarchical Namespace
+            is enabled for the account. Sets POSIX access permissions for the file
+            owner, the file owning group, and others. Each class may be granted
+            read, write, or execute permission.  The sticky bit is also supported.
+            Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are
+            supported.
+            permissions and acl are mutually exclusive.
+        :type permissions: str
+        :param acl:
+            Sets POSIX access control rights on files and directories.
+            The value is a comma-separated list of access control entries. Each
+            access control entry (ACE) consists of a scope, a type, a user or
+            group identifier, and permissions in the format
+            "[scope:][type]:[id]:[permissions]".
+            permissions and acl are mutually exclusive.
+        :type acl: str
+        :keyword lease:
+            Required if the file/directory has an active lease. Value can be a LeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :returns: response dict containing access control options (Etag and last modified).
+        :rtype: dict[str, str] or dict[str, ~datetime.datetime]
+        """
+        if not any([owner, group, permissions, acl]):
+            raise ValueError("At least one parameter should be set for set_access_control API")
+        options = self._set_access_control_options(owner=owner, group=group, permissions=permissions, acl=acl, **kwargs)
+        try:
+            return self._client.path.set_access_control(**options)
+        except HttpResponseError as error:
+            process_storage_error(error)
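+
+    # A minimal usage sketch (illustrative): granting the owning group read and
+    # execute access. `client` is assumed to be a DataLakeFileClient or
+    # DataLakeDirectoryClient; the key names follow the "Etag and last
+    # modified" response dict described in the docstring.
+    #
+    #   response = client.set_access_control(permissions='rwxr-x---')
+    #   etag, last_modified = response['etag'], response['last_modified']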
+
+    @staticmethod
+    def _get_access_control_options(upn=None,  # type: Optional[bool]
+                                    **kwargs):
+        # type: (...) -> Dict[str, Any]
+
+        access_conditions = get_access_conditions(kwargs.pop('lease', None))
+        mod_conditions = get_mod_conditions(kwargs)
+
+        options = {
+            'action': 'getAccessControl',
+            'upn': upn if upn else False,
+            'lease_access_conditions': access_conditions,
+            'modified_access_conditions': mod_conditions,
+            'timeout': kwargs.pop('timeout', None),
+            'cls': return_response_headers}
+        options.update(kwargs)
+        return options
+
+    @distributed_trace
+    def get_access_control(self, upn=None,  # type: Optional[bool]
+                           **kwargs):
+        # type: (...) -> Dict[str, Any]
+        """
+        :param upn: Optional.
+            Valid only when Hierarchical Namespace is
+            enabled for the account. If "true", the user identity values returned
+            in the x-ms-owner, x-ms-group, and x-ms-acl response headers will be
+            transformed from Azure Active Directory Object IDs to User Principal
+            Names.  If "false", the values will be returned as Azure Active
+            Directory Object IDs. The default value is false. Note that group and
+            application Object IDs are not translated because they do not have
+            unique friendly names.
+        :type upn: bool
+        :keyword lease:
+            Required if the file/directory has an active lease. Value can be a LeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :returns: response dict containing access control options with no modifications.
+        :rtype: dict[str, Any]
+        """
+        options = self._get_access_control_options(upn=upn, **kwargs)
+        try:
+            return self._client.path.get_properties(**options)
+        except HttpResponseError as error:
+            process_storage_error(error)
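+
+    # A hedged sketch: reading the ACL back with friendly names. The dict keys
+    # shown mirror the x-ms-owner/x-ms-group/x-ms-acl response headers and are
+    # an assumption for illustration:
+    #
+    #   acl_props = client.get_access_control(upn=True)
+    #   print(acl_props['owner'], acl_props['group'], acl_props['acl'])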
+
+    @staticmethod
+    def _set_access_control_recursive_options(mode, acl, **kwargs):
+        # type: (str, str, **Any) -> Dict[str, Any]
+
+        options = {
+            'mode': mode,
+            'force_flag': kwargs.pop('continue_on_failure', None),
+            'timeout': kwargs.pop('timeout', None),
+            'continuation': kwargs.pop('continuation_token', None),
+            'max_records': kwargs.pop('batch_size', None),
+            'acl': acl,
+            'cls': return_headers_and_deserialized}
+        options.update(kwargs)
+        return options
+
+    @distributed_trace
+    def set_access_control_recursive(self, acl, **kwargs):
+        # type: (str, **Any) -> AccessControlChangeResult
+        """
+        Sets the Access Control on a path and sub-paths.
+
+        :param acl:
+            Sets POSIX access control rights on files and directories.
+            The value is a comma-separated list of access control entries. Each
+            access control entry (ACE) consists of a scope, a type, a user or
+            group identifier, and permissions in the format
+            "[scope:][type]:[id]:[permissions]".
+        :type acl: str
+        :keyword func(~azure.storage.filedatalake.AccessControlChanges) progress_hook:
+            Callback where the caller can track progress of the operation
+            as well as collect paths that failed to change Access Control.
+        :keyword str continuation_token:
+            Optional continuation token that can be used to resume previously stopped operation.
+        :keyword int batch_size:
+            Optional. If the data set size exceeds the batch size, the operation will be split into
+            multiple requests so that progress can be tracked. Batch size should be between 1 and 2000.
+            The default when unspecified is 2000.
+        :keyword int max_batches:
+            Optional. Defines the maximum number of batches that a single change Access Control
+            operation can execute. If the maximum is reached before all sub-paths are processed,
+            the continuation token can be used to resume the operation.
+            An empty value indicates that the maximum number of batches is unbounded
+            and the operation continues until the end.
+        :keyword bool continue_on_failure:
+            If set to False, the operation will terminate quickly on encountering user errors (4XX).
+            If True, the operation will ignore user errors and proceed with the operation on other sub-entities of
+            the directory.
+            A continuation token will only be returned in case of user errors when
+            continue_on_failure is True. If not set, the default value is False.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :return: A summary of the recursive operations, including the count of successes and failures,
+            as well as a continuation token in case the operation was terminated prematurely.
+        :rtype: ~azure.storage.filedatalake.AccessControlChangeResult
+        :raises ~azure.core.exceptions.AzureError:
+            The operation can be restarted using the continuation_token field of the
+            AzureError, if the token is available.
+        """
+        if not acl:
+            raise ValueError("The Access Control List must be set for this operation")
+
+        progress_hook = kwargs.pop('progress_hook', None)
+        max_batches = kwargs.pop('max_batches', None)
+        options = self._set_access_control_recursive_options(mode='set', acl=acl, **kwargs)
+        return self._set_access_control_internal(options=options, progress_hook=progress_hook,
+                                                 max_batches=max_batches)
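+
+    # A minimal sketch of a recursive ACL change with progress reporting
+    # (`client` is assumed to be a DataLakeDirectoryClient built elsewhere):
+    #
+    #   def report(changes):
+    #       # `changes` is the AccessControlChanges passed to progress_hook
+    #       print("batch files ok:", changes.batch_counters.files_successful,
+    #             "failed:", changes.batch_counters.failure_count)
+    #
+    #   result = client.set_access_control_recursive(
+    #       acl='user::rwx,group::r-x,other::---',
+    #       progress_hook=report, batch_size=500)
+    #   print("directories changed:", result.counters.directories_successful)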
+
+    @distributed_trace
+    def update_access_control_recursive(self, acl, **kwargs):
+        # type: (str, **Any) -> AccessControlChangeResult
+        """
+        Modifies the Access Control on a path and sub-paths.
+
+        :param acl:
+            Modifies POSIX access control rights on files and directories.
+            The value is a comma-separated list of access control entries. Each
+            access control entry (ACE) consists of a scope, a type, a user or
+            group identifier, and permissions in the format
+            "[scope:][type]:[id]:[permissions]".
+        :type acl: str
+        :keyword func(~azure.storage.filedatalake.AccessControlChanges) progress_hook:
+            Callback where the caller can track progress of the operation
+            as well as collect paths that failed to change Access Control.
+        :keyword str continuation_token:
+            Optional continuation token that can be used to resume previously stopped operation.
+        :keyword int batch_size:
+            Optional. If the data set size exceeds the batch size, the operation will be split into
+            multiple requests so that progress can be tracked. Batch size should be between 1 and 2000.
+            The default when unspecified is 2000.
+        :keyword int max_batches:
+            Optional. Defines the maximum number of batches that a single change Access Control
+            operation can execute. If the maximum is reached before all sub-paths are processed,
+            the continuation token can be used to resume the operation.
+            An empty value indicates that the maximum number of batches is unbounded
+            and the operation continues until the end.
+        :keyword bool continue_on_failure:
+            If set to False, the operation will terminate quickly on encountering user errors (4XX).
+            If True, the operation will ignore user errors and proceed with the operation on other sub-entities of
+            the directory.
+            A continuation token will only be returned in case of user errors when
+            continue_on_failure is True. If not set, the default value is False.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :return: A summary of the recursive operations, including the count of successes and failures,
+            as well as a continuation token in case the operation was terminated prematurely.
+        :rtype: ~azure.storage.filedatalake.AccessControlChangeResult
+        :raises ~azure.core.exceptions.AzureError:
+            The operation can be restarted using the continuation_token field of the
+            AzureError, if the token is available.
+        """
+        if not acl:
+            raise ValueError("The Access Control List must be set for this operation")
+
+        progress_hook = kwargs.pop('progress_hook', None)
+        max_batches = kwargs.pop('max_batches', None)
+        options = self._set_access_control_recursive_options(mode='modify', acl=acl, **kwargs)
+        return self._set_access_control_internal(options=options, progress_hook=progress_hook,
+                                                 max_batches=max_batches)
+
+    @distributed_trace
+    def remove_access_control_recursive(self, acl, **kwargs):
+        # type: (str, **Any) -> AccessControlChangeResult
+        """
+        Removes the Access Control on a path and sub-paths.
+
+        :param acl:
+            Removes POSIX access control rights on files and directories.
+            The value is a comma-separated list of access control entries. Each
+            access control entry (ACE) consists of a scope, a type, and a user or
+            group identifier in the format "[scope:][type]:[id]".
+        :type acl: str
+        :keyword func(~azure.storage.filedatalake.AccessControlChanges) progress_hook:
+            Callback where the caller can track progress of the operation
+            as well as collect paths that failed to change Access Control.
+        :keyword str continuation_token:
+            Optional continuation token that can be used to resume previously stopped operation.
+        :keyword int batch_size:
+            Optional. If the data set size exceeds the batch size, the operation will be split into
+            multiple requests so that progress can be tracked. Batch size should be between 1 and 2000.
+            The default when unspecified is 2000.
+        :keyword int max_batches:
+            Optional. Defines the maximum number of batches that a single change Access Control
+            operation can execute. If the maximum is reached before all sub-paths are processed,
+            the continuation token can be used to resume the operation.
+            An empty value indicates that the maximum number of batches is unbounded
+            and the operation continues until the end.
+        :keyword bool continue_on_failure:
+            If set to False, the operation will terminate quickly on encountering user errors (4XX).
+            If True, the operation will ignore user errors and proceed with the operation on other sub-entities of
+            the directory.
+            A continuation token will only be returned in case of user errors when
+            continue_on_failure is True. If not set, the default value is False.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :return: A summary of the recursive operations, including the count of successes and failures,
+            as well as a continuation token in case the operation was terminated prematurely.
+        :rtype: ~azure.storage.filedatalake.AccessControlChangeResult
+        :raises ~azure.core.exceptions.AzureError:
+            The operation can be restarted using the continuation_token field of the
+            AzureError, if the token is available.
+        """
+        if not acl:
+            raise ValueError("The Access Control List must be set for this operation")
+
+        progress_hook = kwargs.pop('progress_hook', None)
+        max_batches = kwargs.pop('max_batches', None)
+        options = self._set_access_control_recursive_options(mode='remove', acl=acl, **kwargs)
+        return self._set_access_control_internal(options=options, progress_hook=progress_hook,
+                                                 max_batches=max_batches)
+
+    def _set_access_control_internal(self, options, progress_hook, max_batches=None):
+        try:
+            continue_on_failure = options.get('force_flag')
+            total_directories_successful = 0
+            total_files_success = 0
+            total_failure_count = 0
+            batch_count = 0
+            last_continuation_token = None
+            current_continuation_token = None
+            continue_operation = True
+            while continue_operation:
+                headers, resp = self._client.path.set_access_control_recursive(**options)
+
+                # make a running tally so that we can report the final results
+                total_directories_successful += resp.directories_successful
+                total_files_success += resp.files_successful
+                total_failure_count += resp.failure_count
+                batch_count += 1
+                current_continuation_token = headers['continuation']
+
+                if current_continuation_token is not None:
+                    last_continuation_token = current_continuation_token
+
+                if progress_hook is not None:
+                    progress_hook(AccessControlChanges(
+                        batch_counters=AccessControlChangeCounters(
+                            directories_successful=resp.directories_successful,
+                            files_successful=resp.files_successful,
+                            failure_count=resp.failure_count,
+                        ),
+                        aggregate_counters=AccessControlChangeCounters(
+                            directories_successful=total_directories_successful,
+                            files_successful=total_files_success,
+                            failure_count=total_failure_count,
+                        ),
+                        batch_failures=[AccessControlChangeFailure(
+                            name=failure.name,
+                            is_directory=failure.type == 'DIRECTORY',
+                            error_message=failure.error_message) for failure in resp.failed_entries],
+                        continuation=last_continuation_token))
+
+                # update the continuation token, if there are more operations that cannot be completed in a single call
+                max_batches_satisfied = (max_batches is not None and batch_count == max_batches)
+                continue_operation = bool(current_continuation_token) and not max_batches_satisfied
+                options['continuation'] = current_continuation_token
+
+            # currently the service stops on any failure, so we should send back the last continuation token
+            # for the user to retry the failed updates
+            # otherwise we should just return what the service gave us
+            return AccessControlChangeResult(counters=AccessControlChangeCounters(
+                directories_successful=total_directories_successful,
+                files_successful=total_files_success,
+                failure_count=total_failure_count),
+                continuation=last_continuation_token
+                if total_failure_count > 0 and not continue_on_failure else current_continuation_token)
+        except HttpResponseError as error:
+            error.continuation_token = last_continuation_token
+            process_storage_error(error)
+        except AzureError as error:
+            error.continuation_token = last_continuation_token
+            raise error
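+
+    # A hedged sketch of resuming an interrupted recursive change using the
+    # continuation_token this helper attaches to the raised error (`client`
+    # and `new_acl` are placeholders):
+    #
+    #   from azure.core.exceptions import AzureError
+    #   try:
+    #       client.update_access_control_recursive(acl=new_acl)
+    #   except AzureError as error:
+    #       if error.continuation_token:
+    #           client.update_access_control_recursive(
+    #               acl=new_acl, continuation_token=error.continuation_token)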
+
+    def _parse_rename_path(self, new_name: str) -> Tuple[str, str, Optional[str]]:
+        new_name = new_name.strip('/')
+        new_file_system = new_name.split('/')[0]
+        new_path = new_name[len(new_file_system):].strip('/')
+
+        new_sas = None
+        sas_split = new_path.split('?')
+        # If there is a '?', there could be a SAS token after it
+        if len(sas_split) > 1:
+            # Check last element for SAS by looking for sv= and sig=
+            potential_sas = sas_split[-1]
+            if re.search(r'sv=\d{4}-\d{2}-\d{2}', potential_sas) and 'sig=' in potential_sas:
+                new_sas = potential_sas
+                # Remove SAS from new path
+                new_path = new_path[:-(len(new_sas) + 1)]
+
+        if not new_sas:
+            if not self._raw_credential and new_file_system != self.file_system_name:
+                raise ValueError("please provide the sas token for the new file")
+            if not self._raw_credential and new_file_system == self.file_system_name:
+                new_sas = self._query_str.strip('?')
+
+        return new_file_system, new_path, new_sas
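+
+    # Illustrative behaviour of the parser above (values are hypothetical):
+    #
+    #   "/other-fs/dir/file?sv=2021-08-06&sig=..." parses to
+    #   ("other-fs", "dir/file", "sv=2021-08-06&sig=..."),
+    #   while "/same-fs/dir/file" with no embedded SAS falls back to the
+    #   client's own query-string credential, or raises ValueError when the
+    #   target file system differs and no credential is available.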
+
+    def _rename_path_options(self,
+                             rename_source,  # type: str
+                             content_settings=None,  # type: Optional[ContentSettings]
+                             metadata=None,  # type: Optional[Dict[str, str]]
+                             **kwargs):
+        # type: (...) -> Dict[str, Any]
+        if metadata or kwargs.pop('permissions', None) or kwargs.pop('umask', None):
+            raise ValueError("metadata, permissions, umask is not supported for this operation")
+
+        access_conditions = get_access_conditions(kwargs.pop('lease', None))
+        source_lease_id = get_lease_id(kwargs.pop('source_lease', None))
+        mod_conditions = get_mod_conditions(kwargs)
+        source_mod_conditions = get_source_mod_conditions(kwargs)
+
+        path_http_headers = None
+        if content_settings:
+            path_http_headers = get_path_http_headers(content_settings)
+
+        options = {
+            'rename_source': rename_source,
+            'path_http_headers': path_http_headers,
+            'lease_access_conditions': access_conditions,
+            'source_lease_id': source_lease_id,
+            'modified_access_conditions': mod_conditions,
+            'source_modified_access_conditions': source_mod_conditions,
+            'timeout': kwargs.pop('timeout', None),
+            'mode': 'legacy',
+            'cls': return_response_headers}
+        options.update(kwargs)
+        return options
+
+    def _rename_path(self, rename_source, **kwargs):
+        # type: (str, **Any) -> Dict[str, Any]
+        """
+        Rename directory or file
+
+        :param rename_source:
+            The value must have the following format: "/{filesystem}/{path}".
+        :type rename_source: str
+        :keyword ~azure.storage.filedatalake.ContentSettings content_settings:
+            ContentSettings object used to set path properties.
+        :keyword source_lease:
+            A lease ID for the source path. If specified,
+            the source path must have an active lease and the lease ID must
+            match.
+        :paramtype source_lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
+        :keyword lease:
+            Required if the file/directory has an active lease. Value can be a LeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword ~datetime.datetime source_if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime source_if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str source_etag:
+            The source ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions source_match_condition:
+            The source match condition to use upon the etag.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :returns: response dict containing information about the renamed path.
+        :rtype: dict[str, Any]
+        """
+        options = self._rename_path_options(
+            rename_source,
+            **kwargs)
+        try:
+            return self._client.path.create(**options)
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    def _get_path_properties(self, **kwargs):
+        # type: (**Any) -> Union[FileProperties, DirectoryProperties]
+        """Returns all user-defined metadata, standard HTTP properties, and
+        system properties for the file or directory. It does not return the content of the directory or file.
+
+        :keyword lease:
+            Required if the directory or file has an active lease. Value can be a DataLakeLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword ~azure.storage.filedatalake.CustomerProvidedEncryptionKey cpk:
+            Decrypts the data on the service-side with the given key.
+            Use of customer-provided keys must be done over HTTPS.
+            Required if the file/directory was created with a customer-provided key.
+        :keyword bool upn:
+            If True, the user identity values returned in the x-ms-owner, x-ms-group,
+            and x-ms-acl response headers will be transformed from Azure Active Directory
+            Object IDs to User Principal Names in the owner, group, and acl fields of the
+            respective property object returned. If False, the values will be returned
+            as Azure Active Directory Object IDs. The default value is False. Note that
+            group and application Object IDs are not translated because they do not have
+            unique friendly names.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :returns:
+            Information including user-defined metadata, standard HTTP properties,
+            and system properties for the file or directory.
+        :rtype: DirectoryProperties or FileProperties
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../tests/test_blob_samples_common.py
+                :start-after: [START get_blob_properties]
+                :end-before: [END get_blob_properties]
+                :language: python
+                :dedent: 8
+                :caption: Getting the properties for a file/directory.
+        """
+        upn = kwargs.pop('upn', None)
+        if upn:
+            headers = kwargs.pop('headers', {})
+            headers['x-ms-upn'] = str(upn)
+            kwargs['headers'] = headers
+        path_properties = self._blob_client.get_blob_properties(**kwargs)
+        return path_properties
+
+    def _exists(self, **kwargs):
+        # type: (**Any) -> bool
+        """
+        Returns True if a path exists and returns False otherwise.
+
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :returns: True if a path exists, False otherwise.
+        :rtype: bool
+        """
+        return self._blob_client.exists(**kwargs)
+
+    @distributed_trace
+    def set_metadata(self, metadata,  # type: Dict[str, str]
+                     **kwargs):
+        # type: (...) -> Dict[str, Union[str, datetime]]
+        """Sets one or more user-defined name-value pairs for the specified
+        file system. Each call to this operation replaces all existing metadata
+        attached to the file system. To remove all metadata from the file system,
+        call this operation with no metadata dict.
+
+        :param metadata:
+            A dict containing name-value pairs to associate with the file system as
+            metadata. Example: {'category':'test'}
+        :type metadata: dict[str, str]
+        :keyword lease:
+            If specified, set_file_system_metadata only succeeds if the
+            file system's lease is active and matches this ID.
+        :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword ~azure.storage.filedatalake.CustomerProvidedEncryptionKey cpk:
+            Encrypts the data on the service-side with the given key.
+            Use of customer-provided keys must be done over HTTPS.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :returns: path-updated property dict (Etag and last modified).
+        :rtype: dict[str, str] or dict[str, ~datetime.datetime]
+        """
+        return self._blob_client.set_blob_metadata(metadata=metadata, **kwargs)
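+
+    # A minimal usage sketch (illustrative; the metadata values are
+    # placeholders and `client` is assumed to be built elsewhere):
+    #
+    #   client.set_metadata({'category': 'test', 'owner-team': 'data'})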
+
+    @distributed_trace
+    def set_http_headers(self, content_settings: Optional["ContentSettings"] = None, **kwargs):
+        # type: (...) -> Dict[str, Any]
+        """Sets system properties on the file or directory.
+
+        If one property is set for the content_settings, all properties will be overridden.
+
+        :param ~azure.storage.filedatalake.ContentSettings content_settings:
+            ContentSettings object used to set file/directory properties.
+        :keyword lease:
+            If specified, set_http_headers only succeeds if the
+            file/directory's lease is active and matches this ID.
+        :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :returns: file/directory-updated property dict (Etag and last modified)
+        :rtype: dict[str, Any]
+        """
+        return self._blob_client.set_http_headers(content_settings=content_settings, **kwargs)
+
+    @distributed_trace
+    def acquire_lease(self, lease_duration=-1,  # type: Optional[int]
+                      lease_id=None,  # type: Optional[str]
+                      **kwargs):
+        # type: (...) -> DataLakeLeaseClient
+        """
+        Requests a new lease. If the file or directory does not have an active lease,
+        the DataLake service creates a lease on the file/directory and returns a new
+        lease ID.
+
+        :param int lease_duration:
+            Specifies the duration of the lease, in seconds, or negative one
+            (-1) for a lease that never expires. A non-infinite lease can be
+            between 15 and 60 seconds. A lease duration cannot be changed
+            using renew or change. Default is -1 (infinite lease).
+        :param str lease_id:
+            Proposed lease ID, in a GUID string format. The DataLake service returns
+            400 (Invalid request) if the proposed lease ID is not in the correct format.
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :returns: A DataLakeLeaseClient object that can be used in a context manager.
+        :rtype: ~azure.storage.filedatalake.DataLakeLeaseClient
+        """
+        lease = DataLakeLeaseClient(self, lease_id=lease_id)  # type: ignore
+        lease.acquire(lease_duration=lease_duration, **kwargs)
+        return lease
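A quick editorial sketch of the lease flow above (not part of the diff). It assumes an already-authenticated DataLakeFileClient named `file_client`; the returned DataLakeLeaseClient releases the lease on exit when used as a context manager.

    # `file_client` is assumed to exist; client construction is omitted here.
    with file_client.acquire_lease(lease_duration=15) as lease:
        # Pass the lease so the update only succeeds while we hold it.
        file_client.set_metadata({'category': 'test'}, lease=lease)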
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_quick_query_helper.py b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_quick_query_helper.py
new file mode 100644
index 00000000..b4f44c36
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_quick_query_helper.py
@@ -0,0 +1,73 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+
+from typing import Union, Iterable, IO
+
+
+class DataLakeFileQueryReader(object):
+    """A streaming object to read query results.
+
+    :ivar str name:
+        The name of the blob being queried.
+    :ivar str container:
+        The name of the container where the blob is.
+    :ivar dict response_headers:
+        The response_headers of the quick query request.
+    :ivar bytes record_delimiter:
+        The delimiter used to separate lines, or records, within the data. The `records`
+        method will return these lines via a generator.
+    """
+
+    def __init__(
+        self,
+        blob_query_reader
+    ):
+        self.name = blob_query_reader.name
+        self.file_system = blob_query_reader.container
+        self.response_headers = blob_query_reader.response_headers
+        self.record_delimiter = blob_query_reader.record_delimiter
+        self._bytes_processed = 0
+        self._blob_query_reader = blob_query_reader
+
+    def __len__(self):
+        return len(self._blob_query_reader)
+
+    def readall(self):
+        # type: () -> Union[bytes, str]
+        """Return all query results.
+
+        This operation is blocking until all data is downloaded.
+        If an encoding has been configured, it will be used to decode individual
+        records as they are received.
+
+        :returns: All query results.
+        :rtype: Union[bytes, str]
+        """
+        return self._blob_query_reader.readall()
+
+    def readinto(self, stream):
+        # type: (IO) -> None
+        """Download the query result to a stream.
+
+        :param IO stream:
+            The stream to download to. This can be an open file-handle,
+            or any writable stream.
+        :returns: None
+        """
+        self._blob_query_reader.readinto(stream)
+
+    def records(self):
+        # type: () -> Iterable[Union[bytes, str]]
+        """Returns a record generator for the query result.
+
+        Records will be returned line by line.
+        If an encoding has been configured, it will be used to decode individual
+        records as they are received.
+
+        :returns: A record generator for the query result.
+        :rtype: Iterable[Union[bytes, str]]
+        """
+        return self._blob_query_reader.records()
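A short usage sketch for the reader above, assuming a hypothetical `file_client` (an authenticated DataLakeFileClient) pointing at a CSV file, whose `query_file` call hands back this wrapper.

    reader = file_client.query_file("SELECT * from BlobStorage")
    for record in reader.records():  # decoded per the configured encoding
        print(record)
    # reader.readall() would instead buffer the entire result in one call.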
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_serialize.py b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_serialize.py
new file mode 100644
index 00000000..c0866a32
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_serialize.py
@@ -0,0 +1,185 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+
+from typing import Any, Dict, Optional, Union
+
+from azure.storage.blob._serialize import _get_match_headers
+from ._shared import encode_base64
+from ._generated.models import ModifiedAccessConditions, PathHTTPHeaders, \
+    SourceModifiedAccessConditions, LeaseAccessConditions, CpkInfo
+
+
+_SUPPORTED_API_VERSIONS = [
+    '2019-02-02',
+    '2019-07-07',
+    '2019-10-10',
+    '2019-12-12',
+    '2020-02-10',
+    '2020-04-08',
+    '2020-06-12',
+    '2020-08-04',
+    '2020-10-02',
+    '2020-12-06',
+    '2021-02-12',
+    '2021-04-10',
+    '2021-06-08',
+    '2021-08-06',
+    '2021-12-02',
+    '2022-11-02',
+    '2023-01-03',
+    '2023-05-03',
+    '2023-08-03',
+    '2023-11-03',
+    '2024-05-04',
+    '2024-08-04',
+    '2024-11-04',
+    '2025-01-05',
+    '2025-05-05',
+]  # This list must be in chronological order!
+
+
+def get_api_version(kwargs):
+    # type: (Dict[str, Any]) -> str
+    api_version = kwargs.get('api_version', None)
+    if api_version and api_version not in _SUPPORTED_API_VERSIONS:
+        versions = '\n'.join(_SUPPORTED_API_VERSIONS)
+        raise ValueError(f"Unsupported API version '{api_version}'. Please select from:\n{versions}")
+    return api_version or _SUPPORTED_API_VERSIONS[-1]
+
+
+def compare_api_versions(version1: str, version2: str) -> int:
+    v1 = _SUPPORTED_API_VERSIONS.index(version1)
+    v2 = _SUPPORTED_API_VERSIONS.index(version2)
+    if v1 == v2:
+        return 0
+    if v1 < v2:
+        return -1
+    return 1
+
+
+def convert_dfs_url_to_blob_url(dfs_account_url):
+    return dfs_account_url.replace('.dfs.', '.blob.', 1)
+
+
+def convert_datetime_to_rfc1123(date):
+    weekday = ["Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"][date.weekday()]
+    month = ["Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep",
+             "Oct", "Nov", "Dec"][date.month - 1]
+    return f"{weekday}, {date.day:02} {month} {date.year:04} {date.hour:02}:{date.minute:02}:{date.second:02} GMT"
+
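As a concrete check of the formatter above (a sketch; any datetime works):

    from datetime import datetime
    from azure.storage.filedatalake._serialize import convert_datetime_to_rfc1123

    # datetime(2025, 1, 5, 12, 30, 0) renders as 'Sun, 05 Jan 2025 12:30:00 GMT'
    print(convert_datetime_to_rfc1123(datetime(2025, 1, 5, 12, 30, 0)))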
+
+def add_metadata_headers(metadata=None):
+    # type: (Optional[Dict[str, str]]) -> Optional[str]
+    if not metadata:
+        return None
+    headers = []
+    for key, value in metadata.items():
+        headers.append(key + '=')
+        headers.append(encode_base64(value))
+        headers.append(',')
+    # Drop the trailing comma
+    del headers[-1]
+    return ''.join(headers)
+
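For reference, the helper above base64-encodes each metadata value and joins the pairs with commas; a minimal sketch:

    from azure.storage.filedatalake._serialize import add_metadata_headers

    # {'category': 'test'} -> 'category=dGVzdA=='
    print(add_metadata_headers({'category': 'test', 'owner': 'alice'}))
    # -> 'category=dGVzdA==,owner=YWxpY2U='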
+
+def get_mod_conditions(kwargs):
+    # type: (Dict[str, Any]) -> ModifiedAccessConditions
+    if_match, if_none_match = _get_match_headers(kwargs, 'match_condition', 'etag')
+    return ModifiedAccessConditions(
+        if_modified_since=kwargs.pop('if_modified_since', None),
+        if_unmodified_since=kwargs.pop('if_unmodified_since', None),
+        if_match=if_match or kwargs.pop('if_match', None),
+        if_none_match=if_none_match or kwargs.pop('if_none_match', None)
+    )
+
+
+def get_source_mod_conditions(kwargs):
+    # type: (Dict[str, Any]) -> SourceModifiedAccessConditions
+    if_match, if_none_match = _get_match_headers(kwargs, 'source_match_condition', 'source_etag')
+    return SourceModifiedAccessConditions(
+        source_if_modified_since=kwargs.pop('source_if_modified_since', None),
+        source_if_unmodified_since=kwargs.pop('source_if_unmodified_since', None),
+        source_if_match=if_match or kwargs.pop('source_if_match', None),
+        source_if_none_match=if_none_match or kwargs.pop('source_if_none_match', None)
+    )
+
+
+def get_path_http_headers(content_settings):
+    path_headers = PathHTTPHeaders(
+        cache_control=content_settings.cache_control,
+        content_type=content_settings.content_type,
+        content_md5=bytearray(content_settings.content_md5) if content_settings.content_md5 else None,
+        content_encoding=content_settings.content_encoding,
+        content_language=content_settings.content_language,
+        content_disposition=content_settings.content_disposition
+    )
+    return path_headers
+
+
+def get_access_conditions(lease):
+    # type: (Optional[Union[DataLakeLeaseClient, str]]) -> Optional[LeaseAccessConditions]
+    try:
+        lease_id = lease.id  # type: ignore
+    except AttributeError:
+        lease_id = lease  # type: ignore
+    return LeaseAccessConditions(lease_id=lease_id) if lease_id else None
+
+
+def get_lease_id(lease):
+    if not lease:
+        return ""
+    try:
+        lease_id = lease.id
+    except AttributeError:
+        lease_id = lease
+    return lease_id
+
+
+def get_lease_action_properties(kwargs: Dict[str, Any]) -> Dict[str, Any]:
+    lease_action = kwargs.pop('lease_action', None)
+    lease_duration = kwargs.pop('lease_duration', None)
+    lease = kwargs.pop('lease', None)
+    try:
+        lease_id = lease.id
+    except AttributeError:
+        lease_id = lease
+
+    proposed_lease_id = None
+    access_conditions = None
+
+    # Acquiring a new lease
+    if lease_action in ['acquire', 'acquire-release']:
+        # Use provided lease id as the new lease id
+        proposed_lease_id = lease_id
+        # Assign a default lease duration if not provided
+        lease_duration = lease_duration or -1
+    else:
+        # Use lease id as access conditions
+        access_conditions = LeaseAccessConditions(lease_id=lease_id) if lease_id else None
+
+    return {
+        'lease_action': lease_action,
+        'lease_duration': lease_duration,
+        'proposed_lease_id': proposed_lease_id,
+        'lease_access_conditions': access_conditions
+    }
+
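The branching above is easiest to see with a concrete call (a sketch using a made-up lease ID):

    from azure.storage.filedatalake._serialize import get_lease_action_properties

    props = get_lease_action_properties({'lease_action': 'acquire', 'lease': 'my-lease-id'})
    # 'acquire' turns the lease ID into a proposal and defaults the duration:
    # {'lease_action': 'acquire', 'lease_duration': -1,
    #  'proposed_lease_id': 'my-lease-id', 'lease_access_conditions': None}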
+
+def get_cpk_info(scheme, kwargs):
+    # type: (str, Dict[str, Any]) -> CpkInfo
+    cpk = kwargs.pop('cpk', None)
+    if cpk:
+        if scheme.lower() != 'https':
+            raise ValueError("Customer provided encryption key must be used over HTTPS.")
+        return CpkInfo(
+            encryption_key=cpk.key_value,
+            encryption_key_sha256=cpk.key_hash,
+            encryption_algorithm=cpk.algorithm)
+
+    return None
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_shared/__init__.py b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_shared/__init__.py
new file mode 100644
index 00000000..a8b1a27d
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_shared/__init__.py
@@ -0,0 +1,54 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+
+import base64
+import hashlib
+import hmac
+
+try:
+    from urllib.parse import quote, unquote
+except ImportError:
+    from urllib2 import quote, unquote # type: ignore
+
+
+def url_quote(url):
+    return quote(url)
+
+
+def url_unquote(url):
+    return unquote(url)
+
+
+def encode_base64(data):
+    if isinstance(data, str):
+        data = data.encode('utf-8')
+    encoded = base64.b64encode(data)
+    return encoded.decode('utf-8')
+
+
+def decode_base64_to_bytes(data):
+    if isinstance(data, str):
+        data = data.encode('utf-8')
+    return base64.b64decode(data)
+
+
+def decode_base64_to_text(data):
+    decoded_bytes = decode_base64_to_bytes(data)
+    return decoded_bytes.decode('utf-8')
+
+
+def sign_string(key, string_to_sign, key_is_base64=True):
+    if key_is_base64:
+        key = decode_base64_to_bytes(key)
+    else:
+        if isinstance(key, str):
+            key = key.encode('utf-8')
+    if isinstance(string_to_sign, str):
+        string_to_sign = string_to_sign.encode('utf-8')
+    signed_hmac_sha256 = hmac.HMAC(key, string_to_sign, hashlib.sha256)
+    digest = signed_hmac_sha256.digest()
+    encoded_digest = encode_base64(digest)
+    return encoded_digest
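To see the signing helper end to end, a sketch with a throwaway key (not a real account key):

    import base64
    from azure.storage.filedatalake._shared import sign_string

    key = base64.b64encode(b'not-a-real-key').decode()  # account keys arrive base64-encoded
    print(sign_string(key, 'GET\n...\n/myaccount/container'))  # base64 HMAC-SHA256 digest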
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_shared/authentication.py b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_shared/authentication.py
new file mode 100644
index 00000000..b41f2391
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_shared/authentication.py
@@ -0,0 +1,245 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+
+import logging
+import re
+from typing import List, Tuple
+from urllib.parse import unquote, urlparse
+from functools import cmp_to_key
+
+try:
+    from yarl import URL
+except ImportError:
+    pass
+
+try:
+    from azure.core.pipeline.transport import AioHttpTransport  # pylint: disable=non-abstract-transport-import
+except ImportError:
+    AioHttpTransport = None
+
+from azure.core.exceptions import ClientAuthenticationError
+from azure.core.pipeline.policies import SansIOHTTPPolicy
+
+from . import sign_string
+
+logger = logging.getLogger(__name__)
+
+
+table_lv0 = [
+    0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+    0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+    0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x71c, 0x0, 0x71f, 0x721, 0x723, 0x725,
+    0x0, 0x0, 0x0, 0x72d, 0x803, 0x0, 0x0, 0x733, 0x0, 0xd03, 0xd1a, 0xd1c, 0xd1e,
+    0xd20, 0xd22, 0xd24, 0xd26, 0xd28, 0xd2a, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+    0xe02, 0xe09, 0xe0a, 0xe1a, 0xe21, 0xe23, 0xe25, 0xe2c, 0xe32, 0xe35, 0xe36, 0xe48, 0xe51,
+    0xe70, 0xe7c, 0xe7e, 0xe89, 0xe8a, 0xe91, 0xe99, 0xe9f, 0xea2, 0xea4, 0xea6, 0xea7, 0xea9,
+    0x0, 0x0, 0x0, 0x743, 0x744, 0x748, 0xe02, 0xe09, 0xe0a, 0xe1a, 0xe21, 0xe23, 0xe25,
+    0xe2c, 0xe32, 0xe35, 0xe36, 0xe48, 0xe51, 0xe70, 0xe7c, 0xe7e, 0xe89, 0xe8a, 0xe91, 0xe99,
+    0xe9f, 0xea2, 0xea4, 0xea6, 0xea7, 0xea9, 0x0, 0x74c, 0x0, 0x750, 0x0,
+]
+
+table_lv4 = [
+    0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+    0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+    0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x8012, 0x0, 0x0, 0x0, 0x0, 0x0, 0x8212, 0x0, 0x0,
+    0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+    0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+    0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+    0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+    0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+]
+
+def compare(lhs: str, rhs: str) -> int:  # pylint:disable=too-many-return-statements
+    tables = [table_lv0, table_lv4]
+    curr_level, i, j, n = 0, 0, 0, len(tables)
+    lhs_len = len(lhs)
+    rhs_len = len(rhs)
+    while curr_level < n:
+        if curr_level == (n - 1) and i != j:
+            if i > j:
+                return -1
+            if i < j:
+                return 1
+            return 0
+
+        w1 = tables[curr_level][ord(lhs[i])] if i < lhs_len else 0x1
+        w2 = tables[curr_level][ord(rhs[j])] if j < rhs_len else 0x1
+
+        if w1 == 0x1 and w2 == 0x1:
+            i = 0
+            j = 0
+            curr_level += 1
+        elif w1 == w2:
+            i += 1
+            j += 1
+        elif w1 == 0:
+            i += 1
+        elif w2 == 0:
+            j += 1
+        else:
+            if w1 < w2:
+                return -1
+            if w1 > w2:
+                return 1
+            return 0
+    return 0
+
+
+# wraps a given exception with the desired exception type
+def _wrap_exception(ex, desired_type):
+    msg = ""
+    if ex.args:
+        msg = ex.args[0]
+    return desired_type(msg)
+
+# This method attempts to emulate the sorting done by the service
+def _storage_header_sort(input_headers: List[Tuple[str, str]]) -> List[Tuple[str, str]]:
+
+    # Build dict of tuples and list of keys
+    header_dict = {}
+    header_keys = []
+    for k, v in input_headers:
+        header_dict[k] = v
+        header_keys.append(k)
+
+    try:
+        header_keys = sorted(header_keys, key=cmp_to_key(compare))
+    except ValueError as exc:
+        raise ValueError("Illegal character encountered when sorting headers.") from exc
+
+    # Build list of sorted tuples
+    sorted_headers = []
+    for key in header_keys:
+        sorted_headers.append((key, header_dict.pop(key)))
+    return sorted_headers
+
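A sketch of the sorter in use, with two typical signing headers:

    from azure.storage.filedatalake._shared.authentication import _storage_header_sort

    hdrs = [('x-ms-version', '2025-05-05'),
            ('x-ms-date', 'Sun, 05 Jan 2025 12:30:00 GMT')]
    for name, value in _storage_header_sort(hdrs):
        print(f'{name}:{value}')  # emitted in the service's signing order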
+
+class AzureSigningError(ClientAuthenticationError):
+    """
+    Represents a fatal error when attempting to sign a request.
+    In general, the cause of this exception is user error. For example, the given account key is not valid.
+    Please visit https://learn.microsoft.com/azure/storage/common/storage-create-storage-account for more info.
+    """
+
+
+class SharedKeyCredentialPolicy(SansIOHTTPPolicy):
+
+    def __init__(self, account_name, account_key):
+        self.account_name = account_name
+        self.account_key = account_key
+        super(SharedKeyCredentialPolicy, self).__init__()
+
+    @staticmethod
+    def _get_headers(request, headers_to_sign):
+        headers = dict((name.lower(), value) for name, value in request.http_request.headers.items() if value)
+        if 'content-length' in headers and headers['content-length'] == '0':
+            del headers['content-length']
+        return '\n'.join(headers.get(x, '') for x in headers_to_sign) + '\n'
+
+    @staticmethod
+    def _get_verb(request):
+        return request.http_request.method + '\n'
+
+    def _get_canonicalized_resource(self, request):
+        uri_path = urlparse(request.http_request.url).path
+        try:
+            if isinstance(request.context.transport, AioHttpTransport) or \
+                    isinstance(getattr(request.context.transport, "_transport", None), AioHttpTransport) or \
+                    isinstance(getattr(getattr(request.context.transport, "_transport", None), "_transport", None),
+                               AioHttpTransport):
+                uri_path = URL(uri_path)
+                return '/' + self.account_name + str(uri_path)
+        except TypeError:
+            pass
+        return '/' + self.account_name + uri_path
+
+    @staticmethod
+    def _get_canonicalized_headers(request):
+        string_to_sign = ''
+        x_ms_headers = []
+        for name, value in request.http_request.headers.items():
+            if name.startswith('x-ms-'):
+                x_ms_headers.append((name.lower(), value))
+        x_ms_headers = _storage_header_sort(x_ms_headers)
+        for name, value in x_ms_headers:
+            if value is not None:
+                string_to_sign += ''.join([name, ':', value, '\n'])
+        return string_to_sign
+
+    @staticmethod
+    def _get_canonicalized_resource_query(request):
+        sorted_queries = list(request.http_request.query.items())
+        sorted_queries.sort()
+
+        string_to_sign = ''
+        for name, value in sorted_queries:
+            if value is not None:
+                string_to_sign += '\n' + name.lower() + ':' + unquote(value)
+
+        return string_to_sign
+
+    def _add_authorization_header(self, request, string_to_sign):
+        try:
+            signature = sign_string(self.account_key, string_to_sign)
+            auth_string = 'SharedKey ' + self.account_name + ':' + signature
+            request.http_request.headers['Authorization'] = auth_string
+        except Exception as ex:
+            # Wrap any error that occurred as signing error
+            # Doing so will clarify/locate the source of problem
+            raise _wrap_exception(ex, AzureSigningError) from ex
+
+    def on_request(self, request):
+        string_to_sign = \
+            self._get_verb(request) + \
+            self._get_headers(
+                request,
+                [
+                    'content-encoding', 'content-language', 'content-length',
+                    'content-md5', 'content-type', 'date', 'if-modified-since',
+                    'if-match', 'if-none-match', 'if-unmodified-since', 'byte_range'
+                ]
+            ) + \
+            self._get_canonicalized_headers(request) + \
+            self._get_canonicalized_resource(request) + \
+            self._get_canonicalized_resource_query(request)
+
+        self._add_authorization_header(request, string_to_sign)
+        # logger.debug("String_to_sign=%s", string_to_sign)
+
+
+class StorageHttpChallenge(object):
+    def __init__(self, challenge):
+        """ Parses an HTTP WWW-Authentication Bearer challenge from the Storage service. """
+        if not challenge:
+            raise ValueError("Challenge cannot be empty")
+
+        self._parameters = {}
+        self.scheme, trimmed_challenge = challenge.strip().split(" ", 1)
+
+        # name=value pairs either comma or space separated with values possibly being
+        # enclosed in quotes
+        for item in re.split('[, ]', trimmed_challenge):
+            comps = item.split("=")
+            if len(comps) == 2:
+                key = comps[0].strip(' "')
+                value = comps[1].strip(' "')
+                if key:
+                    self._parameters[key] = value
+
+        # Extract and verify required parameters
+        self.authorization_uri = self._parameters.get('authorization_uri')
+        if not self.authorization_uri:
+            raise ValueError("Authorization Uri not found")
+
+        self.resource_id = self._parameters.get('resource_id')
+        if not self.resource_id:
+            raise ValueError("Resource id not found")
+
+        uri_path = urlparse(self.authorization_uri).path.lstrip("/")
+        self.tenant_id = uri_path.split("/")[0]
+
+    def get_value(self, key):
+        return self._parameters.get(key)
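A parsing sketch for the challenge class above, using a fabricated tenant ID and the standard storage resource:

    from azure.storage.filedatalake._shared.authentication import StorageHttpChallenge

    challenge = ('Bearer authorization_uri=https://login.microsoftonline.com/'
                 '00000000-0000-0000-0000-000000000000/oauth2/authorize, '
                 'resource_id=https://storage.azure.com')
    parsed = StorageHttpChallenge(challenge)
    print(parsed.tenant_id)    # '00000000-0000-0000-0000-000000000000'
    print(parsed.resource_id)  # 'https://storage.azure.com'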
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_shared/base_client.py b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_shared/base_client.py
new file mode 100644
index 00000000..ceb75bf0
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_shared/base_client.py
@@ -0,0 +1,458 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+import logging
+import uuid
+from typing import (
+    Any,
+    cast,
+    Dict,
+    Iterator,
+    Optional,
+    Tuple,
+    TYPE_CHECKING,
+    Union,
+)
+from urllib.parse import parse_qs, quote
+
+from azure.core.credentials import AzureSasCredential, AzureNamedKeyCredential, TokenCredential
+from azure.core.exceptions import HttpResponseError
+from azure.core.pipeline import Pipeline
+from azure.core.pipeline.transport import HttpTransport, RequestsTransport  # pylint: disable=non-abstract-transport-import, no-name-in-module
+from azure.core.pipeline.policies import (
+    AzureSasCredentialPolicy,
+    ContentDecodePolicy,
+    DistributedTracingPolicy,
+    HttpLoggingPolicy,
+    ProxyPolicy,
+    RedirectPolicy,
+    UserAgentPolicy,
+)
+
+from .authentication import SharedKeyCredentialPolicy
+from .constants import CONNECTION_TIMEOUT, DEFAULT_OAUTH_SCOPE, READ_TIMEOUT, SERVICE_HOST_BASE, STORAGE_OAUTH_SCOPE
+from .models import LocationMode, StorageConfiguration
+from .policies import (
+    ExponentialRetry,
+    QueueMessagePolicy,
+    StorageBearerTokenCredentialPolicy,
+    StorageContentValidation,
+    StorageHeadersPolicy,
+    StorageHosts,
+    StorageLoggingPolicy,
+    StorageRequestHook,
+    StorageResponseHook,
+)
+from .request_handlers import serialize_batch_body, _get_batch_request_delimiter
+from .response_handlers import PartialBatchErrorException, process_storage_error
+from .shared_access_signature import QueryStringConstants
+from .._version import VERSION
+from .._shared_access_signature import _is_credential_sastoken
+
+if TYPE_CHECKING:
+    from azure.core.credentials_async import AsyncTokenCredential
+    from azure.core.pipeline.transport import HttpRequest, HttpResponse  # pylint: disable=C4756
+
+_LOGGER = logging.getLogger(__name__)
+_SERVICE_PARAMS = {
+    "blob": {"primary": "BLOBENDPOINT", "secondary": "BLOBSECONDARYENDPOINT"},
+    "queue": {"primary": "QUEUEENDPOINT", "secondary": "QUEUESECONDARYENDPOINT"},
+    "file": {"primary": "FILEENDPOINT", "secondary": "FILESECONDARYENDPOINT"},
+    "dfs": {"primary": "BLOBENDPOINT", "secondary": "BLOBENDPOINT"},
+}
+
+
+class StorageAccountHostsMixin(object):
+    _client: Any
+    def __init__(
+        self,
+        parsed_url: Any,
+        service: str,
+        credential: Optional[Union[str, Dict[str, str], AzureNamedKeyCredential, AzureSasCredential, "AsyncTokenCredential", TokenCredential]] = None,  # pylint: disable=line-too-long
+        **kwargs: Any
+    ) -> None:
+        self._location_mode = kwargs.get("_location_mode", LocationMode.PRIMARY)
+        self._hosts = kwargs.get("_hosts")
+        self.scheme = parsed_url.scheme
+        self._is_localhost = False
+
+        if service not in ["blob", "queue", "file-share", "dfs"]:
+            raise ValueError(f"Invalid service: {service}")
+        service_name = service.split('-')[0]
+        account = parsed_url.netloc.split(f".{service_name}.core.")
+
+        self.account_name = account[0] if len(account) > 1 else None
+        if not self.account_name and (parsed_url.netloc.startswith("localhost")
+                                      or parsed_url.netloc.startswith("127.0.0.1")):
+            self._is_localhost = True
+            self.account_name = parsed_url.path.strip("/")
+
+        self.credential = _format_shared_key_credential(self.account_name, credential)
+        if self.scheme.lower() != "https" and hasattr(self.credential, "get_token"):
+            raise ValueError("Token credential is only supported with HTTPS.")
+
+        secondary_hostname = None
+        if hasattr(self.credential, "account_name"):
+            self.account_name = self.credential.account_name
+            secondary_hostname = f"{self.credential.account_name}-secondary.{service_name}.{SERVICE_HOST_BASE}"
+
+        if not self._hosts:
+            if len(account) > 1:
+                secondary_hostname = parsed_url.netloc.replace(account[0], account[0] + "-secondary")
+            if kwargs.get("secondary_hostname"):
+                secondary_hostname = kwargs["secondary_hostname"]
+            primary_hostname = (parsed_url.netloc + parsed_url.path).rstrip('/')
+            self._hosts = {LocationMode.PRIMARY: primary_hostname, LocationMode.SECONDARY: secondary_hostname}
+
+        self._sdk_moniker = f"storage-{service}/{VERSION}"
+        self._config, self._pipeline = self._create_pipeline(self.credential, sdk_moniker=self._sdk_moniker, **kwargs)
+
+    def __enter__(self):
+        self._client.__enter__()
+        return self
+
+    def __exit__(self, *args):
+        self._client.__exit__(*args)
+
+    def close(self):
+        """ This method is to close the sockets opened by the client.
+        It need not be used when using with a context manager.
+        """
+        self._client.close()
+
+    @property
+    def url(self):
+        """The full endpoint URL to this entity, including SAS token if used.
+
+        This could be either the primary endpoint,
+        or the secondary endpoint depending on the current :func:`location_mode`.
+        :returns: The full endpoint URL to this entity, including SAS token if used.
+        :rtype: str
+        """
+        return self._format_url(self._hosts[self._location_mode])
+
+    @property
+    def primary_endpoint(self):
+        """The full primary endpoint URL.
+
+        :rtype: str
+        """
+        return self._format_url(self._hosts[LocationMode.PRIMARY])
+
+    @property
+    def primary_hostname(self):
+        """The hostname of the primary endpoint.
+
+        :rtype: str
+        """
+        return self._hosts[LocationMode.PRIMARY]
+
+    @property
+    def secondary_endpoint(self):
+        """The full secondary endpoint URL if configured.
+
+        If not available a ValueError will be raised. To explicitly specify a secondary hostname, use the optional
+        `secondary_hostname` keyword argument on instantiation.
+
+        :rtype: str
+        :raise ValueError:
+        """
+        if not self._hosts[LocationMode.SECONDARY]:
+            raise ValueError("No secondary host configured.")
+        return self._format_url(self._hosts[LocationMode.SECONDARY])
+
+    @property
+    def secondary_hostname(self):
+        """The hostname of the secondary endpoint.
+
+        If not available this will be None. To explicitly specify a secondary hostname, use the optional
+        `secondary_hostname` keyword argument on instantiation.
+
+        :rtype: Optional[str]
+        """
+        return self._hosts[LocationMode.SECONDARY]
+
+    @property
+    def location_mode(self):
+        """The location mode that the client is currently using.
+
+        By default this will be "primary". Options include "primary" and "secondary".
+
+        :rtype: str
+        """
+
+        return self._location_mode
+
+    @location_mode.setter
+    def location_mode(self, value):
+        if self._hosts.get(value):
+            self._location_mode = value
+            self._client._config.url = self.url  # pylint: disable=protected-access
+        else:
+            raise ValueError(f"No host URL for location mode: {value}")
+
+    @property
+    def api_version(self):
+        """The version of the Storage API used for requests.
+
+        :rtype: str
+        """
+        return self._client._config.version  # pylint: disable=protected-access
+
+    def _format_query_string(
+        self, sas_token: Optional[str],
+        credential: Optional[Union[str, Dict[str, str], "AzureNamedKeyCredential", "AzureSasCredential", TokenCredential]],  # pylint: disable=line-too-long
+        snapshot: Optional[str] = None,
+        share_snapshot: Optional[str] = None
+    ) -> Tuple[str, Optional[Union[str, Dict[str, str], "AzureNamedKeyCredential", "AzureSasCredential", TokenCredential]]]:  # pylint: disable=line-too-long
+        query_str = "?"
+        if snapshot:
+            query_str += f"snapshot={snapshot}&"
+        if share_snapshot:
+            query_str += f"sharesnapshot={share_snapshot}&"
+        if sas_token and isinstance(credential, AzureSasCredential):
+            raise ValueError(
+                "You cannot use AzureSasCredential when the resource URI also contains a Shared Access Signature.")
+        if _is_credential_sastoken(credential):
+            credential = cast(str, credential)
+            query_str += credential.lstrip("?")
+            credential = None
+        elif sas_token:
+            query_str += sas_token
+        return query_str.rstrip("?&"), credential
+
+    def _create_pipeline(
+        self, credential: Optional[Union[str, Dict[str, str], AzureNamedKeyCredential, AzureSasCredential, TokenCredential]] = None,  # pylint: disable=line-too-long
+        **kwargs: Any
+    ) -> Tuple[StorageConfiguration, Pipeline]:
+        self._credential_policy: Any = None
+        if hasattr(credential, "get_token"):
+            if kwargs.get('audience'):
+                audience = str(kwargs.pop('audience')).rstrip('/') + DEFAULT_OAUTH_SCOPE
+            else:
+                audience = STORAGE_OAUTH_SCOPE
+            self._credential_policy = StorageBearerTokenCredentialPolicy(cast(TokenCredential, credential), audience)
+        elif isinstance(credential, SharedKeyCredentialPolicy):
+            self._credential_policy = credential
+        elif isinstance(credential, AzureSasCredential):
+            self._credential_policy = AzureSasCredentialPolicy(credential)
+        elif credential is not None:
+            raise TypeError(f"Unsupported credential: {type(credential)}")
+
+        config = kwargs.get("_configuration") or create_configuration(**kwargs)
+        if kwargs.get("_pipeline"):
+            return config, kwargs["_pipeline"]
+        transport = kwargs.get("transport")
+        kwargs.setdefault("connection_timeout", CONNECTION_TIMEOUT)
+        kwargs.setdefault("read_timeout", READ_TIMEOUT)
+        if not transport:
+            transport = RequestsTransport(**kwargs)
+        policies = [
+            QueueMessagePolicy(),
+            config.proxy_policy,
+            config.user_agent_policy,
+            StorageContentValidation(),
+            ContentDecodePolicy(response_encoding="utf-8"),
+            RedirectPolicy(**kwargs),
+            StorageHosts(hosts=self._hosts, **kwargs),
+            config.retry_policy,
+            config.headers_policy,
+            StorageRequestHook(**kwargs),
+            self._credential_policy,
+            config.logging_policy,
+            StorageResponseHook(**kwargs),
+            DistributedTracingPolicy(**kwargs),
+            HttpLoggingPolicy(**kwargs)
+        ]
+        if kwargs.get("_additional_pipeline_policies"):
+            policies = policies + kwargs.get("_additional_pipeline_policies")  # type: ignore
+        config.transport = transport  # type: ignore
+        return config, Pipeline(transport, policies=policies)
+
+    def _batch_send(
+        self,
+        *reqs: "HttpRequest",
+        **kwargs: Any
+    ) -> Iterator["HttpResponse"]:
+        """Given a series of request, do a Storage batch call.
+
+        :param HttpRequest reqs: A collection of HttpRequest objects.
+        :returns: An iterator of HttpResponse objects.
+        :rtype: Iterator[HttpResponse]
+        """
+        # Pop it here, so requests doesn't feel bad about additional kwarg
+        raise_on_any_failure = kwargs.pop("raise_on_any_failure", True)
+        batch_id = str(uuid.uuid1())
+
+        request = self._client._client.post(  # pylint: disable=protected-access
+            url=(
+                f'{self.scheme}://{self.primary_hostname}/'
+                f"{kwargs.pop('path', '')}?{kwargs.pop('restype', '')}"
+                f"comp=batch{kwargs.pop('sas', '')}{kwargs.pop('timeout', '')}"
+            ),
+            headers={
+                'x-ms-version': self.api_version,
+                "Content-Type": "multipart/mixed; boundary=" + _get_batch_request_delimiter(batch_id, False, False)
+            }
+        )
+
+        policies = [StorageHeadersPolicy()]
+        if self._credential_policy:
+            policies.append(self._credential_policy)
+
+        request.set_multipart_mixed(
+            *reqs,
+            policies=policies,
+            enforce_https=False
+        )
+
+        Pipeline._prepare_multipart_mixed_request(request)  # pylint: disable=protected-access
+        body = serialize_batch_body(request.multipart_mixed_info[0], batch_id)
+        request.set_bytes_body(body)
+
+        temp = request.multipart_mixed_info
+        request.multipart_mixed_info = None
+        pipeline_response = self._pipeline.run(
+            request, **kwargs
+        )
+        response = pipeline_response.http_response
+        request.multipart_mixed_info = temp
+
+        try:
+            if response.status_code not in [202]:
+                raise HttpResponseError(response=response)
+            parts = response.parts()
+            if raise_on_any_failure:
+                parts = list(response.parts())
+                if any(p for p in parts if not 200 <= p.status_code < 300):
+                    error = PartialBatchErrorException(
+                        message="There is a partial failure in the batch operation.",
+                        response=response, parts=parts
+                    )
+                    raise error
+                return iter(parts)
+            return parts  # type: ignore [no-any-return]
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+
+class TransportWrapper(HttpTransport):
+    """Wrapper class that ensures that an inner client created
+    by a `get_client` method does not close the outer transport for the parent
+    when used in a context manager.
+    """
+    def __init__(self, transport):
+        self._transport = transport
+
+    def send(self, request, **kwargs):
+        return self._transport.send(request, **kwargs)
+
+    def open(self):
+        pass
+
+    def close(self):
+        pass
+
+    def __enter__(self):
+        pass
+
+    def __exit__(self, *args):
+        pass
+
+
+def _format_shared_key_credential(
+    account_name: str,
+    credential: Optional[Union[str, Dict[str, str], AzureNamedKeyCredential, AzureSasCredential, "AsyncTokenCredential", TokenCredential]] = None  # pylint: disable=line-too-long
+) -> Any:
+    if isinstance(credential, str):
+        if not account_name:
+            raise ValueError("Unable to determine account name for shared key credential.")
+        credential = {"account_name": account_name, "account_key": credential}
+    if isinstance(credential, dict):
+        if "account_name" not in credential:
+            raise ValueError("Shared key credential missing 'account_name")
+        if "account_key" not in credential:
+            raise ValueError("Shared key credential missing 'account_key")
+        return SharedKeyCredentialPolicy(**credential)
+    if isinstance(credential, AzureNamedKeyCredential):
+        return SharedKeyCredentialPolicy(credential.named_key.name, credential.named_key.key)
+    return credential
+
+
+def parse_connection_str(
+    conn_str: str,
+    credential: Optional[Union[str, Dict[str, str], AzureNamedKeyCredential, AzureSasCredential, TokenCredential]],
+    service: str
+) -> Tuple[str, Optional[str], Optional[Union[str, Dict[str, str], AzureNamedKeyCredential, AzureSasCredential, TokenCredential]]]:  # pylint: disable=line-too-long
+    conn_str = conn_str.rstrip(";")
+    conn_settings_list = [s.split("=", 1) for s in conn_str.split(";")]
+    if any(len(tup) != 2 for tup in conn_settings_list):
+        raise ValueError("Connection string is either blank or malformed.")
+    conn_settings = dict((key.upper(), val) for key, val in conn_settings_list)
+    endpoints = _SERVICE_PARAMS[service]
+    primary = None
+    secondary = None
+    if not credential:
+        try:
+            credential = {"account_name": conn_settings["ACCOUNTNAME"], "account_key": conn_settings["ACCOUNTKEY"]}
+        except KeyError:
+            credential = conn_settings.get("SHAREDACCESSSIGNATURE")
+    if endpoints["primary"] in conn_settings:
+        primary = conn_settings[endpoints["primary"]]
+        if endpoints["secondary"] in conn_settings:
+            secondary = conn_settings[endpoints["secondary"]]
+    else:
+        if endpoints["secondary"] in conn_settings:
+            raise ValueError("Connection string specifies only secondary endpoint.")
+        try:
+            primary = (
+                f"{conn_settings['DEFAULTENDPOINTSPROTOCOL']}://"
+                f"{conn_settings['ACCOUNTNAME']}.{service}.{conn_settings['ENDPOINTSUFFIX']}"
+            )
+            secondary = (
+                f"{conn_settings['ACCOUNTNAME']}-secondary."
+                f"{service}.{conn_settings['ENDPOINTSUFFIX']}"
+            )
+        except KeyError:
+            pass
+
+    if not primary:
+        try:
+            primary = (
+                f"https://{conn_settings['ACCOUNTNAME']}."
+                f"{service}.{conn_settings.get('ENDPOINTSUFFIX', SERVICE_HOST_BASE)}"
+            )
+        except KeyError as exc:
+            raise ValueError("Connection string missing required connection details.") from exc
+    if service == "dfs":
+        primary = primary.replace(".blob.", ".dfs.")
+        if secondary:
+            secondary = secondary.replace(".blob.", ".dfs.")
+    return primary, secondary, credential
+
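A sketch of the parser above on a typical connection string (fake account key), requesting the dfs endpoints:

    from azure.storage.filedatalake._shared.base_client import parse_connection_str

    conn = ('DefaultEndpointsProtocol=https;AccountName=myaccount;'
            'AccountKey=aGVsbG8=;EndpointSuffix=core.windows.net')
    primary, secondary, cred = parse_connection_str(conn, credential=None, service='dfs')
    # primary   -> 'https://myaccount.dfs.core.windows.net'
    # secondary -> 'myaccount-secondary.dfs.core.windows.net'
    # cred      -> {'account_name': 'myaccount', 'account_key': 'aGVsbG8='}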
+
+def create_configuration(**kwargs: Any) -> StorageConfiguration:
+    # Backwards compatibility if someone is not passing sdk_moniker
+    if not kwargs.get("sdk_moniker"):
+        kwargs["sdk_moniker"] = f"storage-{kwargs.pop('storage_sdk')}/{VERSION}"
+    config = StorageConfiguration(**kwargs)
+    config.headers_policy = StorageHeadersPolicy(**kwargs)
+    config.user_agent_policy = UserAgentPolicy(**kwargs)
+    config.retry_policy = kwargs.get("retry_policy") or ExponentialRetry(**kwargs)
+    config.logging_policy = StorageLoggingPolicy(**kwargs)
+    config.proxy_policy = ProxyPolicy(**kwargs)
+    return config
+
+
+def parse_query(query_str: str) -> Tuple[Optional[str], Optional[str]]:
+    sas_values = QueryStringConstants.to_list()
+    parsed_query = {k: v[0] for k, v in parse_qs(query_str).items()}
+    sas_params = [f"{k}={quote(v, safe='')}" for k, v in parsed_query.items() if k in sas_values]
+    sas_token = None
+    if sas_params:
+        sas_token = "&".join(sas_params)
+
+    snapshot = parsed_query.get("snapshot") or parsed_query.get("sharesnapshot")
+    return snapshot, sas_token
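And a sketch of the query splitter above; 'sv' and 'sig' are standard SAS parameters, so they land in the SAS token while the snapshot is pulled out separately:

    from azure.storage.filedatalake._shared.base_client import parse_query

    snapshot, sas = parse_query('snapshot=2025-01-05T00:00:00Z&sv=2025-05-05&sig=abc%3D')
    # snapshot -> '2025-01-05T00:00:00Z'
    # sas      -> 'sv=2025-05-05&sig=abc%3D'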
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_shared/base_client_async.py b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_shared/base_client_async.py
new file mode 100644
index 00000000..6186b29d
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_shared/base_client_async.py
@@ -0,0 +1,280 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+# mypy: disable-error-code="attr-defined"
+
+import logging
+from typing import Any, cast, Dict, Optional, Tuple, TYPE_CHECKING, Union
+
+from azure.core.async_paging import AsyncList
+from azure.core.credentials import AzureNamedKeyCredential, AzureSasCredential
+from azure.core.credentials_async import AsyncTokenCredential
+from azure.core.exceptions import HttpResponseError
+from azure.core.pipeline import AsyncPipeline
+from azure.core.pipeline.policies import (
+    AsyncRedirectPolicy,
+    AzureSasCredentialPolicy,
+    ContentDecodePolicy,
+    DistributedTracingPolicy,
+    HttpLoggingPolicy,
+)
+from azure.core.pipeline.transport import AsyncHttpTransport
+
+from .authentication import SharedKeyCredentialPolicy
+from .base_client import create_configuration
+from .constants import CONNECTION_TIMEOUT, DEFAULT_OAUTH_SCOPE, READ_TIMEOUT, SERVICE_HOST_BASE, STORAGE_OAUTH_SCOPE
+from .models import StorageConfiguration
+from .policies import (
+    QueueMessagePolicy,
+    StorageContentValidation,
+    StorageHeadersPolicy,
+    StorageHosts,
+    StorageRequestHook,
+)
+from .policies_async import AsyncStorageBearerTokenCredentialPolicy, AsyncStorageResponseHook
+from .response_handlers import PartialBatchErrorException, process_storage_error
+from .._shared_access_signature import _is_credential_sastoken
+
+if TYPE_CHECKING:
+    from azure.core.pipeline.transport import HttpRequest, HttpResponse  # pylint: disable=C4756
+_LOGGER = logging.getLogger(__name__)
+
+_SERVICE_PARAMS = {
+    "blob": {"primary": "BLOBENDPOINT", "secondary": "BLOBSECONDARYENDPOINT"},
+    "queue": {"primary": "QUEUEENDPOINT", "secondary": "QUEUESECONDARYENDPOINT"},
+    "file": {"primary": "FILEENDPOINT", "secondary": "FILESECONDARYENDPOINT"},
+    "dfs": {"primary": "BLOBENDPOINT", "secondary": "BLOBENDPOINT"},
+}
+
+
+class AsyncStorageAccountHostsMixin(object):
+
+    def __enter__(self):
+        raise TypeError("Async client only supports 'async with'.")
+
+    def __exit__(self, *args):
+        pass
+
+    async def __aenter__(self):
+        await self._client.__aenter__()
+        return self
+
+    async def __aexit__(self, *args):
+        await self._client.__aexit__(*args)
+
+    async def close(self):
+        """ This method is to close the sockets opened by the client.
+        It need not be used when using with a context manager.
+        """
+        await self._client.close()
+
+    def _format_query_string(
+        self, sas_token: Optional[str],
+        credential: Optional[Union[str, Dict[str, str], "AzureNamedKeyCredential", "AzureSasCredential", AsyncTokenCredential]],  # pylint: disable=line-too-long
+        snapshot: Optional[str] = None,
+        share_snapshot: Optional[str] = None
+    ) -> Tuple[str, Optional[Union[str, Dict[str, str], "AzureNamedKeyCredential", "AzureSasCredential", AsyncTokenCredential]]]:  # pylint: disable=line-too-long
+        query_str = "?"
+        if snapshot:
+            query_str += f"snapshot={snapshot}&"
+        if share_snapshot:
+            query_str += f"sharesnapshot={share_snapshot}&"
+        if sas_token and isinstance(credential, AzureSasCredential):
+            raise ValueError(
+                "You cannot use AzureSasCredential when the resource URI also contains a Shared Access Signature.")
+        if _is_credential_sastoken(credential):
+            query_str += credential.lstrip("?")  # type: ignore [union-attr]
+            credential = None
+        elif sas_token:
+            query_str += sas_token
+        return query_str.rstrip("?&"), credential
+
+    def _create_pipeline(
+        self, credential: Optional[Union[str, Dict[str, str], AzureNamedKeyCredential, AzureSasCredential, AsyncTokenCredential]] = None, # pylint: disable=line-too-long
+        **kwargs: Any
+    ) -> Tuple[StorageConfiguration, AsyncPipeline]:
+        self._credential_policy: Optional[
+            Union[AsyncStorageBearerTokenCredentialPolicy,
+            SharedKeyCredentialPolicy,
+            AzureSasCredentialPolicy]] = None
+        if hasattr(credential, 'get_token'):
+            if kwargs.get('audience'):
+                audience = str(kwargs.pop('audience')).rstrip('/') + DEFAULT_OAUTH_SCOPE
+            else:
+                audience = STORAGE_OAUTH_SCOPE
+            self._credential_policy = AsyncStorageBearerTokenCredentialPolicy(
+                                        cast(AsyncTokenCredential, credential), audience)
+        elif isinstance(credential, SharedKeyCredentialPolicy):
+            self._credential_policy = credential
+        elif isinstance(credential, AzureSasCredential):
+            self._credential_policy = AzureSasCredentialPolicy(credential)
+        elif credential is not None:
+            raise TypeError(f"Unsupported credential: {type(credential)}")
+        config = kwargs.get('_configuration') or create_configuration(**kwargs)
+        if kwargs.get('_pipeline'):
+            return config, kwargs['_pipeline']
+        transport = kwargs.get('transport')
+        kwargs.setdefault("connection_timeout", CONNECTION_TIMEOUT)
+        kwargs.setdefault("read_timeout", READ_TIMEOUT)
+        if not transport:
+            try:
+                from azure.core.pipeline.transport import AioHttpTransport  # pylint: disable=non-abstract-transport-import
+            except ImportError as exc:
+                raise ImportError("Unable to create async transport. Please check aiohttp is installed.") from exc
+            transport = AioHttpTransport(**kwargs)
+        hosts = self._hosts
+        policies = [
+            QueueMessagePolicy(),
+            config.proxy_policy,
+            config.user_agent_policy,
+            StorageContentValidation(),
+            ContentDecodePolicy(response_encoding="utf-8"),
+            AsyncRedirectPolicy(**kwargs),
+            StorageHosts(hosts=hosts, **kwargs),
+            config.retry_policy,
+            config.headers_policy,
+            StorageRequestHook(**kwargs),
+            self._credential_policy,
+            config.logging_policy,
+            AsyncStorageResponseHook(**kwargs),
+            DistributedTracingPolicy(**kwargs),
+            HttpLoggingPolicy(**kwargs),
+        ]
+        if kwargs.get("_additional_pipeline_policies"):
+            policies = policies + kwargs.get("_additional_pipeline_policies")  # type: ignore
+        config.transport = transport  # type: ignore
+        return config, AsyncPipeline(transport, policies=policies)  # type: ignore
+
+    async def _batch_send(
+        self,
+        *reqs: "HttpRequest",
+        **kwargs: Any
+    ) -> AsyncList["HttpResponse"]:
+        """Given a series of request, do a Storage batch call.
+
+        :param HttpRequest reqs: A collection of HttpRequest objects.
+        :returns: An AsyncList of HttpResponse objects.
+        :rtype: AsyncList[HttpResponse]
+        """
+        # Pop it here, so requests doesn't feel bad about additional kwarg
+        raise_on_any_failure = kwargs.pop("raise_on_any_failure", True)
+        request = self._client._client.post(  # pylint: disable=protected-access
+            url=(
+                f'{self.scheme}://{self.primary_hostname}/'
+                f"{kwargs.pop('path', '')}?{kwargs.pop('restype', '')}"
+                f"comp=batch{kwargs.pop('sas', '')}{kwargs.pop('timeout', '')}"
+            ),
+            headers={
+                'x-ms-version': self.api_version
+            }
+        )
+
+        policies = [StorageHeadersPolicy()]
+        if self._credential_policy:
+            policies.append(self._credential_policy)  # type: ignore
+
+        request.set_multipart_mixed(
+            *reqs,
+            policies=policies,
+            enforce_https=False
+        )
+
+        pipeline_response = await self._pipeline.run(
+            request, **kwargs
+        )
+        response = pipeline_response.http_response
+
+        try:
+            if response.status_code not in [202]:
+                raise HttpResponseError(response=response)
+            parts = response.parts()  # Returns an AsyncIterator
+            if raise_on_any_failure:
+                parts_list = []
+                async for part in parts:
+                    parts_list.append(part)
+                if any(p for p in parts_list if not 200 <= p.status_code < 300):
+                    error = PartialBatchErrorException(
+                        message="There is a partial failure in the batch operation.",
+                        response=response, parts=parts_list
+                    )
+                    raise error
+                return AsyncList(parts_list)
+            return parts  # type: ignore [no-any-return]
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+def parse_connection_str(
+    conn_str: str,
+    credential: Optional[Union[str, Dict[str, str], AzureNamedKeyCredential, AzureSasCredential, AsyncTokenCredential]],
+    service: str
+) -> Tuple[str, Optional[str], Optional[Union[str, Dict[str, str], AzureNamedKeyCredential, AzureSasCredential, AsyncTokenCredential]]]: # pylint: disable=line-too-long
+    conn_str = conn_str.rstrip(";")
+    conn_settings_list = [s.split("=", 1) for s in conn_str.split(";")]
+    if any(len(tup) != 2 for tup in conn_settings_list):
+        raise ValueError("Connection string is either blank or malformed.")
+    conn_settings = dict((key.upper(), val) for key, val in conn_settings_list)
+    endpoints = _SERVICE_PARAMS[service]
+    primary = None
+    secondary = None
+    if not credential:
+        try:
+            credential = {"account_name": conn_settings["ACCOUNTNAME"], "account_key": conn_settings["ACCOUNTKEY"]}
+        except KeyError:
+            credential = conn_settings.get("SHAREDACCESSSIGNATURE")
+    if endpoints["primary"] in conn_settings:
+        primary = conn_settings[endpoints["primary"]]
+        if endpoints["secondary"] in conn_settings:
+            secondary = conn_settings[endpoints["secondary"]]
+    else:
+        if endpoints["secondary"] in conn_settings:
+            raise ValueError("Connection string specifies only secondary endpoint.")
+        try:
+            primary = (
+                f"{conn_settings['DEFAULTENDPOINTSPROTOCOL']}://"
+                f"{conn_settings['ACCOUNTNAME']}.{service}.{conn_settings['ENDPOINTSUFFIX']}"
+            )
+            secondary = (
+                f"{conn_settings['ACCOUNTNAME']}-secondary."
+                f"{service}.{conn_settings['ENDPOINTSUFFIX']}"
+            )
+        except KeyError:
+            pass
+
+    if not primary:
+        try:
+            primary = (
+                f"https://{conn_settings['ACCOUNTNAME']}."
+                f"{service}.{conn_settings.get('ENDPOINTSUFFIX', SERVICE_HOST_BASE)}"
+            )
+        except KeyError as exc:
+            raise ValueError("Connection string missing required connection details.") from exc
+    if service == "dfs":
+        primary = primary.replace(".blob.", ".dfs.")
+        if secondary:
+            secondary = secondary.replace(".blob.", ".dfs.")
+    return primary, secondary, credential
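+
+# Illustrative example ("myacct" is a placeholder account name): for a
+# connection string such as
+# "DefaultEndpointsProtocol=https;AccountName=myacct;AccountKey=<key>;EndpointSuffix=core.windows.net"
+# parse_connection_str(conn_str, None, "blob") returns the primary endpoint
+# "https://myacct.blob.core.windows.net", the secondary host
+# "myacct-secondary.blob.core.windows.net", and a credential dict built from
+# the ACCOUNTNAME and ACCOUNTKEY settings.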
+
+class AsyncTransportWrapper(AsyncHttpTransport):
+    """Wrapper class that ensures that an inner client created
+    by a `get_client` method does not close the outer transport for the parent
+    when used in a context manager.
+    """
+    def __init__(self, async_transport):
+        self._transport = async_transport
+
+    async def send(self, request, **kwargs):
+        return await self._transport.send(request, **kwargs)
+
+    async def open(self):
+        pass
+
+    async def close(self):
+        pass
+
+    async def __aenter__(self):
+        pass
+
+    async def __aexit__(self, *args):
+        pass
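+
+# Illustrative sketch (hypothetical names): share the parent client's transport
+# with a child client so that closing the child leaves the parent usable:
+#
+#     transport = AsyncTransportWrapper(parent_client._pipeline._transport)
+#     async with child_client_cls(..., transport=transport) as child:
+#         ...  # __aexit__ above is a no-op, so the shared transport stays open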
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_shared/constants.py b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_shared/constants.py
new file mode 100644
index 00000000..0b4b029a
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_shared/constants.py
@@ -0,0 +1,19 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+
+from .._serialize import _SUPPORTED_API_VERSIONS
+
+
+X_MS_VERSION = _SUPPORTED_API_VERSIONS[-1]
+
+# Default socket timeouts, in seconds
+CONNECTION_TIMEOUT = 20
+READ_TIMEOUT = 60
+
+DEFAULT_OAUTH_SCOPE = "/.default"
+STORAGE_OAUTH_SCOPE = "https://storage.azure.com/.default"
+
+SERVICE_HOST_BASE = 'core.windows.net'
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_shared/models.py b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_shared/models.py
new file mode 100644
index 00000000..183d6f64
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_shared/models.py
@@ -0,0 +1,585 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+# pylint: disable=too-many-instance-attributes
+from enum import Enum
+from typing import Optional
+
+from azure.core import CaseInsensitiveEnumMeta
+from azure.core.configuration import Configuration
+from azure.core.pipeline.policies import UserAgentPolicy
+
+
+def get_enum_value(value):
+    if value is None or value in ["None", ""]:
+        return None
+    try:
+        return value.value
+    except AttributeError:
+        return value
+
+
+class StorageErrorCode(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+
+    # Generic storage values
+    ACCOUNT_ALREADY_EXISTS = "AccountAlreadyExists"
+    ACCOUNT_BEING_CREATED = "AccountBeingCreated"
+    ACCOUNT_IS_DISABLED = "AccountIsDisabled"
+    AUTHENTICATION_FAILED = "AuthenticationFailed"
+    AUTHORIZATION_FAILURE = "AuthorizationFailure"
+    NO_AUTHENTICATION_INFORMATION = "NoAuthenticationInformation"
+    CONDITION_HEADERS_NOT_SUPPORTED = "ConditionHeadersNotSupported"
+    CONDITION_NOT_MET = "ConditionNotMet"
+    EMPTY_METADATA_KEY = "EmptyMetadataKey"
+    INSUFFICIENT_ACCOUNT_PERMISSIONS = "InsufficientAccountPermissions"
+    INTERNAL_ERROR = "InternalError"
+    INVALID_AUTHENTICATION_INFO = "InvalidAuthenticationInfo"
+    INVALID_HEADER_VALUE = "InvalidHeaderValue"
+    INVALID_HTTP_VERB = "InvalidHttpVerb"
+    INVALID_INPUT = "InvalidInput"
+    INVALID_MD5 = "InvalidMd5"
+    INVALID_METADATA = "InvalidMetadata"
+    INVALID_QUERY_PARAMETER_VALUE = "InvalidQueryParameterValue"
+    INVALID_RANGE = "InvalidRange"
+    INVALID_RESOURCE_NAME = "InvalidResourceName"
+    INVALID_URI = "InvalidUri"
+    INVALID_XML_DOCUMENT = "InvalidXmlDocument"
+    INVALID_XML_NODE_VALUE = "InvalidXmlNodeValue"
+    MD5_MISMATCH = "Md5Mismatch"
+    METADATA_TOO_LARGE = "MetadataTooLarge"
+    MISSING_CONTENT_LENGTH_HEADER = "MissingContentLengthHeader"
+    MISSING_REQUIRED_QUERY_PARAMETER = "MissingRequiredQueryParameter"
+    MISSING_REQUIRED_HEADER = "MissingRequiredHeader"
+    MISSING_REQUIRED_XML_NODE = "MissingRequiredXmlNode"
+    MULTIPLE_CONDITION_HEADERS_NOT_SUPPORTED = "MultipleConditionHeadersNotSupported"
+    OPERATION_TIMED_OUT = "OperationTimedOut"
+    OUT_OF_RANGE_INPUT = "OutOfRangeInput"
+    OUT_OF_RANGE_QUERY_PARAMETER_VALUE = "OutOfRangeQueryParameterValue"
+    REQUEST_BODY_TOO_LARGE = "RequestBodyTooLarge"
+    RESOURCE_TYPE_MISMATCH = "ResourceTypeMismatch"
+    REQUEST_URL_FAILED_TO_PARSE = "RequestUrlFailedToParse"
+    RESOURCE_ALREADY_EXISTS = "ResourceAlreadyExists"
+    RESOURCE_NOT_FOUND = "ResourceNotFound"
+    SERVER_BUSY = "ServerBusy"
+    UNSUPPORTED_HEADER = "UnsupportedHeader"
+    UNSUPPORTED_XML_NODE = "UnsupportedXmlNode"
+    UNSUPPORTED_QUERY_PARAMETER = "UnsupportedQueryParameter"
+    UNSUPPORTED_HTTP_VERB = "UnsupportedHttpVerb"
+
+    # Blob values
+    APPEND_POSITION_CONDITION_NOT_MET = "AppendPositionConditionNotMet"
+    BLOB_ACCESS_TIER_NOT_SUPPORTED_FOR_ACCOUNT_TYPE = "BlobAccessTierNotSupportedForAccountType"
+    BLOB_ALREADY_EXISTS = "BlobAlreadyExists"
+    BLOB_NOT_FOUND = "BlobNotFound"
+    BLOB_OVERWRITTEN = "BlobOverwritten"
+    BLOB_TIER_INADEQUATE_FOR_CONTENT_LENGTH = "BlobTierInadequateForContentLength"
+    BLOCK_COUNT_EXCEEDS_LIMIT = "BlockCountExceedsLimit"
+    BLOCK_LIST_TOO_LONG = "BlockListTooLong"
+    CANNOT_CHANGE_TO_LOWER_TIER = "CannotChangeToLowerTier"
+    CANNOT_VERIFY_COPY_SOURCE = "CannotVerifyCopySource"
+    CONTAINER_ALREADY_EXISTS = "ContainerAlreadyExists"
+    CONTAINER_BEING_DELETED = "ContainerBeingDeleted"
+    CONTAINER_DISABLED = "ContainerDisabled"
+    CONTAINER_NOT_FOUND = "ContainerNotFound"
+    CONTENT_LENGTH_LARGER_THAN_TIER_LIMIT = "ContentLengthLargerThanTierLimit"
+    COPY_ACROSS_ACCOUNTS_NOT_SUPPORTED = "CopyAcrossAccountsNotSupported"
+    COPY_ID_MISMATCH = "CopyIdMismatch"
+    FEATURE_VERSION_MISMATCH = "FeatureVersionMismatch"
+    INCREMENTAL_COPY_BLOB_MISMATCH = "IncrementalCopyBlobMismatch"
+    INCREMENTAL_COPY_OF_EARLIER_VERSION_SNAPSHOT_NOT_ALLOWED = "IncrementalCopyOfEarlierVersionSnapshotNotAllowed"
+    #: Deprecated: Please use INCREMENTAL_COPY_OF_EARLIER_VERSION_SNAPSHOT_NOT_ALLOWED instead.
+    INCREMENTAL_COPY_OF_ERALIER_VERSION_SNAPSHOT_NOT_ALLOWED = "IncrementalCopyOfEarlierVersionSnapshotNotAllowed"
+    INCREMENTAL_COPY_SOURCE_MUST_BE_SNAPSHOT = "IncrementalCopySourceMustBeSnapshot"
+    INFINITE_LEASE_DURATION_REQUIRED = "InfiniteLeaseDurationRequired"
+    INVALID_BLOB_OR_BLOCK = "InvalidBlobOrBlock"
+    INVALID_BLOB_TIER = "InvalidBlobTier"
+    INVALID_BLOB_TYPE = "InvalidBlobType"
+    INVALID_BLOCK_ID = "InvalidBlockId"
+    INVALID_BLOCK_LIST = "InvalidBlockList"
+    INVALID_OPERATION = "InvalidOperation"
+    INVALID_PAGE_RANGE = "InvalidPageRange"
+    INVALID_SOURCE_BLOB_TYPE = "InvalidSourceBlobType"
+    INVALID_SOURCE_BLOB_URL = "InvalidSourceBlobUrl"
+    INVALID_VERSION_FOR_PAGE_BLOB_OPERATION = "InvalidVersionForPageBlobOperation"
+    LEASE_ALREADY_PRESENT = "LeaseAlreadyPresent"
+    LEASE_ALREADY_BROKEN = "LeaseAlreadyBroken"
+    LEASE_ID_MISMATCH_WITH_BLOB_OPERATION = "LeaseIdMismatchWithBlobOperation"
+    LEASE_ID_MISMATCH_WITH_CONTAINER_OPERATION = "LeaseIdMismatchWithContainerOperation"
+    LEASE_ID_MISMATCH_WITH_LEASE_OPERATION = "LeaseIdMismatchWithLeaseOperation"
+    LEASE_ID_MISSING = "LeaseIdMissing"
+    LEASE_IS_BREAKING_AND_CANNOT_BE_ACQUIRED = "LeaseIsBreakingAndCannotBeAcquired"
+    LEASE_IS_BREAKING_AND_CANNOT_BE_CHANGED = "LeaseIsBreakingAndCannotBeChanged"
+    LEASE_IS_BROKEN_AND_CANNOT_BE_RENEWED = "LeaseIsBrokenAndCannotBeRenewed"
+    LEASE_LOST = "LeaseLost"
+    LEASE_NOT_PRESENT_WITH_BLOB_OPERATION = "LeaseNotPresentWithBlobOperation"
+    LEASE_NOT_PRESENT_WITH_CONTAINER_OPERATION = "LeaseNotPresentWithContainerOperation"
+    LEASE_NOT_PRESENT_WITH_LEASE_OPERATION = "LeaseNotPresentWithLeaseOperation"
+    MAX_BLOB_SIZE_CONDITION_NOT_MET = "MaxBlobSizeConditionNotMet"
+    NO_PENDING_COPY_OPERATION = "NoPendingCopyOperation"
+    OPERATION_NOT_ALLOWED_ON_INCREMENTAL_COPY_BLOB = "OperationNotAllowedOnIncrementalCopyBlob"
+    PENDING_COPY_OPERATION = "PendingCopyOperation"
+    PREVIOUS_SNAPSHOT_CANNOT_BE_NEWER = "PreviousSnapshotCannotBeNewer"
+    PREVIOUS_SNAPSHOT_NOT_FOUND = "PreviousSnapshotNotFound"
+    PREVIOUS_SNAPSHOT_OPERATION_NOT_SUPPORTED = "PreviousSnapshotOperationNotSupported"
+    SEQUENCE_NUMBER_CONDITION_NOT_MET = "SequenceNumberConditionNotMet"
+    SEQUENCE_NUMBER_INCREMENT_TOO_LARGE = "SequenceNumberIncrementTooLarge"
+    SNAPSHOT_COUNT_EXCEEDED = "SnapshotCountExceeded"
+    SNAPSHOT_OPERATION_RATE_EXCEEDED = "SnapshotOperationRateExceeded"
+    #: Deprecated: Please use SNAPSHOT_OPERATION_RATE_EXCEEDED instead.
+    SNAPHOT_OPERATION_RATE_EXCEEDED = "SnapshotOperationRateExceeded"
+    SNAPSHOTS_PRESENT = "SnapshotsPresent"
+    SOURCE_CONDITION_NOT_MET = "SourceConditionNotMet"
+    SYSTEM_IN_USE = "SystemInUse"
+    TARGET_CONDITION_NOT_MET = "TargetConditionNotMet"
+    UNAUTHORIZED_BLOB_OVERWRITE = "UnauthorizedBlobOverwrite"
+    BLOB_BEING_REHYDRATED = "BlobBeingRehydrated"
+    BLOB_ARCHIVED = "BlobArchived"
+    BLOB_NOT_ARCHIVED = "BlobNotArchived"
+
+    # Queue values
+    INVALID_MARKER = "InvalidMarker"
+    MESSAGE_NOT_FOUND = "MessageNotFound"
+    MESSAGE_TOO_LARGE = "MessageTooLarge"
+    POP_RECEIPT_MISMATCH = "PopReceiptMismatch"
+    QUEUE_ALREADY_EXISTS = "QueueAlreadyExists"
+    QUEUE_BEING_DELETED = "QueueBeingDeleted"
+    QUEUE_DISABLED = "QueueDisabled"
+    QUEUE_NOT_EMPTY = "QueueNotEmpty"
+    QUEUE_NOT_FOUND = "QueueNotFound"
+
+    # File values
+    CANNOT_DELETE_FILE_OR_DIRECTORY = "CannotDeleteFileOrDirectory"
+    CLIENT_CACHE_FLUSH_DELAY = "ClientCacheFlushDelay"
+    DELETE_PENDING = "DeletePending"
+    DIRECTORY_NOT_EMPTY = "DirectoryNotEmpty"
+    FILE_LOCK_CONFLICT = "FileLockConflict"
+    FILE_SHARE_PROVISIONED_BANDWIDTH_DOWNGRADE_NOT_ALLOWED = "FileShareProvisionedBandwidthDowngradeNotAllowed"
+    FILE_SHARE_PROVISIONED_IOPS_DOWNGRADE_NOT_ALLOWED = "FileShareProvisionedIopsDowngradeNotAllowed"
+    INVALID_FILE_OR_DIRECTORY_PATH_NAME = "InvalidFileOrDirectoryPathName"
+    PARENT_NOT_FOUND = "ParentNotFound"
+    READ_ONLY_ATTRIBUTE = "ReadOnlyAttribute"
+    SHARE_ALREADY_EXISTS = "ShareAlreadyExists"
+    SHARE_BEING_DELETED = "ShareBeingDeleted"
+    SHARE_DISABLED = "ShareDisabled"
+    SHARE_NOT_FOUND = "ShareNotFound"
+    SHARING_VIOLATION = "SharingViolation"
+    SHARE_SNAPSHOT_IN_PROGRESS = "ShareSnapshotInProgress"
+    SHARE_SNAPSHOT_COUNT_EXCEEDED = "ShareSnapshotCountExceeded"
+    SHARE_SNAPSHOT_OPERATION_NOT_SUPPORTED = "ShareSnapshotOperationNotSupported"
+    SHARE_HAS_SNAPSHOTS = "ShareHasSnapshots"
+    CONTAINER_QUOTA_DOWNGRADE_NOT_ALLOWED = "ContainerQuotaDowngradeNotAllowed"
+
+    # DataLake values
+    CONTENT_LENGTH_MUST_BE_ZERO = 'ContentLengthMustBeZero'
+    PATH_ALREADY_EXISTS = 'PathAlreadyExists'
+    INVALID_FLUSH_POSITION = 'InvalidFlushPosition'
+    INVALID_PROPERTY_NAME = 'InvalidPropertyName'
+    INVALID_SOURCE_URI = 'InvalidSourceUri'
+    UNSUPPORTED_REST_VERSION = 'UnsupportedRestVersion'
+    FILE_SYSTEM_NOT_FOUND = 'FilesystemNotFound'
+    PATH_NOT_FOUND = 'PathNotFound'
+    RENAME_DESTINATION_PARENT_PATH_NOT_FOUND = 'RenameDestinationParentPathNotFound'
+    SOURCE_PATH_NOT_FOUND = 'SourcePathNotFound'
+    DESTINATION_PATH_IS_BEING_DELETED = 'DestinationPathIsBeingDeleted'
+    FILE_SYSTEM_ALREADY_EXISTS = 'FilesystemAlreadyExists'
+    FILE_SYSTEM_BEING_DELETED = 'FilesystemBeingDeleted'
+    INVALID_DESTINATION_PATH = 'InvalidDestinationPath'
+    INVALID_RENAME_SOURCE_PATH = 'InvalidRenameSourcePath'
+    INVALID_SOURCE_OR_DESTINATION_RESOURCE_TYPE = 'InvalidSourceOrDestinationResourceType'
+    LEASE_IS_ALREADY_BROKEN = 'LeaseIsAlreadyBroken'
+    LEASE_NAME_MISMATCH = 'LeaseNameMismatch'
+    PATH_CONFLICT = 'PathConflict'
+    SOURCE_PATH_IS_BEING_DELETED = 'SourcePathIsBeingDeleted'
+
+
+class DictMixin(object):
+
+    def __setitem__(self, key, item):
+        self.__dict__[key] = item
+
+    def __getitem__(self, key):
+        return self.__dict__[key]
+
+    def __repr__(self):
+        return str(self)
+
+    def __len__(self):
+        return len(self.keys())
+
+    def __delitem__(self, key):
+        self.__dict__[key] = None
+
+    # Compare objects by comparing all attributes.
+    def __eq__(self, other):
+        if isinstance(other, self.__class__):
+            return self.__dict__ == other.__dict__
+        return False
+
+    # Compare objects by comparing all attributes.
+    def __ne__(self, other):
+        return not self.__eq__(other)
+
+    def __str__(self):
+        return str({k: v for k, v in self.__dict__.items() if not k.startswith('_')})
+
+    def __contains__(self, key):
+        return key in self.__dict__
+
+    def has_key(self, k):
+        return k in self.__dict__
+
+    def update(self, *args, **kwargs):
+        return self.__dict__.update(*args, **kwargs)
+
+    def keys(self):
+        return [k for k in self.__dict__ if not k.startswith('_')]
+
+    def values(self):
+        return [v for k, v in self.__dict__.items() if not k.startswith('_')]
+
+    def items(self):
+        return [(k, v) for k, v in self.__dict__.items() if not k.startswith('_')]
+
+    def get(self, key, default=None):
+        if key in self.__dict__:
+            return self.__dict__[key]
+        return default
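+
+    # Illustrative example: a subclass gains dict-style access to its public
+    # attributes (hypothetical `Props` class):
+    #
+    #     class Props(DictMixin):
+    #         def __init__(self):
+    #             self.name = "foo"
+    #
+    #     p = Props()
+    #     assert p["name"] == "foo" and p.keys() == ["name"]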
+
+
+class LocationMode(object):
+    """
+    Specifies the location the request should be sent to. This mode only applies
+    for RA-GRS accounts which allow secondary read access. All other account types
+    must use PRIMARY.
+    """
+
+    PRIMARY = 'primary'  #: Requests should be sent to the primary location.
+    SECONDARY = 'secondary'  #: Requests should be sent to the secondary location, if possible.
+
+
+class ResourceTypes(object):
+    """
+    Specifies the resource types that are accessible with the account SAS.
+
+    :param bool service:
+        Access to service-level APIs (e.g., Get/Set Service Properties,
+        Get Service Stats, List Containers/Queues/Shares)
+    :param bool container:
+        Access to container-level APIs (e.g., Create/Delete Container,
+        Create/Delete Queue, Create/Delete Share,
+        List Blobs/Files and Directories)
+    :param bool object:
+        Access to object-level APIs for blobs, queue messages, and
+        files (e.g. Put Blob, Query Entity, Get Messages, Create File, etc.)
+    """
+
+    service: bool = False
+    container: bool = False
+    object: bool = False
+    _str: str
+
+    def __init__(
+        self,
+        service: bool = False,
+        container: bool = False,
+        object: bool = False  # pylint: disable=redefined-builtin
+    ) -> None:
+        self.service = service
+        self.container = container
+        self.object = object
+        self._str = (('s' if self.service else '') +
+                ('c' if self.container else '') +
+                ('o' if self.object else ''))
+
+    def __str__(self):
+        return self._str
+
+    @classmethod
+    def from_string(cls, string):
+        """Create a ResourceTypes from a string.
+
+        To specify service, container, or object you need only to
+        include the first letter of the word in the string. E.g. for service
+        and container, you would provide a string "sc".
+
+        :param str string: Specify service, container, or object in
+            the string with the first letter of the word.
+        :return: A ResourceTypes object
+        :rtype: ~azure.storage.blob.ResourceTypes
+        """
+        res_service = 's' in string
+        res_container = 'c' in string
+        res_object = 'o' in string
+
+        parsed = cls(res_service, res_container, res_object)
+        parsed._str = string
+        return parsed
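+
+    # Illustrative example: ResourceTypes.from_string("sc") sets service=True
+    # and container=True, and str() of the result round-trips to "sc".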
+
+
+class AccountSasPermissions(object):
+    """
+    Permissions class to be used with the generate_account_sas
+    function and for the AccessPolicies used with set_*_acl. There are two types of
+    SAS which may be used to grant resource access. One is to grant access to a
+    specific resource (resource-specific). Another is to grant access to the
+    entire service for a specific account and allow certain operations based on
+    permissions found here.
+
+    :param bool read:
+        Valid for all signed resources types (Service, Container, and Object).
+        Permits read permissions to the specified resource type.
+    :param bool write:
+        Valid for all signed resources types (Service, Container, and Object).
+        Permits write permissions to the specified resource type.
+    :param bool delete:
+        Valid for Container and Object resource types, except for queue messages.
+    :param bool delete_previous_version:
+        Delete the previous blob version for versioning-enabled storage accounts.
+    :param bool list:
+        Valid for Service and Container resource types only.
+    :param bool add:
+        Valid for the following Object resource types only: queue messages, and append blobs.
+    :param bool create:
+        Valid for the following Object resource types only: blobs and files.
+        Users can create new blobs or files, but may not overwrite existing
+        blobs or files.
+    :param bool update:
+        Valid for the following Object resource types only: queue messages.
+    :param bool process:
+        Valid for the following Object resource type only: queue messages.
+    :keyword bool tag:
+        To enable setting or getting tags on the blobs in the container.
+    :keyword bool filter_by_tags:
+        To enable getting blobs by tags; this should be used together with the list permission.
+    :keyword bool set_immutability_policy:
+        To enable operations related to setting/deleting immutability policy.
+        To get the immutability policy, you only need read permission.
+    :keyword bool permanent_delete:
+        To enable permanent delete of blobs.
+        Valid for the Object resource type of Blob only.
+    """
+
+    read: bool = False
+    write: bool = False
+    delete: bool = False
+    delete_previous_version: bool = False
+    list: bool = False
+    add: bool = False
+    create: bool = False
+    update: bool = False
+    process: bool = False
+    tag: bool = False
+    filter_by_tags: bool = False
+    set_immutability_policy: bool = False
+    permanent_delete: bool = False
+
+    def __init__(
+        self,
+        read: bool = False,
+        write: bool = False,
+        delete: bool = False,
+        list: bool = False,  # pylint: disable=redefined-builtin
+        add: bool = False,
+        create: bool = False,
+        update: bool = False,
+        process: bool = False,
+        delete_previous_version: bool = False,
+        **kwargs
+    ) -> None:
+        self.read = read
+        self.write = write
+        self.delete = delete
+        self.delete_previous_version = delete_previous_version
+        self.permanent_delete = kwargs.pop('permanent_delete', False)
+        self.list = list
+        self.add = add
+        self.create = create
+        self.update = update
+        self.process = process
+        self.tag = kwargs.pop('tag', False)
+        self.filter_by_tags = kwargs.pop('filter_by_tags', False)
+        self.set_immutability_policy = kwargs.pop('set_immutability_policy', False)
+        self._str = (('r' if self.read else '') +
+                     ('w' if self.write else '') +
+                     ('d' if self.delete else '') +
+                     ('x' if self.delete_previous_version else '') +
+                     ('y' if self.permanent_delete else '') +
+                     ('l' if self.list else '') +
+                     ('a' if self.add else '') +
+                     ('c' if self.create else '') +
+                     ('u' if self.update else '') +
+                     ('p' if self.process else '') +
+                     ('f' if self.filter_by_tags else '') +
+                     ('t' if self.tag else '') +
+                     ('i' if self.set_immutability_policy else '')
+                     )
+
+    def __str__(self):
+        return self._str
+
+    @classmethod
+    def from_string(cls, permission):
+        """Create AccountSasPermissions from a string.
+
+        To specify read, write, delete, etc. permissions you need only to
+        include the first letter of the word in the string. E.g. for read and write
+        permissions you would provide a string "rw".
+
+        :param str permission: Specify permissions in
+            the string with the first letter of the word.
+        :return: An AccountSasPermissions object
+        :rtype: ~azure.storage.filedatalake.AccountSasPermissions
+        """
+        p_read = 'r' in permission
+        p_write = 'w' in permission
+        p_delete = 'd' in permission
+        p_delete_previous_version = 'x' in permission
+        p_permanent_delete = 'y' in permission
+        p_list = 'l' in permission
+        p_add = 'a' in permission
+        p_create = 'c' in permission
+        p_update = 'u' in permission
+        p_process = 'p' in permission
+        p_tag = 't' in permission
+        p_filter_by_tags = 'f' in permission
+        p_set_immutability_policy = 'i' in permission
+        parsed = cls(read=p_read, write=p_write, delete=p_delete, delete_previous_version=p_delete_previous_version,
+                     list=p_list, add=p_add, create=p_create, update=p_update, process=p_process, tag=p_tag,
+                     filter_by_tags=p_filter_by_tags, set_immutability_policy=p_set_immutability_policy,
+                     permanent_delete=p_permanent_delete)
+
+        return parsed
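+
+    # Illustrative example: AccountSasPermissions.from_string("rwdl") enables
+    # read, write, delete, and list; str() renders them back as "rwdl".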
+
+
+class Services(object):
+    """Specifies the services accessible with the account SAS.
+
+    :keyword bool blob:
+        Access for the `~azure.storage.blob.BlobServiceClient`. Default is False.
+    :keyword bool queue:
+        Access for the `~azure.storage.queue.QueueServiceClient`. Default is False.
+    :keyword bool fileshare:
+        Access for the `~azure.storage.fileshare.ShareServiceClient`. Default is False.
+    """
+
+    def __init__(
+        self, *,
+        blob: bool = False,
+        queue: bool = False,
+        fileshare: bool = False
+    ) -> None:
+        self.blob = blob
+        self.queue = queue
+        self.fileshare = fileshare
+        self._str = (('b' if self.blob else '') +
+                ('q' if self.queue else '') +
+                ('f' if self.fileshare else ''))
+
+    def __str__(self):
+        return self._str
+
+    @classmethod
+    def from_string(cls, string):
+        """Create Services from a string.
+
+        To specify blob, queue, or file you need only to
+        include the first letter of the word in the string. E.g. for blob and queue
+        you would provide a string "bq".
+
+        :param str string: Specify blob, queue, or file in
+            the string with the first letter of the word.
+        :return: A Services object
+        :rtype: ~azure.storage.blob.Services
+        """
+        res_blob = 'b' in string
+        res_queue = 'q' in string
+        res_file = 'f' in string
+
+        parsed = cls(blob=res_blob, queue=res_queue, fileshare=res_file)
+        parsed._str = string
+        return parsed
+
+
+class UserDelegationKey(object):
+    """
+    Represents a user delegation key, provided to the user by Azure Storage
+    based on their Azure Active Directory access token.
+
+    The fields are saved as simple strings since the user does not have to interact with this object;
+    to generate an identity SAS, the user can simply pass it to the right API.
+    """
+
+    signed_oid: Optional[str] = None
+    """Object ID of this token."""
+    signed_tid: Optional[str] = None
+    """Tenant ID of the tenant that issued this token."""
+    signed_start: Optional[str] = None
+    """The datetime this token becomes valid."""
+    signed_expiry: Optional[str] = None
+    """The datetime this token expires."""
+    signed_service: Optional[str] = None
+    """What service this key is valid for."""
+    signed_version: Optional[str] = None
+    """The version identifier of the REST service that created this token."""
+    value: Optional[str] = None
+    """The user delegation key."""
+
+    def __init__(self):
+        self.signed_oid = None
+        self.signed_tid = None
+        self.signed_start = None
+        self.signed_expiry = None
+        self.signed_service = None
+        self.signed_version = None
+        self.value = None
+
+
+class StorageConfiguration(Configuration):
+    """
+    Specifies the configurable values used in Azure Storage.
+
+    :param int max_single_put_size: If the blob size is less than or equal max_single_put_size, then the blob will be
+        uploaded with only one HTTP PUT request. If the blob size is larger than max_single_put_size,
+        the blob will be uploaded in chunks. Defaults to 64*1024*1024, or 64MB.
+    :param int copy_polling_interval: The interval in seconds for polling copy operations.
+    :param int max_block_size: The maximum chunk size for uploading a block blob in chunks.
+        Defaults to 4*1024*1024, or 4MB.
+    :param int min_large_block_upload_threshold: The minimum chunk size required to use the memory efficient
+        algorithm when uploading a block blob.
+    :param bool use_byte_buffer: Use a byte buffer for block blob uploads. Defaults to False.
+    :param int max_page_size: The maximum chunk size for uploading a page blob. Defaults to 4*1024*1024, or 4MB.
+    :param int min_large_chunk_upload_threshold: The max size for a single put operation.
+    :param int max_single_get_size: The maximum size for a blob to be downloaded in a single call;
+        the remainder will be downloaded in chunks (possibly in parallel). Defaults to 32*1024*1024, or 32MB.
+    :param int max_chunk_get_size: The maximum chunk size used for downloading a blob. Defaults to 4*1024*1024,
+        or 4MB.
+    :param int max_range_size: The max range size for file upload.
+
+    """
+
+    max_single_put_size: int
+    copy_polling_interval: int
+    max_block_size: int
+    min_large_block_upload_threshold: int
+    use_byte_buffer: bool
+    max_page_size: int
+    min_large_chunk_upload_threshold: int
+    max_single_get_size: int
+    max_chunk_get_size: int
+    max_range_size: int
+    user_agent_policy: UserAgentPolicy
+
+    def __init__(self, **kwargs):
+        super(StorageConfiguration, self).__init__(**kwargs)
+        self.max_single_put_size = kwargs.pop('max_single_put_size', 64 * 1024 * 1024)
+        self.copy_polling_interval = 15
+        self.max_block_size = kwargs.pop('max_block_size', 4 * 1024 * 1024)
+        self.min_large_block_upload_threshold = kwargs.get('min_large_block_upload_threshold', 4 * 1024 * 1024 + 1)
+        self.use_byte_buffer = kwargs.pop('use_byte_buffer', False)
+        self.max_page_size = kwargs.pop('max_page_size', 4 * 1024 * 1024)
+        self.min_large_chunk_upload_threshold = kwargs.pop('min_large_chunk_upload_threshold', 100 * 1024 * 1024 + 1)
+        self.max_single_get_size = kwargs.pop('max_single_get_size', 32 * 1024 * 1024)
+        self.max_chunk_get_size = kwargs.pop('max_chunk_get_size', 4 * 1024 * 1024)
+        self.max_range_size = kwargs.pop('max_range_size', 4 * 1024 * 1024)
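+
+    # Illustrative example (values here are arbitrary): override selected
+    # transfer sizes while keeping the remaining defaults:
+    #
+    #     config = StorageConfiguration(max_block_size=8 * 1024 * 1024,
+    #                                   max_single_put_size=128 * 1024 * 1024)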
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_shared/parser.py b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_shared/parser.py
new file mode 100644
index 00000000..112c1984
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_shared/parser.py
@@ -0,0 +1,53 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+
+from datetime import datetime, timezone
+from typing import Optional
+
+EPOCH_AS_FILETIME = 116444736000000000  # January 1, 1970 as MS filetime
+HUNDREDS_OF_NANOSECONDS = 10000000
+
+
+def _to_utc_datetime(value: datetime) -> str:
+    return value.strftime('%Y-%m-%dT%H:%M:%SZ')
+
+
+def _rfc_1123_to_datetime(rfc_1123: str) -> Optional[datetime]:
+    """Converts an RFC 1123 date string to a UTC datetime.
+
+    :param str rfc_1123: The time and date in RFC 1123 format.
+    :returns: The time and date in UTC datetime format, or None if no value is given.
+    :rtype: Optional[datetime]
+    """
+    if not rfc_1123:
+        return None
+
+    return datetime.strptime(rfc_1123, "%a, %d %b %Y %H:%M:%S %Z")
+
+
+def _filetime_to_datetime(filetime: str) -> Optional[datetime]:
+    """Converts an MS filetime string to a UTC datetime. "0" indicates None.
+    If parsing MS Filetime fails, tries RFC 1123 as backup.
+
+    :param str filetime: The time and date in MS filetime format.
+    :returns: The time and date in UTC datetime format, or None for a missing or zero value.
+    :rtype: Optional[datetime]
+    """
+    if not filetime:
+        return None
+
+    # Try to convert to MS Filetime
+    try:
+        temp_filetime = int(filetime)
+        if temp_filetime == 0:
+            return None
+
+        return datetime.fromtimestamp((temp_filetime - EPOCH_AS_FILETIME) / HUNDREDS_OF_NANOSECONDS, tz=timezone.utc)
+    except ValueError:
+        pass
+
+    # Try RFC 1123 as backup
+    return _rfc_1123_to_datetime(filetime)
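+
+# Illustrative worked example: the MS filetime epoch offset itself maps to the
+# Unix epoch, since (116444736000000000 - EPOCH_AS_FILETIME) == 0:
+#
+#     _filetime_to_datetime("116444736000000000")
+#     # -> datetime(1970, 1, 1, 0, 0, tzinfo=timezone.utc)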
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_shared/policies.py b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_shared/policies.py
new file mode 100644
index 00000000..ee75cd5a
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_shared/policies.py
@@ -0,0 +1,694 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+
+import base64
+import hashlib
+import logging
+import random
+import re
+import uuid
+from io import SEEK_SET, UnsupportedOperation
+from time import time
+from typing import Any, Dict, Optional, TYPE_CHECKING
+from urllib.parse import (
+    parse_qsl,
+    urlencode,
+    urlparse,
+    urlunparse,
+)
+from wsgiref.handlers import format_date_time
+
+from azure.core.exceptions import AzureError, ServiceRequestError, ServiceResponseError
+from azure.core.pipeline.policies import (
+    BearerTokenCredentialPolicy,
+    HeadersPolicy,
+    HTTPPolicy,
+    NetworkTraceLoggingPolicy,
+    RequestHistory,
+    SansIOHTTPPolicy
+)
+
+from .authentication import AzureSigningError, StorageHttpChallenge
+from .constants import DEFAULT_OAUTH_SCOPE
+from .models import LocationMode
+
+if TYPE_CHECKING:
+    from azure.core.credentials import TokenCredential
+    from azure.core.pipeline.transport import (  # pylint: disable=non-abstract-transport-import
+        PipelineRequest,
+        PipelineResponse
+    )
+
+
+_LOGGER = logging.getLogger(__name__)
+
+
+def encode_base64(data):
+    if isinstance(data, str):
+        data = data.encode('utf-8')
+    encoded = base64.b64encode(data)
+    return encoded.decode('utf-8')
+
+
+# Are we out of retries?
+def is_exhausted(settings):
+    retry_counts = (settings['total'], settings['connect'], settings['read'], settings['status'])
+    retry_counts = list(filter(None, retry_counts))
+    if not retry_counts:
+        return False
+    return min(retry_counts) < 0
+
+
+def retry_hook(settings, **kwargs):
+    if settings['hook']:
+        settings['hook'](retry_count=settings['count'] - 1, location_mode=settings['mode'], **kwargs)
+
+
+# Is this method/status code retryable? (Based on allowlists and control
+# variables such as the number of total retries to allow, whether to
+# respect the Retry-After header, whether this header is present, and
+# whether the returned status code is on the list of status codes to
+# be retried upon in the presence of the aforementioned header)
+def is_retry(response, mode):
+    status = response.http_response.status_code
+    if 300 <= status < 500:
+        # An exception occurred, but in most cases it was expected. Examples could
+        # include a 409 Conflict or 412 Precondition Failed.
+        if status == 404 and mode == LocationMode.SECONDARY:
+            # Response code 404 should be retried if secondary was used.
+            return True
+        if status == 408:
+            # Response code 408 is a timeout and should be retried.
+            return True
+        return False
+    if status >= 500:
+        # Response codes of 500 and above, with the exception of 501 Not Implemented
+        # and 505 Version Not Supported, indicate a server issue and should be retried.
+        if status in [501, 505]:
+            return False
+        return True
+    return False
+
+
+def is_checksum_retry(response):
+    # retry if invalid content md5
+    if response.context.get('validate_content', False) and response.http_response.headers.get('content-md5'):
+        computed_md5 = response.http_request.headers.get('content-md5', None) or \
+                            encode_base64(StorageContentValidation.get_content_md5(response.http_response.body()))
+        if response.http_response.headers['content-md5'] != computed_md5:
+            return True
+    return False
+
+
+def urljoin(base_url, stub_url):
+    parsed = urlparse(base_url)
+    parsed = parsed._replace(path=parsed.path + '/' + stub_url)
+    return parsed.geturl()
+
+
+class QueueMessagePolicy(SansIOHTTPPolicy):
+
+    def on_request(self, request):
+        message_id = request.context.options.pop('queue_message_id', None)
+        if message_id:
+            request.http_request.url = urljoin(
+                request.http_request.url,
+                message_id)
+
+
+class StorageHeadersPolicy(HeadersPolicy):
+    request_id_header_name = 'x-ms-client-request-id'
+
+    def on_request(self, request: "PipelineRequest") -> None:
+        super(StorageHeadersPolicy, self).on_request(request)
+        current_time = format_date_time(time())
+        request.http_request.headers['x-ms-date'] = current_time
+
+        custom_id = request.context.options.pop('client_request_id', None)
+        request.http_request.headers['x-ms-client-request-id'] = custom_id or str(uuid.uuid1())
+
+    # def on_response(self, request, response):
+    #     # raise exception if the echoed client request id from the service is not identical to the one we sent
+    #     if self.request_id_header_name in response.http_response.headers:
+
+    #         client_request_id = request.http_request.headers.get(self.request_id_header_name)
+
+    #         if response.http_response.headers[self.request_id_header_name] != client_request_id:
+    #             raise AzureError(
+    #                 "Echoed client request ID: {} does not match sent client request ID: {}.  "
+    #                 "Service request ID: {}".format(
+    #                     response.http_response.headers[self.request_id_header_name], client_request_id,
+    #                     response.http_response.headers['x-ms-request-id']),
+    #                 response=response.http_response
+    #             )
+
+
+class StorageHosts(SansIOHTTPPolicy):
+
+    def __init__(self, hosts=None, **kwargs):  # pylint: disable=unused-argument
+        self.hosts = hosts
+        super(StorageHosts, self).__init__()
+
+    def on_request(self, request: "PipelineRequest") -> None:
+        request.context.options['hosts'] = self.hosts
+        parsed_url = urlparse(request.http_request.url)
+
+        # Detect what location mode we're currently requesting with
+        location_mode = LocationMode.PRIMARY
+        for key, value in self.hosts.items():
+            if parsed_url.netloc == value:
+                location_mode = key
+
+        # See if a specific location mode has been specified, and if so, redirect
+        use_location = request.context.options.pop('use_location', None)
+        if use_location:
+            # Lock retries to the specific location
+            request.context.options['retry_to_secondary'] = False
+            if use_location not in self.hosts:
+                raise ValueError(f"Attempting to use undefined host location {use_location}")
+            if use_location != location_mode:
+                # Update request URL to use the specified location
+                updated = parsed_url._replace(netloc=self.hosts[use_location])
+                request.http_request.url = updated.geturl()
+                location_mode = use_location
+
+        request.context.options['location_mode'] = location_mode
+
+
+class StorageLoggingPolicy(NetworkTraceLoggingPolicy):
+    """A policy that logs HTTP request and response to the DEBUG logger.
+
+    This accepts both global configuration, and per-request level with "logging_enable"
+    """
+
+    def __init__(self, logging_enable: bool = False, **kwargs) -> None:
+        self.logging_body = kwargs.pop("logging_body", False)
+        super(StorageLoggingPolicy, self).__init__(logging_enable=logging_enable, **kwargs)
+
+    def on_request(self, request: "PipelineRequest") -> None:
+        http_request = request.http_request
+        options = request.context.options
+        self.logging_body = self.logging_body or options.pop("logging_body", False)
+        if options.pop("logging_enable", self.enable_http_logger):
+            request.context["logging_enable"] = True
+            if not _LOGGER.isEnabledFor(logging.DEBUG):
+                return
+
+            try:
+                log_url = http_request.url
+                query_params = http_request.query
+                if 'sig' in query_params:
+                    log_url = log_url.replace(query_params['sig'], "sig=*****")
+                _LOGGER.debug("Request URL: %r", log_url)
+                _LOGGER.debug("Request method: %r", http_request.method)
+                _LOGGER.debug("Request headers:")
+                for header, value in http_request.headers.items():
+                    if header.lower() == 'authorization':
+                        value = '*****'
+                    elif header.lower() == 'x-ms-copy-source' and 'sig' in value:
+                        # take the url apart and scrub away the signed signature
+                        scheme, netloc, path, params, query, fragment = urlparse(value)
+                        parsed_qs = dict(parse_qsl(query))
+                        parsed_qs['sig'] = '*****'
+
+                        # the SAS needs to be put back together
+                        value = urlunparse((scheme, netloc, path, params, urlencode(parsed_qs), fragment))
+
+                    _LOGGER.debug("    %r: %r", header, value)
+                _LOGGER.debug("Request body:")
+
+                if self.logging_body:
+                    _LOGGER.debug(str(http_request.body))
+                else:
+                    # We don't want to log the binary data of a file upload.
+                    _LOGGER.debug("Hidden body, please use logging_body to show body")
+            except Exception as err:  # pylint: disable=broad-except
+                _LOGGER.debug("Failed to log request: %r", err)
+
+    def on_response(self, request: "PipelineRequest", response: "PipelineResponse") -> None:
+        if response.context.pop("logging_enable", self.enable_http_logger):
+            if not _LOGGER.isEnabledFor(logging.DEBUG):
+                return
+
+            try:
+                _LOGGER.debug("Response status: %r", response.http_response.status_code)
+                _LOGGER.debug("Response headers:")
+                for res_header, value in response.http_response.headers.items():
+                    _LOGGER.debug("    %r: %r", res_header, value)
+
+                # We don't want to log binary data if the response is a file.
+                _LOGGER.debug("Response content:")
+                pattern = re.compile(r'attachment; ?filename=["\w.]+', re.IGNORECASE)
+                header = response.http_response.headers.get('content-disposition')
+                resp_content_type = response.http_response.headers.get("content-type", "")
+
+                if header and pattern.match(header):
+                    filename = header.partition('=')[2]
+                    _LOGGER.debug("File attachments: %s", filename)
+                elif resp_content_type.endswith("octet-stream"):
+                    _LOGGER.debug("Body contains binary data.")
+                elif resp_content_type.startswith("image"):
+                    _LOGGER.debug("Body contains image data.")
+
+                if self.logging_body and resp_content_type.startswith("text"):
+                    _LOGGER.debug(response.http_response.text())
+                elif self.logging_body:
+                    try:
+                        _LOGGER.debug(response.http_response.body())
+                    except ValueError:
+                        _LOGGER.debug("Body is streamable")
+
+            except Exception as err:  # pylint: disable=broad-except
+                _LOGGER.debug("Failed to log response: %s", repr(err))
+
+
+class StorageRequestHook(SansIOHTTPPolicy):
+
+    def __init__(self, **kwargs):
+        self._request_callback = kwargs.get('raw_request_hook')
+        super(StorageRequestHook, self).__init__()
+
+    def on_request(self, request: "PipelineRequest") -> None:
+        request_callback = request.context.options.pop('raw_request_hook', self._request_callback)
+        if request_callback:
+            request_callback(request)
+
+
+class StorageResponseHook(HTTPPolicy):
+
+    def __init__(self, **kwargs):
+        self._response_callback = kwargs.get('raw_response_hook')
+        super(StorageResponseHook, self).__init__()
+
+    def send(self, request: "PipelineRequest") -> "PipelineResponse":
+        # Values could be 0
+        data_stream_total = request.context.get('data_stream_total')
+        if data_stream_total is None:
+            data_stream_total = request.context.options.pop('data_stream_total', None)
+        download_stream_current = request.context.get('download_stream_current')
+        if download_stream_current is None:
+            download_stream_current = request.context.options.pop('download_stream_current', None)
+        upload_stream_current = request.context.get('upload_stream_current')
+        if upload_stream_current is None:
+            upload_stream_current = request.context.options.pop('upload_stream_current', None)
+
+        response_callback = request.context.get('response_callback') or \
+            request.context.options.pop('raw_response_hook', self._response_callback)
+
+        response = self.next.send(request)
+
+        will_retry = is_retry(response, request.context.options.get('mode')) or is_checksum_retry(response)
+        # Auth error could come from Bearer challenge, in which case this request will be made again
+        is_auth_error = response.http_response.status_code == 401
+        should_update_counts = not (will_retry or is_auth_error)
+
+        if should_update_counts and download_stream_current is not None:
+            download_stream_current += int(response.http_response.headers.get('Content-Length', 0))
+            if data_stream_total is None:
+                content_range = response.http_response.headers.get('Content-Range')
+                if content_range:
+                    data_stream_total = int(content_range.split(' ', 1)[1].split('/', 1)[1])
+                else:
+                    data_stream_total = download_stream_current
+        elif should_update_counts and upload_stream_current is not None:
+            upload_stream_current += int(response.http_request.headers.get('Content-Length', 0))
+        for pipeline_obj in [request, response]:
+            if hasattr(pipeline_obj, 'context'):
+                pipeline_obj.context['data_stream_total'] = data_stream_total
+                pipeline_obj.context['download_stream_current'] = download_stream_current
+                pipeline_obj.context['upload_stream_current'] = upload_stream_current
+        if response_callback:
+            response_callback(response)
+            request.context['response_callback'] = response_callback
+        return response
+
+
+class StorageContentValidation(SansIOHTTPPolicy):
+    """A simple policy that sends the given headers
+    with the request.
+
+    This will overwrite any headers already defined in the request.
+    """
+    header_name = 'Content-MD5'
+
+    def __init__(self, **kwargs: Any) -> None:  # pylint: disable=unused-argument
+        super(StorageContentValidation, self).__init__()
+
+    @staticmethod
+    def get_content_md5(data):
+        # Since HTTP does not differentiate between no content and empty content,
+        # we have to perform a None check.
+        data = data or b""
+        md5 = hashlib.md5() # nosec
+        if isinstance(data, bytes):
+            md5.update(data)
+        elif hasattr(data, 'read'):
+            pos = 0
+            try:
+                pos = data.tell()
+            except:  # pylint: disable=bare-except
+                pass
+            for chunk in iter(lambda: data.read(4096), b""):
+                md5.update(chunk)
+            try:
+                data.seek(pos, SEEK_SET)
+            except (AttributeError, IOError) as exc:
+                raise ValueError("Data should be bytes or a seekable file-like object.") from exc
+        else:
+            raise ValueError("Data should be bytes or a seekable file-like object.")
+
+        return md5.digest()
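+
+    # Illustrative example: the header value sent on the wire is the
+    # base64-encoded digest, e.g.
+    #
+    #     encode_base64(StorageContentValidation.get_content_md5(b"hello"))
+    #     # -> 'XUFAKrxLKna5cZ2REBfFkg=='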
+
+    def on_request(self, request: "PipelineRequest") -> None:
+        validate_content = request.context.options.pop('validate_content', False)
+        if validate_content and request.http_request.method != 'GET':
+            computed_md5 = encode_base64(StorageContentValidation.get_content_md5(request.http_request.data))
+            request.http_request.headers[self.header_name] = computed_md5
+            request.context['validate_content_md5'] = computed_md5
+        request.context['validate_content'] = validate_content
+
+    def on_response(self, request: "PipelineRequest", response: "PipelineResponse") -> None:
+        if response.context.get('validate_content', False) and response.http_response.headers.get('content-md5'):
+            computed_md5 = request.context.get('validate_content_md5') or \
+                encode_base64(StorageContentValidation.get_content_md5(response.http_response.body()))
+            if response.http_response.headers['content-md5'] != computed_md5:
+                raise AzureError((
+                    f"MD5 mismatch. Expected value is '{response.http_response.headers['content-md5']}', "
+                    f"computed value is '{computed_md5}'."),
+                    response=response.http_response
+                )
+
+
+class StorageRetryPolicy(HTTPPolicy):
+    """
+    The base class for Exponential and Linear retries containing shared code.
+    """
+
+    total_retries: int
+    """The max number of retries."""
+    connect_retries: int
+    """The max number of connect retries."""
+    read_retries: int
+    """The max number of read retries."""
+    status_retries: int
+    """The max number of status retries."""
+    retry_to_secondary: bool
+    """Whether the secondary endpoint should be retried."""
+
+    def __init__(self, **kwargs: Any) -> None:
+        self.total_retries = kwargs.pop('retry_total', 10)
+        self.connect_retries = kwargs.pop('retry_connect', 3)
+        self.read_retries = kwargs.pop('retry_read', 3)
+        self.status_retries = kwargs.pop('retry_status', 3)
+        self.retry_to_secondary = kwargs.pop('retry_to_secondary', False)
+        super(StorageRetryPolicy, self).__init__()
+
+    def _set_next_host_location(self, settings: Dict[str, Any], request: "PipelineRequest") -> None:
+        """
+        A function which sets the next host location on the request, if applicable.
+
+        :param Dict[str, Any] settings: The configurable values pertaining to the next host location.
+        :param PipelineRequest request: A pipeline request object.
+        """
+        if settings['hosts'] and all(settings['hosts'].values()):
+            url = urlparse(request.url)
+            # If there's more than one possible location, retry to the alternative
+            if settings['mode'] == LocationMode.PRIMARY:
+                settings['mode'] = LocationMode.SECONDARY
+            else:
+                settings['mode'] = LocationMode.PRIMARY
+            updated = url._replace(netloc=settings['hosts'].get(settings['mode']))
+            request.url = updated.geturl()
+
+    def configure_retries(self, request: "PipelineRequest") -> Dict[str, Any]:
+        body_position = None
+        if hasattr(request.http_request.body, 'read'):
+            try:
+                body_position = request.http_request.body.tell()
+            except (AttributeError, UnsupportedOperation):
+                # if body position cannot be obtained, then retries will not work
+                pass
+        options = request.context.options
+        return {
+            'total': options.pop("retry_total", self.total_retries),
+            'connect': options.pop("retry_connect", self.connect_retries),
+            'read': options.pop("retry_read", self.read_retries),
+            'status': options.pop("retry_status", self.status_retries),
+            'retry_secondary': options.pop("retry_to_secondary", self.retry_to_secondary),
+            'mode': options.pop("location_mode", LocationMode.PRIMARY),
+            'hosts': options.pop("hosts", None),
+            'hook': options.pop("retry_hook", None),
+            'body_position': body_position,
+            'count': 0,
+            'history': []
+        }
+
+    def get_backoff_time(self, settings: Dict[str, Any]) -> float:  # pylint: disable=unused-argument
+        """ Formula for computing the current backoff.
+        Should be calculated by child class.
+
+        :param Dict[str, Any] settings: The configurable values pertaining to the backoff time.
+        :returns: The backoff time.
+        :rtype: float
+        """
+        return 0
+
+    def sleep(self, settings, transport):
+        backoff = self.get_backoff_time(settings)
+        if not backoff or backoff < 0:
+            return
+        transport.sleep(backoff)
+
+    def increment(
+        self, settings: Dict[str, Any],
+        request: "PipelineRequest",
+        response: Optional["PipelineResponse"] = None,
+        error: Optional[AzureError] = None
+    ) -> bool:
+        """Increment the retry counters.
+
+        :param Dict[str, Any] settings: The configurable values pertaining to the increment operation.
+        :param PipelineRequest request: A pipeline request object.
+        :param Optional[PipelineResponse] response: A pipeline response object.
+        :param Optional[AzureError] error: An error encountered during the request, or
+            None if the response was received successfully.
+        :returns: Whether the retry attempts are exhausted.
+        :rtype: bool
+        """
+        settings['total'] -= 1
+
+        if error and isinstance(error, ServiceRequestError):
+            # Errors when we're fairly sure that the server did not receive the
+            # request, so it should be safe to retry.
+            settings['connect'] -= 1
+            settings['history'].append(RequestHistory(request, error=error))
+
+        elif error and isinstance(error, ServiceResponseError):
+            # Errors that occur after the request has been started, so we should
+            # assume that the server began processing it.
+            settings['read'] -= 1
+            settings['history'].append(RequestHistory(request, error=error))
+
+        else:
+            # Incrementing because of a server error like a 500 in
+            # status_forcelist when the given method is in the allowlist.
+            if response:
+                settings['status'] -= 1
+                settings['history'].append(RequestHistory(request, http_response=response))
+
+        if not is_exhausted(settings):
+            if request.method not in ['PUT'] and settings['retry_secondary']:
+                self._set_next_host_location(settings, request)
+
+            # rewind the request body if it is a stream
+            if request.body and hasattr(request.body, 'read'):
+                # if no position was saved, then retry will not work
+                if settings['body_position'] is None:
+                    return False
+                try:
+                    # attempt to rewind the body to the initial position
+                    request.body.seek(settings['body_position'], SEEK_SET)
+                except (UnsupportedOperation, ValueError):
+                    # if body is not seekable, then retry would not work
+                    return False
+            settings['count'] += 1
+            return True
+        return False
+
+    def send(self, request):
+        retries_remaining = True
+        response = None
+        retry_settings = self.configure_retries(request)
+        while retries_remaining:
+            try:
+                response = self.next.send(request)
+                if is_retry(response, retry_settings['mode']) or is_checksum_retry(response):
+                    retries_remaining = self.increment(
+                        retry_settings,
+                        request=request.http_request,
+                        response=response.http_response)
+                    if retries_remaining:
+                        retry_hook(
+                            retry_settings,
+                            request=request.http_request,
+                            response=response.http_response,
+                            error=None)
+                        self.sleep(retry_settings, request.context.transport)
+                        continue
+                break
+            except AzureError as err:
+                if isinstance(err, AzureSigningError):
+                    raise
+                retries_remaining = self.increment(
+                    retry_settings, request=request.http_request, error=err)
+                if retries_remaining:
+                    retry_hook(
+                        retry_settings,
+                        request=request.http_request,
+                        response=None,
+                        error=err)
+                    self.sleep(retry_settings, request.context.transport)
+                    continue
+                raise err
+        if retry_settings['history']:
+            response.context['history'] = retry_settings['history']
+        response.http_response.location_mode = retry_settings['mode']
+        return response
+
+
+class ExponentialRetry(StorageRetryPolicy):
+    """Exponential retry."""
+
+    initial_backoff: int
+    """The initial backoff interval, in seconds, for the first retry."""
+    increment_base: int
+    """The base, in seconds, to increment the initial_backoff by after the
+    first retry."""
+    random_jitter_range: int
+    """A number in seconds which indicates a range to jitter/randomize for the back-off interval."""
+
+    def __init__(
+        self, initial_backoff: int = 15,
+        increment_base: int = 3,
+        retry_total: int = 3,
+        retry_to_secondary: bool = False,
+        random_jitter_range: int = 3,
+        **kwargs: Any
+    ) -> None:
+        """
+        Constructs an Exponential retry object. The initial_backoff is used for
+        the first retry. Subsequent retries are retried after initial_backoff +
+        increment_base^retry_count seconds.
+
+        :param int initial_backoff:
+            The initial backoff interval, in seconds, for the first retry.
+        :param int increment_base:
+            The base, in seconds, to increment the initial_backoff by after the
+            first retry.
+        :param int retry_total:
+            The maximum number of retry attempts.
+        :param bool retry_to_secondary:
+            Whether the request should be retried to secondary, if able. This should
+            only be enabled if RA-GRS accounts are used and potentially stale data
+            can be handled.
+        :param int random_jitter_range:
+            A number in seconds which indicates a range to jitter/randomize for the back-off interval.
+            For example, a random_jitter_range of 3 causes the back-off interval x to vary between x-3 and x+3.
+        """
+        self.initial_backoff = initial_backoff
+        self.increment_base = increment_base
+        self.random_jitter_range = random_jitter_range
+        super(ExponentialRetry, self).__init__(
+            retry_total=retry_total, retry_to_secondary=retry_to_secondary, **kwargs)
+
+    def get_backoff_time(self, settings: Dict[str, Any]) -> float:
+        """
+        Calculates how long to sleep before retrying.
+
+        :param Dict[str, Any] settings: The configurable values pertaining to the backoff time.
+        :returns:
+            A float indicating how long to wait before retrying the request,
+            or None to indicate no retry should be performed.
+        :rtype: float
+        """
+        random_generator = random.Random()
+        backoff = self.initial_backoff + (0 if settings['count'] == 0 else pow(self.increment_base, settings['count']))
+        random_range_start = backoff - self.random_jitter_range if backoff > self.random_jitter_range else 0
+        random_range_end = backoff + self.random_jitter_range
+        return random_generator.uniform(random_range_start, random_range_end)
+
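+# Illustrative only: with the defaults above (initial_backoff=15,
+# increment_base=3, random_jitter_range=3), the pre-jitter back-off is
+#   count == 0 -> 15s, count == 1 -> 15 + 3**1 = 18s, count == 2 -> 15 + 3**2 = 24s
+# and the returned value is drawn uniformly from [max(0, backoff - 3), backoff + 3].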
+
+class LinearRetry(StorageRetryPolicy):
+    """Linear retry."""
+
+    backoff: int
+    """The backoff interval, in seconds, between retries."""
+    random_jitter_range: int
+    """A number in seconds which indicates a range to jitter/randomize for the back-off interval."""
+
+    def __init__(
+        self, backoff: int = 15,
+        retry_total: int = 3,
+        retry_to_secondary: bool = False,
+        random_jitter_range: int = 3,
+        **kwargs: Any
+    ) -> None:
+        """
+        Constructs a Linear retry object.
+
+        :param int backoff:
+            The backoff interval, in seconds, between retries.
+        :param int retry_total:
+            The maximum number of retry attempts.
+        :param bool retry_to_secondary:
+            Whether the request should be retried to secondary, if able. This should
+            only be enabled if RA-GRS accounts are used and potentially stale data
+            can be handled.
+        :param int random_jitter_range:
+            A number in seconds which indicates a range to jitter/randomize for the back-off interval.
+            For example, a random_jitter_range of 3 causes the back-off interval x to vary between x-3 and x+3.
+        """
+        self.backoff = backoff
+        self.random_jitter_range = random_jitter_range
+        super(LinearRetry, self).__init__(
+            retry_total=retry_total, retry_to_secondary=retry_to_secondary, **kwargs)
+
+    def get_backoff_time(self, settings: Dict[str, Any]) -> float:
+        """
+        Calculates how long to sleep before retrying.
+
+        :param Dict[str, Any] settings: The configurable values pertaining to the backoff time.
+        :returns:
+            A float indicating how long to wait before retrying the request,
+            or None to indicate no retry should be performed.
+        :rtype: float
+        """
+        random_generator = random.Random()
+        # the backoff interval normally does not change, however there is the possibility
+        # that it was modified by accessing the property directly after initializing the object
+        random_range_start = self.backoff - self.random_jitter_range \
+            if self.backoff > self.random_jitter_range else 0
+        random_range_end = self.backoff + self.random_jitter_range
+        return random_generator.uniform(random_range_start, random_range_end)
+
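+# Illustrative only: with the defaults above (backoff=15, random_jitter_range=3),
+# every LinearRetry sleep is drawn uniformly from the range [12, 18] seconds.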
+
+class StorageBearerTokenCredentialPolicy(BearerTokenCredentialPolicy):
+    """ Custom Bearer token credential policy for following Storage Bearer challenges """
+
+    def __init__(self, credential: "TokenCredential", audience: str, **kwargs: Any) -> None:
+        super(StorageBearerTokenCredentialPolicy, self).__init__(credential, audience, **kwargs)
+
+    def on_challenge(self, request: "PipelineRequest", response: "PipelineResponse") -> bool:
+        try:
+            auth_header = response.http_response.headers.get("WWW-Authenticate")
+            challenge = StorageHttpChallenge(auth_header)
+        except ValueError:
+            return False
+
+        scope = challenge.resource_id + DEFAULT_OAUTH_SCOPE
+        self.authorize_request(request, scope, tenant_id=challenge.tenant_id)
+
+        return True
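+
+
+# Illustrative only: a hypothetical challenge flow for the policy above. A 401
+# response carrying a header of the assumed form
+#   WWW-Authenticate: Bearer authorization_uri=https://login.microsoftonline.com/<tenant>/oauth2/authorize resource_id=https://storage.azure.com
+# is parsed by StorageHttpChallenge, after which the request is re-authorized
+# with scope = challenge.resource_id + DEFAULT_OAUTH_SCOPE and the parsed tenant id.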
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_shared/policies_async.py b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_shared/policies_async.py
new file mode 100644
index 00000000..1c030a82
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_shared/policies_async.py
@@ -0,0 +1,296 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+# pylint: disable=invalid-overridden-method
+
+import asyncio
+import logging
+import random
+from typing import Any, Dict, TYPE_CHECKING
+
+from azure.core.exceptions import AzureError, StreamClosedError, StreamConsumedError
+from azure.core.pipeline.policies import AsyncBearerTokenCredentialPolicy, AsyncHTTPPolicy
+
+from .authentication import AzureSigningError, StorageHttpChallenge
+from .constants import DEFAULT_OAUTH_SCOPE
+from .policies import encode_base64, is_retry, StorageContentValidation, StorageRetryPolicy
+
+if TYPE_CHECKING:
+    from azure.core.credentials_async import AsyncTokenCredential
+    from azure.core.pipeline.transport import (  # pylint: disable=non-abstract-transport-import
+        PipelineRequest,
+        PipelineResponse
+    )
+
+
+_LOGGER = logging.getLogger(__name__)
+
+
+async def retry_hook(settings, **kwargs):
+    if settings['hook']:
+        if asyncio.iscoroutine(settings['hook']):
+            await settings['hook'](
+                retry_count=settings['count'] - 1,
+                location_mode=settings['mode'],
+                **kwargs)
+        else:
+            settings['hook'](
+                retry_count=settings['count'] - 1,
+                location_mode=settings['mode'],
+                **kwargs)
+
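+# Illustrative only: a hypothetical user hook compatible with retry_hook above.
+# A plain function or a coroutine may be supplied; it receives retry_count,
+# location_mode, and the request/response/error keyword arguments passed by the
+# retry policy.
+#
+#   def my_retry_hook(retry_count=None, location_mode=None, **kwargs):
+#       _LOGGER.info("Retry %s against %s location", retry_count, location_mode)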
+
+async def is_checksum_retry(response):
+    # retry if invalid content md5
+    if response.context.get('validate_content', False) and response.http_response.headers.get('content-md5'):
+        try:
+            await response.http_response.load_body()  # Load the body in memory and close the socket
+        except (StreamClosedError, StreamConsumedError):
+            pass
+        computed_md5 = response.http_request.headers.get('content-md5', None) or \
+                            encode_base64(StorageContentValidation.get_content_md5(response.http_response.body()))
+        if response.http_response.headers['content-md5'] != computed_md5:
+            return True
+    return False
+
+
+class AsyncStorageResponseHook(AsyncHTTPPolicy):
+
+    def __init__(self, **kwargs):
+        self._response_callback = kwargs.get('raw_response_hook')
+        super(AsyncStorageResponseHook, self).__init__()
+
+    async def send(self, request: "PipelineRequest") -> "PipelineResponse":
+        # Values could be 0
+        data_stream_total = request.context.get('data_stream_total')
+        if data_stream_total is None:
+            data_stream_total = request.context.options.pop('data_stream_total', None)
+        download_stream_current = request.context.get('download_stream_current')
+        if download_stream_current is None:
+            download_stream_current = request.context.options.pop('download_stream_current', None)
+        upload_stream_current = request.context.get('upload_stream_current')
+        if upload_stream_current is None:
+            upload_stream_current = request.context.options.pop('upload_stream_current', None)
+
+        response_callback = request.context.get('response_callback') or \
+            request.context.options.pop('raw_response_hook', self._response_callback)
+
+        response = await self.next.send(request)
+
+        will_retry = is_retry(response, request.context.options.get('mode')) or await is_checksum_retry(response)
+        # Auth error could come from Bearer challenge, in which case this request will be made again
+        is_auth_error = response.http_response.status_code == 401
+        should_update_counts = not (will_retry or is_auth_error)
+
+        if should_update_counts and download_stream_current is not None:
+            download_stream_current += int(response.http_response.headers.get('Content-Length', 0))
+            if data_stream_total is None:
+                content_range = response.http_response.headers.get('Content-Range')
+                if content_range:
+                    data_stream_total = int(content_range.split(' ', 1)[1].split('/', 1)[1])
+                else:
+                    data_stream_total = download_stream_current
+        elif should_update_counts and upload_stream_current is not None:
+            upload_stream_current += int(response.http_request.headers.get('Content-Length', 0))
+        for pipeline_obj in [request, response]:
+            if hasattr(pipeline_obj, 'context'):
+                pipeline_obj.context['data_stream_total'] = data_stream_total
+                pipeline_obj.context['download_stream_current'] = download_stream_current
+                pipeline_obj.context['upload_stream_current'] = upload_stream_current
+        if response_callback:
+            if asyncio.iscoroutine(response_callback):
+                await response_callback(response) # type: ignore
+            else:
+                response_callback(response)
+            request.context['response_callback'] = response_callback
+        return response
+
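+# Illustrative only: how the download total is derived in the hook above. For a
+# response header Content-Range of "bytes 0-1023/4096",
+#   int("bytes 0-1023/4096".split(' ', 1)[1].split('/', 1)[1]) == 4096
+# so data_stream_total becomes 4096.
+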
+class AsyncStorageRetryPolicy(StorageRetryPolicy):
+    """
+    The base class for Exponential and Linear retries containing shared code.
+    """
+
+    async def sleep(self, settings, transport):
+        backoff = self.get_backoff_time(settings)
+        if not backoff or backoff < 0:
+            return
+        await transport.sleep(backoff)
+
+    async def send(self, request):
+        retries_remaining = True
+        response = None
+        retry_settings = self.configure_retries(request)
+        while retries_remaining:
+            try:
+                response = await self.next.send(request)
+                if is_retry(response, retry_settings['mode']) or await is_checksum_retry(response):
+                    retries_remaining = self.increment(
+                        retry_settings,
+                        request=request.http_request,
+                        response=response.http_response)
+                    if retries_remaining:
+                        await retry_hook(
+                            retry_settings,
+                            request=request.http_request,
+                            response=response.http_response,
+                            error=None)
+                        await self.sleep(retry_settings, request.context.transport)
+                        continue
+                break
+            except AzureError as err:
+                if isinstance(err, AzureSigningError):
+                    raise
+                retries_remaining = self.increment(
+                    retry_settings, request=request.http_request, error=err)
+                if retries_remaining:
+                    await retry_hook(
+                        retry_settings,
+                        request=request.http_request,
+                        response=None,
+                        error=err)
+                    await self.sleep(retry_settings, request.context.transport)
+                    continue
+                raise err
+        if retry_settings['history']:
+            response.context['history'] = retry_settings['history']
+        response.http_response.location_mode = retry_settings['mode']
+        return response
+
+
+class ExponentialRetry(AsyncStorageRetryPolicy):
+    """Exponential retry."""
+
+    initial_backoff: int
+    """The initial backoff interval, in seconds, for the first retry."""
+    increment_base: int
+    """The base, in seconds, to increment the initial_backoff by after the
+    first retry."""
+    random_jitter_range: int
+    """A number in seconds which indicates a range to jitter/randomize for the back-off interval."""
+
+    def __init__(
+        self,
+        initial_backoff: int = 15,
+        increment_base: int = 3,
+        retry_total: int = 3,
+        retry_to_secondary: bool = False,
+        random_jitter_range: int = 3, **kwargs
+    ) -> None:
+        """
+        Constructs an Exponential retry object. The initial_backoff is used for
+        the first retry. Subsequent retries are retried after initial_backoff +
+        increment_base^retry_count seconds. For example, by default the first retry
+        occurs after 15 seconds, the second after (15+3^1) = 18 seconds, and the
+        third after (15+3^2) = 24 seconds.
+
+        :param int initial_backoff:
+            The initial backoff interval, in seconds, for the first retry.
+        :param int increment_base:
+            The base, in seconds, to increment the initial_backoff by after the
+            first retry.
+        :param int retry_total:
+            The maximum number of retry attempts.
+        :param bool retry_to_secondary:
+            Whether the request should be retried to secondary, if able. This should
+            only be enabled if RA-GRS accounts are used and potentially stale data
+            can be handled.
+        :param int random_jitter_range:
+            A number in seconds which indicates a range to jitter/randomize for the back-off interval.
+            For example, a random_jitter_range of 3 causes the back-off interval x to vary between x-3 and x+3.
+        """
+        self.initial_backoff = initial_backoff
+        self.increment_base = increment_base
+        self.random_jitter_range = random_jitter_range
+        super(ExponentialRetry, self).__init__(
+            retry_total=retry_total, retry_to_secondary=retry_to_secondary, **kwargs)
+
+    def get_backoff_time(self, settings: Dict[str, Any]) -> float:
+        """
+        Calculates how long to sleep before retrying.
+
+        :param Dict[str, Any] settings: The configurable values pertaining to the backoff time.
+        :return:
+            A float indicating how long to wait before retrying the request,
+            or None to indicate no retry should be performed.
+        :rtype: float
+        """
+        random_generator = random.Random()
+        backoff = self.initial_backoff + (0 if settings['count'] == 0 else pow(self.increment_base, settings['count']))
+        random_range_start = backoff - self.random_jitter_range if backoff > self.random_jitter_range else 0
+        random_range_end = backoff + self.random_jitter_range
+        return random_generator.uniform(random_range_start, random_range_end)
+
+
+class LinearRetry(AsyncStorageRetryPolicy):
+    """Linear retry."""
+
+    backoff: int
+    """The backoff interval, in seconds, between retries."""
+    random_jitter_range: int
+    """A number in seconds which indicates a range to jitter/randomize for the back-off interval."""
+
+    def __init__(
+        self, backoff: int = 15,
+        retry_total: int = 3,
+        retry_to_secondary: bool = False,
+        random_jitter_range: int = 3,
+        **kwargs: Any
+    ) -> None:
+        """
+        Constructs a Linear retry object.
+
+        :param int backoff:
+            The backoff interval, in seconds, between retries.
+        :param int retry_total:
+            The maximum number of retry attempts.
+        :param bool retry_to_secondary:
+            Whether the request should be retried to secondary, if able. This should
+            only be enabled if RA-GRS accounts are used and potentially stale data
+            can be handled.
+        :param int random_jitter_range:
+            A number in seconds which indicates a range to jitter/randomize for the back-off interval.
+            For example, a random_jitter_range of 3 causes the back-off interval x to vary between x-3 and x+3.
+        """
+        self.backoff = backoff
+        self.random_jitter_range = random_jitter_range
+        super(LinearRetry, self).__init__(
+            retry_total=retry_total, retry_to_secondary=retry_to_secondary, **kwargs)
+
+    def get_backoff_time(self, settings: Dict[str, Any]) -> float:
+        """
+        Calculates how long to sleep before retrying.
+
+        :param Dict[str, Any] settings: The configurable values pertaining to the backoff time.
+        :return:
+            A float indicating how long to wait before retrying the request,
+            or None to indicate no retry should be performed.
+        :rtype: float
+        """
+        random_generator = random.Random()
+        # the backoff interval normally does not change, however there is the possibility
+        # that it was modified by accessing the property directly after initializing the object
+        random_range_start = self.backoff - self.random_jitter_range \
+            if self.backoff > self.random_jitter_range else 0
+        random_range_end = self.backoff + self.random_jitter_range
+        return random_generator.uniform(random_range_start, random_range_end)
+
+
+class AsyncStorageBearerTokenCredentialPolicy(AsyncBearerTokenCredentialPolicy):
+    """ Custom Bearer token credential policy for following Storage Bearer challenges """
+
+    def __init__(self, credential: "AsyncTokenCredential", audience: str, **kwargs: Any) -> None:
+        super(AsyncStorageBearerTokenCredentialPolicy, self).__init__(credential, audience, **kwargs)
+
+    async def on_challenge(self, request: "PipelineRequest", response: "PipelineResponse") -> bool:
+        try:
+            auth_header = response.http_response.headers.get("WWW-Authenticate")
+            challenge = StorageHttpChallenge(auth_header)
+        except ValueError:
+            return False
+
+        scope = challenge.resource_id + DEFAULT_OAUTH_SCOPE
+        await self.authorize_request(request, scope, tenant_id=challenge.tenant_id)
+
+        return True
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_shared/request_handlers.py b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_shared/request_handlers.py
new file mode 100644
index 00000000..54927cc7
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_shared/request_handlers.py
@@ -0,0 +1,270 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+
+import logging
+import stat
+from io import (SEEK_END, SEEK_SET, UnsupportedOperation)
+from os import fstat
+from typing import Dict, Optional
+
+import isodate
+
+
+_LOGGER = logging.getLogger(__name__)
+
+_REQUEST_DELIMITER_PREFIX = "batch_"
+_HTTP1_1_IDENTIFIER = "HTTP/1.1"
+_HTTP_LINE_ENDING = "\r\n"
+
+
+def serialize_iso(attr):
+    """Serialize Datetime object into ISO-8601 formatted string.
+
+    :param Datetime attr: Object to be serialized.
+    :rtype: str
+    :raises: ValueError if format invalid.
+    """
+    if not attr:
+        return None
+    if isinstance(attr, str):
+        attr = isodate.parse_datetime(attr)
+    try:
+        utc = attr.utctimetuple()
+        if utc.tm_year > 9999 or utc.tm_year < 1:
+            raise OverflowError("Hit max or min date")
+
+        date = f"{utc.tm_year:04}-{utc.tm_mon:02}-{utc.tm_mday:02}T{utc.tm_hour:02}:{utc.tm_min:02}:{utc.tm_sec:02}"
+        return date + 'Z'
+    except (ValueError, OverflowError) as err:
+        raise ValueError("Unable to serialize datetime object.") from err
+    except AttributeError as err:
+        raise TypeError("ISO-8601 object must be valid datetime object.") from err
+
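+# Illustrative only: serialize_iso truncates to whole seconds and appends 'Z':
+#   serialize_iso(datetime.datetime(2025, 3, 28, 21, 52, 21)) == '2025-03-28T21:52:21Z'
+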
+def get_length(data):
+    length = None
+    # Check if object implements the __len__ method, covers most input cases such as bytearray.
+    try:
+        length = len(data)
+    except:  # pylint: disable=bare-except
+        pass
+
+    if not length:
+        # Check if the stream is a file-like stream object.
+        # If so, calculate the size using the file descriptor.
+        try:
+            fileno = data.fileno()
+        except (AttributeError, UnsupportedOperation):
+            pass
+        else:
+            try:
+                mode = fstat(fileno).st_mode
+                if stat.S_ISREG(mode) or stat.S_ISLNK(mode):
+                    # st_size is only meaningful for regular files or symlinks;
+                    # other types, e.g. sockets, may return misleading sizes like 0
+                    return fstat(fileno).st_size
+            except OSError:
+                # Not a valid fileno; possibly a socket number was returned
+                # instead of a real file descriptor
+                pass
+
+        # If the stream is seekable and tell() is implemented, calculate the stream size.
+        try:
+            current_position = data.tell()
+            data.seek(0, SEEK_END)
+            length = data.tell() - current_position
+            data.seek(current_position, SEEK_SET)
+        except (AttributeError, OSError, UnsupportedOperation):
+            pass
+
+    return length
+
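+# Illustrative only: get_length(b"abcd") == 4 via len(); for a seekable file
+# object the helper falls back to fstat on the file descriptor, then to seek/tell.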
+
+def read_length(data):
+    try:
+        if hasattr(data, 'read'):
+            read_data = b''
+            for chunk in iter(lambda: data.read(4096), b""):
+                read_data += chunk
+            return len(read_data), read_data
+        if hasattr(data, '__iter__'):
+            read_data = b''
+            for chunk in data:
+                read_data += chunk
+            return len(read_data), read_data
+    except:  # pylint: disable=bare-except
+        pass
+    raise ValueError("Unable to calculate content length, please specify.")
+
+
+def validate_and_format_range_headers(
+        start_range, end_range, start_range_required=True,
+        end_range_required=True, check_content_md5=False, align_to_page=False):
+    # If end range is provided, start range must be provided
+    if (start_range_required or end_range is not None) and start_range is None:
+        raise ValueError("start_range value cannot be None.")
+    if end_range_required and end_range is None:
+        raise ValueError("end_range value cannot be None.")
+
+    # Page ranges must be 512 aligned
+    if align_to_page:
+        if start_range is not None and start_range % 512 != 0:
+            raise ValueError(f"Invalid page blob start_range: {start_range}. "
+                             "The size must be aligned to a 512-byte boundary.")
+        if end_range is not None and end_range % 512 != 511:
+            raise ValueError(f"Invalid page blob end_range: {end_range}. "
+                             "The size must be aligned to a 512-byte boundary.")
+
+    # Format based on whether end_range is present
+    range_header = None
+    if end_range is not None:
+        range_header = f'bytes={start_range}-{end_range}'
+    elif start_range is not None:
+        range_header = f"bytes={start_range}-"
+
+    # Content MD5 can only be provided for a complete range less than 4MB in size
+    range_validation = None
+    if check_content_md5:
+        if start_range is None or end_range is None:
+            raise ValueError("Both start and end range required for MD5 content validation.")
+        if end_range - start_range > 4 * 1024 * 1024:
+            raise ValueError("Getting content MD5 for a range greater than 4MB is not supported.")
+        range_validation = 'true'
+
+    return range_header, range_validation
+
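+# Illustrative examples for the helper above:
+#   validate_and_format_range_headers(0, 511) == ('bytes=0-511', None)
+#   validate_and_format_range_headers(0, 511, check_content_md5=True) == ('bytes=0-511', 'true')
+# With align_to_page=True, start_range must be a multiple of 512 and end_range
+# must fall on a 512-byte boundary (end_range % 512 == 511).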
+
+def add_metadata_headers(metadata=None):
+    # type: (Optional[Dict[str, str]]) -> Dict[str, str]
+    headers = {}
+    if metadata:
+        for key, value in metadata.items():
+            headers[f'x-ms-meta-{key.strip()}'] = value.strip() if value else value
+    return headers
+
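+# Illustrative only, with hypothetical metadata:
+#   add_metadata_headers({'project': ' gn-ai '}) == {'x-ms-meta-project': 'gn-ai'}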
+
+def serialize_batch_body(requests, batch_id):
+    """
+    --<delimiter>
+    <subrequest>
+    --<delimiter>
+    <subrequest>    (repeated as needed)
+    --<delimiter>--
+
+    Serializes the requests in this batch to a single HTTP mixed/multipart body.
+
+    :param List[~azure.core.pipeline.transport.HttpRequest] requests:
+        a list of sub-requests for the batch request
+    :param str batch_id:
+        The id to be embedded in the batch sub-request delimiter
+    :returns: The body bytes for this batch.
+    :rtype: bytes
+    """
+
+    if requests is None or len(requests) == 0:
+        raise ValueError('Please provide sub-request(s) for this batch request')
+
+    delimiter_bytes = (_get_batch_request_delimiter(batch_id, True, False) + _HTTP_LINE_ENDING).encode('utf-8')
+    newline_bytes = _HTTP_LINE_ENDING.encode('utf-8')
+    batch_body = []
+
+    content_index = 0
+    for request in requests:
+        request.headers.update({
+            "Content-ID": str(content_index),
+            "Content-Length": str(0)
+        })
+        batch_body.append(delimiter_bytes)
+        batch_body.append(_make_body_from_sub_request(request))
+        batch_body.append(newline_bytes)
+        content_index += 1
+
+    batch_body.append(_get_batch_request_delimiter(batch_id, True, True).encode('utf-8'))
+    # final line of body MUST have \r\n at the end, or it will not be properly read by the service
+    batch_body.append(newline_bytes)
+
+    return b"".join(batch_body)
+
+
+def _get_batch_request_delimiter(batch_id, is_prepend_dashes=False, is_append_dashes=False):
+    """
+    Gets the delimiter used for this batch request's mixed/multipart HTTP format.
+
+    :param str batch_id:
+        Randomly generated id
+    :param bool is_prepend_dashes:
+        Whether to include the starting dashes. Used in the body, but not when defining the delimiter.
+    :param bool is_append_dashes:
+        Whether to include the ending dashes. Used in the body on the closing delimiter only.
+    :returns: The delimiter, WITHOUT a trailing newline.
+    :rtype: str
+    """
+
+    prepend_dashes = '--' if is_prepend_dashes else ''
+    append_dashes = '--' if is_append_dashes else ''
+
+    return prepend_dashes + _REQUEST_DELIMITER_PREFIX + batch_id + append_dashes
+
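+# Illustrative only, with a hypothetical batch id:
+#   _get_batch_request_delimiter('abc123', True, False) == '--batch_abc123'
+#   _get_batch_request_delimiter('abc123', True, True) == '--batch_abc123--'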
+
+def _make_body_from_sub_request(sub_request):
+    """
+     Content-Type: application/http
+     Content-ID: <sequential int ID>
+     Content-Transfer-Encoding: <value> (if present)
+
+     <verb> <path><query> HTTP/<version>
+     <header key>: <header value> (repeated as necessary)
+     Content-Length: <value>
+     (newline if content length > 0)
+     <body> (if content length > 0)
+
+     Serializes an http request.
+
+     :param ~azure.core.pipeline.transport.HttpRequest sub_request:
+        Request to serialize.
+     :returns: The serialized sub-request in bytes
+     :rtype: bytes
+     """
+
+    # put the sub-request's headers into a list for efficient str concatenation
+    sub_request_body = []
+
+    # get headers for ease of manipulation; remove headers as they are used
+    headers = sub_request.headers
+
+    # append opening headers
+    sub_request_body.append("Content-Type: application/http")
+    sub_request_body.append(_HTTP_LINE_ENDING)
+
+    sub_request_body.append("Content-ID: ")
+    sub_request_body.append(headers.pop("Content-ID", ""))
+    sub_request_body.append(_HTTP_LINE_ENDING)
+
+    sub_request_body.append("Content-Transfer-Encoding: binary")
+    sub_request_body.append(_HTTP_LINE_ENDING)
+
+    # append blank line
+    sub_request_body.append(_HTTP_LINE_ENDING)
+
+    # append HTTP verb and path and query and HTTP version
+    sub_request_body.append(sub_request.method)
+    sub_request_body.append(' ')
+    sub_request_body.append(sub_request.url)
+    sub_request_body.append(' ')
+    sub_request_body.append(_HTTP1_1_IDENTIFIER)
+    sub_request_body.append(_HTTP_LINE_ENDING)
+
+    # append remaining headers (this will set the Content-Length, as it was set on `sub_request`)
+    for header_name, header_value in headers.items():
+        if header_value is not None:
+            sub_request_body.append(header_name)
+            sub_request_body.append(": ")
+            sub_request_body.append(header_value)
+            sub_request_body.append(_HTTP_LINE_ENDING)
+
+    # append blank line
+    sub_request_body.append(_HTTP_LINE_ENDING)
+
+    return ''.join(sub_request_body).encode()
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_shared/response_handlers.py b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_shared/response_handlers.py
new file mode 100644
index 00000000..af9a2fcd
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_shared/response_handlers.py
@@ -0,0 +1,200 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+import logging
+from typing import NoReturn
+from xml.etree.ElementTree import Element
+
+from azure.core.exceptions import (
+    ClientAuthenticationError,
+    DecodeError,
+    HttpResponseError,
+    ResourceExistsError,
+    ResourceModifiedError,
+    ResourceNotFoundError,
+)
+from azure.core.pipeline.policies import ContentDecodePolicy
+
+from .authentication import AzureSigningError
+from .models import get_enum_value, StorageErrorCode, UserDelegationKey
+from .parser import _to_utc_datetime
+
+
+_LOGGER = logging.getLogger(__name__)
+
+
+class PartialBatchErrorException(HttpResponseError):
+    """There is a partial failure in batch operations.
+
+    :param str message: The message of the exception.
+    :param response: Server response to be deserialized.
+    :param list parts: A list of the parts in multipart response.
+    """
+
+    def __init__(self, message, response, parts):
+        self.parts = parts
+        super(PartialBatchErrorException, self).__init__(message=message, response=response)
+
+
+# Parses the blob length from the content range header: bytes 1-3/65537
+def parse_length_from_content_range(content_range):
+    if content_range is None:
+        return None
+
+    # First, split in space and take the second half: '1-3/65537'
+    # Next, split on slash and take the second half: '65537'
+    # Finally, convert to an int: 65537
+    return int(content_range.split(' ', 1)[1].split('/', 1)[1])
+
+
+def normalize_headers(headers):
+    normalized = {}
+    for key, value in headers.items():
+        if key.startswith('x-ms-'):
+            key = key[5:]
+        normalized[key.lower().replace('-', '_')] = get_enum_value(value)
+    return normalized
+
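+# Illustrative only, with hypothetical headers and assuming get_enum_value
+# passes plain strings through unchanged:
+#   normalize_headers({'x-ms-lease-status': 'unlocked', 'Content-Length': '42'})
+#       == {'lease_status': 'unlocked', 'content_length': '42'}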
+
+def deserialize_metadata(response, obj, headers):  # pylint: disable=unused-argument
+    try:
+        raw_metadata = {k: v for k, v in response.http_response.headers.items() if k.lower().startswith('x-ms-meta-')}
+    except AttributeError:
+        raw_metadata = {k: v for k, v in response.headers.items() if k.lower().startswith('x-ms-meta-')}
+    return {k[10:]: v for k, v in raw_metadata.items()}
+
+
+def return_response_headers(response, deserialized, response_headers):  # pylint: disable=unused-argument
+    return normalize_headers(response_headers)
+
+
+def return_headers_and_deserialized(response, deserialized, response_headers):  # pylint: disable=unused-argument
+    return normalize_headers(response_headers), deserialized
+
+
+def return_context_and_deserialized(response, deserialized, response_headers):  # pylint: disable=unused-argument
+    return response.http_response.location_mode, deserialized
+
+
+def return_raw_deserialized(response, *_):
+    return response.http_response.location_mode, response.context[ContentDecodePolicy.CONTEXT_NAME]
+
+
+def process_storage_error(storage_error) -> NoReturn: # type: ignore [misc] # pylint:disable=too-many-statements, too-many-branches
+    raise_error = HttpResponseError
+    serialized = False
+    if isinstance(storage_error, AzureSigningError):
+        storage_error.message = storage_error.message + \
+            '. This is likely due to an invalid shared key. Please check your shared key and try again.'
+    if not storage_error.response or storage_error.response.status_code in [200, 204]:
+        raise storage_error
+    # If it is one of those three then it has been serialized prior by the generated layer.
+    if isinstance(storage_error, (PartialBatchErrorException,
+                                  ClientAuthenticationError, ResourceNotFoundError, ResourceExistsError)):
+        serialized = True
+    error_code = storage_error.response.headers.get('x-ms-error-code')
+    error_message = storage_error.message
+    additional_data = {}
+    error_dict = {}
+    try:
+        error_body = ContentDecodePolicy.deserialize_from_http_generics(storage_error.response)
+        try:
+            if error_body is None or len(error_body) == 0:
+                error_body = storage_error.response.reason
+        except AttributeError:
+            error_body = ''
+        # If it is an XML response
+        if isinstance(error_body, Element):
+            error_dict = {
+                child.tag.lower(): child.text
+                for child in error_body
+            }
+        # If it is a JSON response
+        elif isinstance(error_body, dict):
+            error_dict = error_body.get('error', {})
+        elif not error_code:
+            _LOGGER.warning(
+                'Unexpected return type %s from ContentDecodePolicy.deserialize_from_http_generics.', type(error_body))
+            error_dict = {'message': str(error_body)}
+
+        # If we extracted from a Json or XML response
+        # There is a chance error_dict is just a string
+        if error_dict and isinstance(error_dict, dict):
+            error_code = error_dict.get('code')
+            error_message = error_dict.get('message')
+            additional_data = {k: v for k, v in error_dict.items() if k not in {'code', 'message'}}
+    except DecodeError:
+        pass
+
+    try:
+        # This check would be unnecessary if we have already serialized the error
+        if error_code and not serialized:
+            error_code = StorageErrorCode(error_code)
+            if error_code in [StorageErrorCode.condition_not_met,
+                              StorageErrorCode.blob_overwritten]:
+                raise_error = ResourceModifiedError
+            if error_code in [StorageErrorCode.invalid_authentication_info,
+                              StorageErrorCode.authentication_failed]:
+                raise_error = ClientAuthenticationError
+            if error_code in [StorageErrorCode.resource_not_found,
+                              StorageErrorCode.cannot_verify_copy_source,
+                              StorageErrorCode.blob_not_found,
+                              StorageErrorCode.queue_not_found,
+                              StorageErrorCode.container_not_found,
+                              StorageErrorCode.parent_not_found,
+                              StorageErrorCode.share_not_found]:
+                raise_error = ResourceNotFoundError
+            if error_code in [StorageErrorCode.account_already_exists,
+                              StorageErrorCode.account_being_created,
+                              StorageErrorCode.resource_already_exists,
+                              StorageErrorCode.resource_type_mismatch,
+                              StorageErrorCode.blob_already_exists,
+                              StorageErrorCode.queue_already_exists,
+                              StorageErrorCode.container_already_exists,
+                              StorageErrorCode.container_being_deleted,
+                              StorageErrorCode.queue_being_deleted,
+                              StorageErrorCode.share_already_exists,
+                              StorageErrorCode.share_being_deleted]:
+                raise_error = ResourceExistsError
+    except ValueError:
+        # Got an unknown error code
+        pass
+
+    # Error message should include all the error properties
+    try:
+        error_message += f"\nErrorCode:{error_code.value}"
+    except AttributeError:
+        error_message += f"\nErrorCode:{error_code}"
+    for name, info in additional_data.items():
+        error_message += f"\n{name}:{info}"
+
+    # No need to create an instance if it has already been serialized by the generated layer
+    if serialized:
+        storage_error.message = error_message
+        error = storage_error
+    else:
+        error = raise_error(message=error_message, response=storage_error.response)
+    # Ensure these properties are stored in the error instance as well (not just the error message)
+    error.error_code = error_code
+    error.additional_info = additional_data
+    # error.args is what's surfaced on the traceback - show error message in all cases
+    error.args = (error.message,)
+    try:
+        # `from None` prevents us from double printing the exception (suppresses generated layer error context)
+        exec("raise error from None")   # pylint: disable=exec-used # nosec
+    except SyntaxError as exc:
+        raise error from exc
+
+
+def parse_to_internal_user_delegation_key(service_user_delegation_key):
+    internal_user_delegation_key = UserDelegationKey()
+    internal_user_delegation_key.signed_oid = service_user_delegation_key.signed_oid
+    internal_user_delegation_key.signed_tid = service_user_delegation_key.signed_tid
+    internal_user_delegation_key.signed_start = _to_utc_datetime(service_user_delegation_key.signed_start)
+    internal_user_delegation_key.signed_expiry = _to_utc_datetime(service_user_delegation_key.signed_expiry)
+    internal_user_delegation_key.signed_service = service_user_delegation_key.signed_service
+    internal_user_delegation_key.signed_version = service_user_delegation_key.signed_version
+    internal_user_delegation_key.value = service_user_delegation_key.value
+    return internal_user_delegation_key
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_shared/shared_access_signature.py b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_shared/shared_access_signature.py
new file mode 100644
index 00000000..df29222b
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_shared/shared_access_signature.py
@@ -0,0 +1,252 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+# pylint: disable=docstring-keyword-should-match-keyword-only
+
+from datetime import date
+
+from .parser import _to_utc_datetime
+from .constants import X_MS_VERSION
+from . import sign_string, url_quote
+
+# cspell:ignoreRegExp rsc.
+# cspell:ignoreRegExp s..?id
+class QueryStringConstants(object):
+    SIGNED_SIGNATURE = 'sig'
+    SIGNED_PERMISSION = 'sp'
+    SIGNED_START = 'st'
+    SIGNED_EXPIRY = 'se'
+    SIGNED_RESOURCE = 'sr'
+    SIGNED_IDENTIFIER = 'si'
+    SIGNED_IP = 'sip'
+    SIGNED_PROTOCOL = 'spr'
+    SIGNED_VERSION = 'sv'
+    SIGNED_CACHE_CONTROL = 'rscc'
+    SIGNED_CONTENT_DISPOSITION = 'rscd'
+    SIGNED_CONTENT_ENCODING = 'rsce'
+    SIGNED_CONTENT_LANGUAGE = 'rscl'
+    SIGNED_CONTENT_TYPE = 'rsct'
+    START_PK = 'spk'
+    START_RK = 'srk'
+    END_PK = 'epk'
+    END_RK = 'erk'
+    SIGNED_RESOURCE_TYPES = 'srt'
+    SIGNED_SERVICES = 'ss'
+    SIGNED_OID = 'skoid'
+    SIGNED_TID = 'sktid'
+    SIGNED_KEY_START = 'skt'
+    SIGNED_KEY_EXPIRY = 'ske'
+    SIGNED_KEY_SERVICE = 'sks'
+    SIGNED_KEY_VERSION = 'skv'
+    SIGNED_ENCRYPTION_SCOPE = 'ses'
+
+    # for ADLS
+    SIGNED_AUTHORIZED_OID = 'saoid'
+    SIGNED_UNAUTHORIZED_OID = 'suoid'
+    SIGNED_CORRELATION_ID = 'scid'
+    SIGNED_DIRECTORY_DEPTH = 'sdd'
+
+    @staticmethod
+    def to_list():
+        return [
+            QueryStringConstants.SIGNED_SIGNATURE,
+            QueryStringConstants.SIGNED_PERMISSION,
+            QueryStringConstants.SIGNED_START,
+            QueryStringConstants.SIGNED_EXPIRY,
+            QueryStringConstants.SIGNED_RESOURCE,
+            QueryStringConstants.SIGNED_IDENTIFIER,
+            QueryStringConstants.SIGNED_IP,
+            QueryStringConstants.SIGNED_PROTOCOL,
+            QueryStringConstants.SIGNED_VERSION,
+            QueryStringConstants.SIGNED_CACHE_CONTROL,
+            QueryStringConstants.SIGNED_CONTENT_DISPOSITION,
+            QueryStringConstants.SIGNED_CONTENT_ENCODING,
+            QueryStringConstants.SIGNED_CONTENT_LANGUAGE,
+            QueryStringConstants.SIGNED_CONTENT_TYPE,
+            QueryStringConstants.START_PK,
+            QueryStringConstants.START_RK,
+            QueryStringConstants.END_PK,
+            QueryStringConstants.END_RK,
+            QueryStringConstants.SIGNED_RESOURCE_TYPES,
+            QueryStringConstants.SIGNED_SERVICES,
+            QueryStringConstants.SIGNED_OID,
+            QueryStringConstants.SIGNED_TID,
+            QueryStringConstants.SIGNED_KEY_START,
+            QueryStringConstants.SIGNED_KEY_EXPIRY,
+            QueryStringConstants.SIGNED_KEY_SERVICE,
+            QueryStringConstants.SIGNED_KEY_VERSION,
+            QueryStringConstants.SIGNED_ENCRYPTION_SCOPE,
+            # for ADLS
+            QueryStringConstants.SIGNED_AUTHORIZED_OID,
+            QueryStringConstants.SIGNED_UNAUTHORIZED_OID,
+            QueryStringConstants.SIGNED_CORRELATION_ID,
+            QueryStringConstants.SIGNED_DIRECTORY_DEPTH,
+        ]
+
+
+class SharedAccessSignature(object):
+    '''
+    Provides a factory for creating account access
+    signature tokens with an account name and account key. Users can either
+    use the factory or can construct the appropriate service and use the
+    generate_*_shared_access_signature method directly.
+    '''
+
+    def __init__(self, account_name, account_key, x_ms_version=X_MS_VERSION):
+        '''
+        :param str account_name:
+            The storage account name used to generate the shared access signatures.
+        :param str account_key:
+            The access key to generate the shared access signatures.
+        :param str x_ms_version:
+            The service version used to generate the shared access signatures.
+        '''
+        self.account_name = account_name
+        self.account_key = account_key
+        self.x_ms_version = x_ms_version
+
+    def generate_account(
+        self, services,
+        resource_types,
+        permission,
+        expiry,
+        start=None,
+        ip=None,
+        protocol=None,
+        sts_hook=None,
+        **kwargs
+    ) -> str:
+        '''
+        Generates a shared access signature for the account.
+        Use the returned signature with the sas_token parameter of the service
+        or to create a new account object.
+
+        :param Any services: The specified services associated with the shared access signature.
+        :param ResourceTypes resource_types:
+            Specifies the resource types that are accessible with the account
+            SAS. You can combine values to provide access to more than one
+            resource type.
+        :param AccountSasPermissions permission:
+            The permissions associated with the shared access signature. The
+            user is restricted to operations allowed by the permissions.
+            Required unless an id is given referencing a stored access policy
+            which contains this field. This field must be omitted if it has been
+            specified in an associated stored access policy. You can combine
+            values to provide more than one permission.
+        :param expiry:
+            The time at which the shared access signature becomes invalid.
+            Required unless an id is given referencing a stored access policy
+            which contains this field. This field must be omitted if it has
+            been specified in an associated stored access policy. Azure will always
+            convert values to UTC. If a date is passed in without timezone info, it
+            is assumed to be UTC.
+        :type expiry: datetime or str
+        :param start:
+            The time at which the shared access signature becomes valid. If
+            omitted, start time for this call is assumed to be the time when the
+            storage service receives the request. The provided datetime will always
+            be interpreted as UTC.
+        :type start: datetime or str
+        :param str ip:
+            Specifies an IP address or a range of IP addresses from which to accept requests.
+            If the IP address from which the request originates does not match the IP address
+            or address range specified on the SAS token, the request is not authenticated.
+            For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS
+            restricts the request to those IP addresses.
+        :param str protocol:
+            Specifies the protocol permitted for a request made. The default value
+            is https,http. See :class:`~azure.storage.common.models.Protocol` for possible values.
+        :keyword str encryption_scope:
+            Optional. If specified, this is the encryption scope to use when sending requests
+            authorized with this SAS URI.
+        :param sts_hook:
+            For debugging purposes only. If provided, the hook is called with the string to sign
+            that was used to generate the SAS.
+        :type sts_hook: Optional[Callable[[str], None]]
+        :returns: The generated SAS token for the account.
+        :rtype: str
+        '''
+        sas = _SharedAccessHelper()
+        sas.add_base(permission, expiry, start, ip, protocol, self.x_ms_version)
+        sas.add_account(services, resource_types)
+        sas.add_encryption_scope(**kwargs)
+        sas.add_account_signature(self.account_name, self.account_key)
+
+        if sts_hook is not None:
+            sts_hook(sas.string_to_sign)
+
+        return sas.get_token()
+
+
+class _SharedAccessHelper(object):
+    def __init__(self):
+        self.query_dict = {}
+        self.string_to_sign = ""
+
+    def _add_query(self, name, val):
+        if val:
+            self.query_dict[name] = str(val) if val is not None else None
+
+    def add_encryption_scope(self, **kwargs):
+        self._add_query(QueryStringConstants.SIGNED_ENCRYPTION_SCOPE, kwargs.pop('encryption_scope', None))
+
+    def add_base(self, permission, expiry, start, ip, protocol, x_ms_version):
+        if isinstance(start, date):
+            start = _to_utc_datetime(start)
+
+        if isinstance(expiry, date):
+            expiry = _to_utc_datetime(expiry)
+
+        self._add_query(QueryStringConstants.SIGNED_START, start)
+        self._add_query(QueryStringConstants.SIGNED_EXPIRY, expiry)
+        self._add_query(QueryStringConstants.SIGNED_PERMISSION, permission)
+        self._add_query(QueryStringConstants.SIGNED_IP, ip)
+        self._add_query(QueryStringConstants.SIGNED_PROTOCOL, protocol)
+        self._add_query(QueryStringConstants.SIGNED_VERSION, x_ms_version)
+
+    def add_resource(self, resource):
+        self._add_query(QueryStringConstants.SIGNED_RESOURCE, resource)
+
+    def add_id(self, policy_id):
+        self._add_query(QueryStringConstants.SIGNED_IDENTIFIER, policy_id)
+
+    def add_account(self, services, resource_types):
+        self._add_query(QueryStringConstants.SIGNED_SERVICES, services)
+        self._add_query(QueryStringConstants.SIGNED_RESOURCE_TYPES, resource_types)
+
+    def add_override_response_headers(self, cache_control,
+                                      content_disposition,
+                                      content_encoding,
+                                      content_language,
+                                      content_type):
+        self._add_query(QueryStringConstants.SIGNED_CACHE_CONTROL, cache_control)
+        self._add_query(QueryStringConstants.SIGNED_CONTENT_DISPOSITION, content_disposition)
+        self._add_query(QueryStringConstants.SIGNED_CONTENT_ENCODING, content_encoding)
+        self._add_query(QueryStringConstants.SIGNED_CONTENT_LANGUAGE, content_language)
+        self._add_query(QueryStringConstants.SIGNED_CONTENT_TYPE, content_type)
+
+    def add_account_signature(self, account_name, account_key):
+        def get_value_to_append(query):
+            return_value = self.query_dict.get(query) or ''
+            return return_value + '\n'
+
+        string_to_sign = \
+            (account_name + '\n' +
+             get_value_to_append(QueryStringConstants.SIGNED_PERMISSION) +
+             get_value_to_append(QueryStringConstants.SIGNED_SERVICES) +
+             get_value_to_append(QueryStringConstants.SIGNED_RESOURCE_TYPES) +
+             get_value_to_append(QueryStringConstants.SIGNED_START) +
+             get_value_to_append(QueryStringConstants.SIGNED_EXPIRY) +
+             get_value_to_append(QueryStringConstants.SIGNED_IP) +
+             get_value_to_append(QueryStringConstants.SIGNED_PROTOCOL) +
+             get_value_to_append(QueryStringConstants.SIGNED_VERSION) +
+             get_value_to_append(QueryStringConstants.SIGNED_ENCRYPTION_SCOPE))
+
+        self._add_query(QueryStringConstants.SIGNED_SIGNATURE,
+                        sign_string(account_key, string_to_sign))
+        self.string_to_sign = string_to_sign
+
+    def get_token(self) -> str:
+        return '&'.join([f'{n}={url_quote(v)}' for n, v in self.query_dict.items() if v is not None])
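+
+
+# Illustrative only: get_token() joins the URL-quoted query_dict entries with
+# '&', so a generated account SAS has the assumed shape (values fabricated):
+#   'st=...&se=...&sp=r&sv=...&ss=b&srt=sco&sig=...'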
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_shared/uploads.py b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_shared/uploads.py
new file mode 100644
index 00000000..b31cfb32
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_shared/uploads.py
@@ -0,0 +1,604 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+
+from concurrent import futures
+from io import BytesIO, IOBase, SEEK_CUR, SEEK_END, SEEK_SET, UnsupportedOperation
+from itertools import islice
+from math import ceil
+from threading import Lock
+
+from azure.core.tracing.common import with_current_context
+
+from . import encode_base64, url_quote
+from .request_handlers import get_length
+from .response_handlers import return_response_headers
+
+
+_LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE = 4 * 1024 * 1024
+_ERROR_VALUE_SHOULD_BE_SEEKABLE_STREAM = "{0} should be a seekable file-like/io.IOBase type stream object."
+
+
+def _parallel_uploads(executor, uploader, pending, running):
+    range_ids = []
+    while True:
+        # Wait for some upload to finish before adding a new one
+        done, running = futures.wait(running, return_when=futures.FIRST_COMPLETED)
+        range_ids.extend([chunk.result() for chunk in done])
+        try:
+            for _ in range(0, len(done)):
+                next_chunk = next(pending)
+                running.add(executor.submit(with_current_context(uploader), next_chunk))
+        except StopIteration:
+            break
+
+    # Wait for the remaining uploads to finish
+    done, _running = futures.wait(running)
+    range_ids.extend([chunk.result() for chunk in done])
+    return range_ids
+
+
+def upload_data_chunks(
+        service=None,
+        uploader_class=None,
+        total_size=None,
+        chunk_size=None,
+        max_concurrency=None,
+        stream=None,
+        validate_content=None,
+        progress_hook=None,
+        **kwargs):
+
+    parallel = max_concurrency > 1
+    if parallel and 'modified_access_conditions' in kwargs:
+        # Access conditions do not work with parallelism
+        kwargs['modified_access_conditions'] = None
+
+    uploader = uploader_class(
+        service=service,
+        total_size=total_size,
+        chunk_size=chunk_size,
+        stream=stream,
+        parallel=parallel,
+        validate_content=validate_content,
+        progress_hook=progress_hook,
+        **kwargs)
+    if parallel:
+        with futures.ThreadPoolExecutor(max_concurrency) as executor:
+            upload_tasks = uploader.get_chunk_streams()
+            running_futures = [
+                executor.submit(with_current_context(uploader.process_chunk), u)
+                for u in islice(upload_tasks, 0, max_concurrency)
+            ]
+            range_ids = _parallel_uploads(executor, uploader.process_chunk, upload_tasks, running_futures)
+    else:
+        range_ids = [uploader.process_chunk(result) for result in uploader.get_chunk_streams()]
+    if any(range_ids):
+        return [r[1] for r in sorted(range_ids, key=lambda r: r[0])]
+    return uploader.response_headers
+
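+# Illustrative only: a hypothetical call shape for the helper above. The
+# uploader_class and service client are supplied by the calling upload code;
+# the class name below is an assumption for illustration.
+#
+#   block_ids = upload_data_chunks(
+#       service=generated_client,                  # hypothetical generated-layer client
+#       uploader_class=DataLakeFileChunkUploader,  # assumed _ChunkUploader subclass
+#       total_size=length,
+#       chunk_size=4 * 1024 * 1024,
+#       max_concurrency=4,
+#       stream=data_stream,
+#   )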
+
+def upload_substream_blocks(
+        service=None,
+        uploader_class=None,
+        total_size=None,
+        chunk_size=None,
+        max_concurrency=None,
+        stream=None,
+        progress_hook=None,
+        **kwargs):
+    parallel = max_concurrency > 1
+    if parallel and 'modified_access_conditions' in kwargs:
+        # Access conditions do not work with parallelism
+        kwargs['modified_access_conditions'] = None
+    uploader = uploader_class(
+        service=service,
+        total_size=total_size,
+        chunk_size=chunk_size,
+        stream=stream,
+        parallel=parallel,
+        progress_hook=progress_hook,
+        **kwargs)
+
+    if parallel:
+        with futures.ThreadPoolExecutor(max_concurrency) as executor:
+            upload_tasks = uploader.get_substream_blocks()
+            running_futures = [
+                executor.submit(with_current_context(uploader.process_substream_block), u)
+                for u in islice(upload_tasks, 0, max_concurrency)
+            ]
+            range_ids = _parallel_uploads(executor, uploader.process_substream_block, upload_tasks, running_futures)
+    else:
+        range_ids = [uploader.process_substream_block(b) for b in uploader.get_substream_blocks()]
+    if any(range_ids):
+        return sorted(range_ids)
+    return []
+
+
+class _ChunkUploader(object):  # pylint: disable=too-many-instance-attributes
+
+    def __init__(
+            self, service,
+            total_size,
+            chunk_size,
+            stream,
+            parallel,
+            encryptor=None,
+            padder=None,
+            progress_hook=None,
+            **kwargs):
+        self.service = service
+        self.total_size = total_size
+        self.chunk_size = chunk_size
+        self.stream = stream
+        self.parallel = parallel
+
+        # Stream management
+        self.stream_lock = Lock() if parallel else None
+
+        # Progress feedback
+        self.progress_total = 0
+        self.progress_lock = Lock() if parallel else None
+        self.progress_hook = progress_hook
+
+        # Encryption
+        self.encryptor = encryptor
+        self.padder = padder
+        self.response_headers = None
+        self.etag = None
+        self.last_modified = None
+        self.request_options = kwargs
+
+    def get_chunk_streams(self):
+        index = 0
+        while True:
+            data = b""
+            read_size = self.chunk_size
+
+            # Buffer until we either reach the end of the stream or get a whole chunk.
+            while True:
+                if self.total_size:
+                    read_size = min(self.chunk_size - len(data), self.total_size - (index + len(data)))
+                temp = self.stream.read(read_size)
+                if not isinstance(temp, bytes):
+                    raise TypeError("Blob data should be of type bytes.")
+                data += temp or b""
+
+                # We have read an empty bytes object, meaning we are at the end
+                # of the stream, or we have read a full chunk.
+                if temp == b"" or len(data) == self.chunk_size:
+                    break
+
+            if len(data) == self.chunk_size:
+                if self.padder:
+                    data = self.padder.update(data)
+                if self.encryptor:
+                    data = self.encryptor.update(data)
+                yield index, data
+            else:
+                if self.padder:
+                    data = self.padder.update(data) + self.padder.finalize()
+                if self.encryptor:
+                    data = self.encryptor.update(data) + self.encryptor.finalize()
+                if data:
+                    yield index, data
+                break
+            index += len(data)
+
+    def process_chunk(self, chunk_data):
+        chunk_bytes = chunk_data[1]
+        chunk_offset = chunk_data[0]
+        return self._upload_chunk_with_progress(chunk_offset, chunk_bytes)
+
+    def _update_progress(self, length):
+        if self.progress_lock is not None:
+            with self.progress_lock:
+                self.progress_total += length
+        else:
+            self.progress_total += length
+
+        if self.progress_hook:
+            self.progress_hook(self.progress_total, self.total_size)
+
+    def _upload_chunk(self, chunk_offset, chunk_data):
+        raise NotImplementedError("Must be implemented by child class.")
+
+    def _upload_chunk_with_progress(self, chunk_offset, chunk_data):
+        range_id = self._upload_chunk(chunk_offset, chunk_data)
+        self._update_progress(len(chunk_data))
+        return range_id
+
+    def get_substream_blocks(self):
+        assert self.chunk_size is not None
+        lock = self.stream_lock
+        blob_length = self.total_size
+
+        if blob_length is None:
+            blob_length = get_length(self.stream)
+            if blob_length is None:
+                raise ValueError("Unable to determine content length of upload data.")
+
+        blocks = int(ceil(blob_length / (self.chunk_size * 1.0)))
+        last_block_size = self.chunk_size if blob_length % self.chunk_size == 0 else blob_length % self.chunk_size
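+        # Worked example: blob_length = 10 MiB and chunk_size = 4 MiB give
+        # blocks = ceil(10 / 4) = 3 substreams of sizes 4, 4 and 2 MiB,
+        # starting at offsets 0, 4 MiB and 8 MiB respectively.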
+
+        for i in range(blocks):
+            index = i * self.chunk_size
+            length = last_block_size if i == blocks - 1 else self.chunk_size
+            yield index, SubStream(self.stream, index, length, lock)
+
+    def process_substream_block(self, block_data):
+        return self._upload_substream_block_with_progress(block_data[0], block_data[1])
+
+    def _upload_substream_block(self, index, block_stream):
+        raise NotImplementedError("Must be implemented by child class.")
+
+    def _upload_substream_block_with_progress(self, index, block_stream):
+        range_id = self._upload_substream_block(index, block_stream)
+        self._update_progress(len(block_stream))
+        return range_id
+
+    def set_response_properties(self, resp):
+        self.etag = resp.etag
+        self.last_modified = resp.last_modified
+
+
+class BlockBlobChunkUploader(_ChunkUploader):
+
+    def __init__(self, *args, **kwargs):
+        kwargs.pop("modified_access_conditions", None)
+        super(BlockBlobChunkUploader, self).__init__(*args, **kwargs)
+        self.current_length = None
+
+    def _upload_chunk(self, chunk_offset, chunk_data):
+        # TODO: This is incorrect, but works with recording.
+        index = f'{chunk_offset:032d}'
+        block_id = encode_base64(url_quote(encode_base64(index)))
+        self.service.stage_block(
+            block_id,
+            len(chunk_data),
+            chunk_data,
+            data_stream_total=self.total_size,
+            upload_stream_current=self.progress_total,
+            **self.request_options
+        )
+        return index, block_id
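+
+    # Note: the service requires Base64-encoded block ids and, within a single
+    # blob, every block id must have the same encoded length, hence the fixed
+    # 32-digit, zero-padded offset above.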
+
+    def _upload_substream_block(self, index, block_stream):
+        try:
+            block_id = f'BlockId{(index//self.chunk_size):05}'
+            self.service.stage_block(
+                block_id,
+                len(block_stream),
+                block_stream,
+                data_stream_total=self.total_size,
+                upload_stream_current=self.progress_total,
+                **self.request_options
+            )
+        finally:
+            block_stream.close()
+        return block_id
+
+
+class PageBlobChunkUploader(_ChunkUploader):
+
+    def _is_chunk_empty(self, chunk_data):
+        # A chunk is "empty" when every byte is zero.
+        return not any(bytearray(chunk_data))
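+
+    # Example: bytearray(b"\x00" * 512) contains only zeros, so not any(...)
+    # is True and the all-zero chunk is skipped by _upload_chunk below;
+    # unwritten pages of a page blob read back as zeros anyway.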
+
+    def _upload_chunk(self, chunk_offset, chunk_data):
+        # avoid uploading the empty pages
+        if not self._is_chunk_empty(chunk_data):
+            chunk_end = chunk_offset + len(chunk_data) - 1
+            content_range = f"bytes={chunk_offset}-{chunk_end}"
+            computed_md5 = None
+            self.response_headers = self.service.upload_pages(
+                body=chunk_data,
+                content_length=len(chunk_data),
+                transactional_content_md5=computed_md5,
+                range=content_range,
+                cls=return_response_headers,
+                data_stream_total=self.total_size,
+                upload_stream_current=self.progress_total,
+                **self.request_options
+            )
+
+            if not self.parallel and self.request_options.get('modified_access_conditions'):
+                self.request_options['modified_access_conditions'].if_match = self.response_headers['etag']
+
+    def _upload_substream_block(self, index, block_stream):
+        pass
+
+
+class AppendBlobChunkUploader(_ChunkUploader):
+
+    def __init__(self, *args, **kwargs):
+        super(AppendBlobChunkUploader, self).__init__(*args, **kwargs)
+        self.current_length = None
+
+    def _upload_chunk(self, chunk_offset, chunk_data):
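+        # First chunk: let the service report the blob's current append offset,
+        # then anchor every later chunk at current_length + chunk_offset via the
+        # append_position precondition, so a retried or out-of-order request
+        # fails fast instead of appending at the wrong offset.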
+        if self.current_length is None:
+            self.response_headers = self.service.append_block(
+                body=chunk_data,
+                content_length=len(chunk_data),
+                cls=return_response_headers,
+                data_stream_total=self.total_size,
+                upload_stream_current=self.progress_total,
+                **self.request_options
+            )
+            self.current_length = int(self.response_headers["blob_append_offset"])
+        else:
+            self.request_options['append_position_access_conditions'].append_position = \
+                self.current_length + chunk_offset
+            self.response_headers = self.service.append_block(
+                body=chunk_data,
+                content_length=len(chunk_data),
+                cls=return_response_headers,
+                data_stream_total=self.total_size,
+                upload_stream_current=self.progress_total,
+                **self.request_options
+            )
+
+    def _upload_substream_block(self, index, block_stream):
+        pass
+
+
+class DataLakeFileChunkUploader(_ChunkUploader):
+
+    def _upload_chunk(self, chunk_offset, chunk_data):
+        # Unlike page blob uploads, every chunk is appended as-is, including all-zero data.
+        self.response_headers = self.service.append_data(
+            body=chunk_data,
+            position=chunk_offset,
+            content_length=len(chunk_data),
+            cls=return_response_headers,
+            data_stream_total=self.total_size,
+            upload_stream_current=self.progress_total,
+            **self.request_options
+        )
+
+        if not self.parallel and self.request_options.get('modified_access_conditions'):
+            self.request_options['modified_access_conditions'].if_match = self.response_headers['etag']
+
+    def _upload_substream_block(self, index, block_stream):
+        try:
+            self.service.append_data(
+                body=block_stream,
+                position=index,
+                content_length=len(block_stream),
+                cls=return_response_headers,
+                data_stream_total=self.total_size,
+                upload_stream_current=self.progress_total,
+                **self.request_options
+            )
+        finally:
+            block_stream.close()
+
+
+class FileChunkUploader(_ChunkUploader):
+
+    def _upload_chunk(self, chunk_offset, chunk_data):
+        length = len(chunk_data)
+        chunk_end = chunk_offset + length - 1
+        response = self.service.upload_range(
+            chunk_data,
+            chunk_offset,
+            length,
+            data_stream_total=self.total_size,
+            upload_stream_current=self.progress_total,
+            **self.request_options
+        )
+        return f'bytes={chunk_offset}-{chunk_end}', response
+
+    # TODO: Implement this method.
+    def _upload_substream_block(self, index, block_stream):
+        pass
+
+
+class SubStream(IOBase):
+
+    def __init__(self, wrapped_stream, stream_begin_index, length, lockObj):
+        # Python 2.7: file-like objects created with open() typically support seek(), but are
+        # not derived from io.IOBase and thus do not implement seekable().
+        # Python 3: file-like objects created with open() derive from io.IOBase.
+        try:
+            # Only the main thread runs this, so there is no need to grab the lock.
+            wrapped_stream.seek(0, SEEK_CUR)
+        except Exception as exc:
+            raise ValueError("Wrapped stream must support seek().") from exc
+
+        self._lock = lockObj
+        self._wrapped_stream = wrapped_stream
+        self._position = 0
+        self._stream_begin_index = stream_begin_index
+        self._length = length
+        self._buffer = BytesIO()
+
+        # We must avoid buffering more than necessary without using up too much memory,
+        # so the max buffer size is capped at 4MB.
+        self._max_buffer_size = (
+            length if length < _LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE else _LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE
+        )
+        self._current_buffer_start = 0
+        self._current_buffer_size = 0
+        super(SubStream, self).__init__()
+
+    def __len__(self):
+        return self._length
+
+    def close(self):
+        if self._buffer:
+            self._buffer.close()
+        self._wrapped_stream = None
+        IOBase.close(self)
+
+    def fileno(self):
+        return self._wrapped_stream.fileno()
+
+    def flush(self):
+        pass
+
+    def read(self, size=None):
+        if self.closed:  # pylint: disable=using-constant-test
+            raise ValueError("Stream is closed.")
+
+        if size is None:
+            size = self._length - self._position
+
+        # adjust if out of bounds
+        if size + self._position >= self._length:
+            size = self._length - self._position
+
+        # return fast
+        if size == 0 or self._buffer.closed:
+            return b""
+
+        # attempt first read from the read buffer and update position
+        read_buffer = self._buffer.read(size)
+        bytes_read = len(read_buffer)
+        bytes_remaining = size - bytes_read
+        self._position += bytes_read
+
+        # repopulate the read buffer from the underlying stream to fulfill the request
+        # ensure the seek and read operations are done atomically (only if a lock is provided)
+        if bytes_remaining > 0:
+            with self._buffer:
+                # either read in the max buffer size specified on the class
+                # or read in just enough data for the current block/sub stream
+                current_max_buffer_size = min(self._max_buffer_size, self._length - self._position)
+
+                # lock is only defined if max_concurrency > 1 (parallel uploads)
+                if self._lock:
+                    with self._lock:
+                        # reposition the underlying stream to match the start of the data to read
+                        absolute_position = self._stream_begin_index + self._position
+                        self._wrapped_stream.seek(absolute_position, SEEK_SET)
+                        # If we can't seek to the right location, our read will be corrupted so fail fast.
+                        if self._wrapped_stream.tell() != absolute_position:
+                            raise IOError("Stream failed to seek to the desired location.")
+                        buffer_from_stream = self._wrapped_stream.read(current_max_buffer_size)
+                else:
+                    absolute_position = self._stream_begin_index + self._position
+                    # It's possible that there's connection problem during data transfer,
+                    # so when we retry we don't want to read from current position of wrapped stream,
+                    # instead we should seek to where we want to read from.
+                    if self._wrapped_stream.tell() != absolute_position:
+                        self._wrapped_stream.seek(absolute_position, SEEK_SET)
+
+                    buffer_from_stream = self._wrapped_stream.read(current_max_buffer_size)
+
+            if buffer_from_stream:
+                # update the buffer with new data from the wrapped stream
+                # we need to note down the start position and size of the buffer, in case seek is performed later
+                self._buffer = BytesIO(buffer_from_stream)
+                self._current_buffer_start = self._position
+                self._current_buffer_size = len(buffer_from_stream)
+
+                # read the remaining bytes from the new buffer and update position
+                second_read_buffer = self._buffer.read(bytes_remaining)
+                read_buffer += second_read_buffer
+                self._position += len(second_read_buffer)
+
+        return read_buffer
+
+    def readable(self):
+        return True
+
+    def readinto(self, b):
+        raise UnsupportedOperation
+
+    def seek(self, offset, whence=0):
+        if whence is SEEK_SET:
+            start_index = 0
+        elif whence is SEEK_CUR:
+            start_index = self._position
+        elif whence is SEEK_END:
+            start_index = self._length
+            offset = -offset
+        else:
+            raise ValueError("Invalid argument for the 'whence' parameter.")
+
+        pos = start_index + offset
+
+        if pos > self._length:
+            pos = self._length
+        elif pos < 0:
+            pos = 0
+
+        # check if buffer is still valid
+        # if not, drop buffer
+        if pos < self._current_buffer_start or pos >= self._current_buffer_start + self._current_buffer_size:
+            self._buffer.close()
+            self._buffer = BytesIO()
+        else:  # if yes seek to correct position
+            delta = pos - self._current_buffer_start
+            self._buffer.seek(delta, SEEK_SET)
+
+        self._position = pos
+        return pos
+
+    def seekable(self):
+        return True
+
+    def tell(self):
+        return self._position
+
+    def write(self, *args, **kwargs):
+        raise UnsupportedOperation
+
+    def writelines(self, *args, **kwargs):
+        raise UnsupportedOperation
+
+    def writable(self):
+        # Named "writable" to match io.IOBase; this stream is read-only.
+        return False
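+
+
+# Illustrative sketch (not part of the SDK): carving two SubStream windows out
+# of one seekable source; `_demo_substream` and its locals are hypothetical.
+def _demo_substream():
+    from io import BytesIO
+    from threading import Lock
+
+    source = BytesIO(b"0123456789")
+    lock = Lock()
+    first = SubStream(source, 0, 5, lock)   # window over bytes 0-4
+    second = SubStream(source, 5, 5, lock)  # window over bytes 5-9
+    # Each window tracks its own position; seeks on the shared wrapped stream
+    # are serialized by the lock, so parallel readers stay consistent.
+    return first.read(), second.read()      # (b"01234", b"56789")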
+
+
+class IterStreamer(object):
+    """
+    File-like streaming iterator.
+    """
+
+    def __init__(self, generator, encoding="UTF-8"):
+        self.generator = generator
+        self.iterator = iter(generator)
+        self.leftover = b""
+        self.encoding = encoding
+
+    def __len__(self):
+        return self.generator.__len__()
+
+    def __iter__(self):
+        return self.iterator
+
+    def seekable(self):
+        return False
+
+    def __next__(self):
+        return next(self.iterator)
+
+    def tell(self, *args, **kwargs):
+        raise UnsupportedOperation("Data generator does not support tell.")
+
+    def seek(self, *args, **kwargs):
+        raise UnsupportedOperation("Data generator is not seekable.")
+
+    def read(self, size):
+        data = self.leftover
+        count = len(self.leftover)
+        try:
+            while count < size:
+                chunk = self.__next__()
+                if isinstance(chunk, str):
+                    chunk = chunk.encode(self.encoding)
+                data += chunk
+                count += len(chunk)
+        # This means count < size and what's leftover will be returned in this call.
+        except StopIteration:
+            self.leftover = b""
+
+        if count >= size:
+            self.leftover = data[size:]
+
+        return data[:size]
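+
+
+# Illustrative sketch (not part of the SDK): reading fixed-size slices from a
+# plain generator through IterStreamer; `_demo_iter_streamer` is hypothetical.
+def _demo_iter_streamer():
+    chunks = (piece for piece in (b"abc", b"defg", b"h"))
+    stream = IterStreamer(chunks)
+    # Leftover bytes from an oversized chunk are carried into the next read.
+    return stream.read(5), stream.read(5)  # (b"abcde", b"fgh")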
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_shared/uploads_async.py b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_shared/uploads_async.py
new file mode 100644
index 00000000..3e102ec5
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_shared/uploads_async.py
@@ -0,0 +1,460 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+
+import asyncio
+import inspect
+import threading
+from asyncio import Lock
+from io import UnsupportedOperation
+from itertools import islice
+from math import ceil
+from typing import AsyncGenerator, Union
+
+from . import encode_base64, url_quote
+from .request_handlers import get_length
+from .response_handlers import return_response_headers
+from .uploads import SubStream, IterStreamer  # pylint: disable=unused-import
+
+
+async def _async_parallel_uploads(uploader, pending, running):
+    range_ids = []
+    while True:
+        # Wait for an in-flight upload to finish before adding a new one
+        done, running = await asyncio.wait(running, return_when=asyncio.FIRST_COMPLETED)
+        range_ids.extend([chunk.result() for chunk in done])
+        try:
+            for _ in range(0, len(done)):
+                next_chunk = await pending.__anext__()
+                running.add(asyncio.ensure_future(uploader(next_chunk)))
+        except StopAsyncIteration:
+            break
+
+    # Wait for the remaining uploads to finish
+    if running:
+        done, _running = await asyncio.wait(running)
+        range_ids.extend([chunk.result() for chunk in done])
+    return range_ids
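+
+
+# Illustrative sketch (not part of the SDK): driving _async_parallel_uploads
+# with a stand-in coroutine uploader; every name below is hypothetical.
+async def _demo_async_parallel_uploads():
+    async def fake_upload(chunk):
+        offset, _data = chunk
+        return offset, f"id-{offset}"
+
+    async def chunks():
+        for i in range(10):
+            yield i, b"x"
+
+    pending = chunks()
+    # Seed the window with three in-flight tasks, mirroring upload_data_chunks.
+    running = {asyncio.ensure_future(fake_upload(await pending.__anext__()))
+               for _ in range(3)}
+    return await _async_parallel_uploads(fake_upload, pending, running)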
+
+
+async def _parallel_uploads(uploader, pending, running):
+    range_ids = []
+    while True:
+        # Wait for an in-flight upload to finish before adding a new one
+        done, running = await asyncio.wait(running, return_when=asyncio.FIRST_COMPLETED)
+        range_ids.extend([chunk.result() for chunk in done])
+        try:
+            for _ in range(0, len(done)):
+                next_chunk = next(pending)
+                running.add(asyncio.ensure_future(uploader(next_chunk)))
+        except StopIteration:
+            break
+
+    # Wait for the remaining uploads to finish
+    if running:
+        done, _running = await asyncio.wait(running)
+        range_ids.extend([chunk.result() for chunk in done])
+    return range_ids
+
+
+async def upload_data_chunks(
+        service=None,
+        uploader_class=None,
+        total_size=None,
+        chunk_size=None,
+        max_concurrency=None,
+        stream=None,
+        progress_hook=None,
+        **kwargs):
+
+    parallel = max_concurrency > 1
+    if parallel and 'modified_access_conditions' in kwargs:
+        # Access conditions do not work with parallelism
+        kwargs['modified_access_conditions'] = None
+
+    uploader = uploader_class(
+        service=service,
+        total_size=total_size,
+        chunk_size=chunk_size,
+        stream=stream,
+        parallel=parallel,
+        progress_hook=progress_hook,
+        **kwargs)
+
+    if parallel:
+        upload_tasks = uploader.get_chunk_streams()
+        running_futures = []
+        for _ in range(max_concurrency):
+            try:
+                chunk = await upload_tasks.__anext__()
+                running_futures.append(asyncio.ensure_future(uploader.process_chunk(chunk)))
+            except StopAsyncIteration:
+                break
+
+        range_ids = await _async_parallel_uploads(uploader.process_chunk, upload_tasks, running_futures)
+    else:
+        range_ids = []
+        async for chunk in uploader.get_chunk_streams():
+            range_ids.append(await uploader.process_chunk(chunk))
+
+    if any(range_ids):
+        return [r[1] for r in sorted(range_ids, key=lambda r: r[0])]
+    return uploader.response_headers
+
+
+async def upload_substream_blocks(
+        service=None,
+        uploader_class=None,
+        total_size=None,
+        chunk_size=None,
+        max_concurrency=None,
+        stream=None,
+        progress_hook=None,
+        **kwargs):
+    parallel = max_concurrency > 1
+    if parallel and 'modified_access_conditions' in kwargs:
+        # Access conditions do not work with parallelism
+        kwargs['modified_access_conditions'] = None
+    uploader = uploader_class(
+        service=service,
+        total_size=total_size,
+        chunk_size=chunk_size,
+        stream=stream,
+        parallel=parallel,
+        progress_hook=progress_hook,
+        **kwargs)
+
+    if parallel:
+        upload_tasks = uploader.get_substream_blocks()
+        running_futures = [
+            asyncio.ensure_future(uploader.process_substream_block(u))
+            for u in islice(upload_tasks, 0, max_concurrency)
+        ]
+        range_ids = await _parallel_uploads(uploader.process_substream_block, upload_tasks, running_futures)
+    else:
+        range_ids = []
+        for block in uploader.get_substream_blocks():
+            range_ids.append(await uploader.process_substream_block(block))
+    if any(range_ids):
+        return sorted(range_ids)
+    return []
+
+
+class _ChunkUploader(object):  # pylint: disable=too-many-instance-attributes
+
+    def __init__(
+            self, service,
+            total_size,
+            chunk_size,
+            stream,
+            parallel,
+            encryptor=None,
+            padder=None,
+            progress_hook=None,
+            **kwargs):
+        self.service = service
+        self.total_size = total_size
+        self.chunk_size = chunk_size
+        self.stream = stream
+        self.parallel = parallel
+
+        # Stream management
+        self.stream_lock = threading.Lock() if parallel else None
+
+        # Progress feedback
+        self.progress_total = 0
+        self.progress_lock = Lock() if parallel else None
+        self.progress_hook = progress_hook
+
+        # Encryption
+        self.encryptor = encryptor
+        self.padder = padder
+        self.response_headers = None
+        self.etag = None
+        self.last_modified = None
+        self.request_options = kwargs
+
+    async def get_chunk_streams(self):
+        index = 0
+        while True:
+            data = b''
+            read_size = self.chunk_size
+
+            # Buffer until we either reach the end of the stream or get a whole chunk.
+            while True:
+                if self.total_size:
+                    read_size = min(self.chunk_size - len(data), self.total_size - (index + len(data)))
+                temp = self.stream.read(read_size)
+                if inspect.isawaitable(temp):
+                    temp = await temp
+                if not isinstance(temp, bytes):
+                    raise TypeError('Blob data should be of type bytes.')
+                data += temp or b""
+
+                # We have read an empty bytes object, meaning we are at the end
+                # of the stream, or we have read a full chunk.
+                if temp == b'' or len(data) == self.chunk_size:
+                    break
+
+            if len(data) == self.chunk_size:
+                if self.padder:
+                    data = self.padder.update(data)
+                if self.encryptor:
+                    data = self.encryptor.update(data)
+                yield index, data
+            else:
+                if self.padder:
+                    data = self.padder.update(data) + self.padder.finalize()
+                if self.encryptor:
+                    data = self.encryptor.update(data) + self.encryptor.finalize()
+                if data:
+                    yield index, data
+                break
+            index += len(data)
+
+    async def process_chunk(self, chunk_data):
+        chunk_bytes = chunk_data[1]
+        chunk_offset = chunk_data[0]
+        return await self._upload_chunk_with_progress(chunk_offset, chunk_bytes)
+
+    async def _update_progress(self, length):
+        if self.progress_lock is not None:
+            async with self.progress_lock:
+                self.progress_total += length
+        else:
+            self.progress_total += length
+
+        if self.progress_hook:
+            await self.progress_hook(self.progress_total, self.total_size)
+
+    async def _upload_chunk(self, chunk_offset, chunk_data):
+        raise NotImplementedError("Must be implemented by child class.")
+
+    async def _upload_chunk_with_progress(self, chunk_offset, chunk_data):
+        range_id = await self._upload_chunk(chunk_offset, chunk_data)
+        await self._update_progress(len(chunk_data))
+        return range_id
+
+    def get_substream_blocks(self):
+        assert self.chunk_size is not None
+        lock = self.stream_lock
+        blob_length = self.total_size
+
+        if blob_length is None:
+            blob_length = get_length(self.stream)
+            if blob_length is None:
+                raise ValueError("Unable to determine content length of upload data.")
+
+        blocks = int(ceil(blob_length / (self.chunk_size * 1.0)))
+        last_block_size = self.chunk_size if blob_length % self.chunk_size == 0 else blob_length % self.chunk_size
+
+        for i in range(blocks):
+            index = i * self.chunk_size
+            length = last_block_size if i == blocks - 1 else self.chunk_size
+            yield index, SubStream(self.stream, index, length, lock)
+
+    async def process_substream_block(self, block_data):
+        return await self._upload_substream_block_with_progress(block_data[0], block_data[1])
+
+    async def _upload_substream_block(self, index, block_stream):
+        raise NotImplementedError("Must be implemented by child class.")
+
+    async def _upload_substream_block_with_progress(self, index, block_stream):
+        range_id = await self._upload_substream_block(index, block_stream)
+        await self._update_progress(len(block_stream))
+        return range_id
+
+    def set_response_properties(self, resp):
+        self.etag = resp.etag
+        self.last_modified = resp.last_modified
+
+
+class BlockBlobChunkUploader(_ChunkUploader):
+
+    def __init__(self, *args, **kwargs):
+        kwargs.pop('modified_access_conditions', None)
+        super(BlockBlobChunkUploader, self).__init__(*args, **kwargs)
+        self.current_length = None
+
+    async def _upload_chunk(self, chunk_offset, chunk_data):
+        # TODO: This is incorrect, but works with recording.
+        index = f'{chunk_offset:032d}'
+        block_id = encode_base64(url_quote(encode_base64(index)))
+        await self.service.stage_block(
+            block_id,
+            len(chunk_data),
+            body=chunk_data,
+            data_stream_total=self.total_size,
+            upload_stream_current=self.progress_total,
+            **self.request_options)
+        return index, block_id
+
+    async def _upload_substream_block(self, index, block_stream):
+        try:
+            block_id = f'BlockId{(index//self.chunk_size):05}'
+            await self.service.stage_block(
+                block_id,
+                len(block_stream),
+                block_stream,
+                data_stream_total=self.total_size,
+                upload_stream_current=self.progress_total,
+                **self.request_options)
+        finally:
+            block_stream.close()
+        return block_id
+
+
+class PageBlobChunkUploader(_ChunkUploader):
+
+    def _is_chunk_empty(self, chunk_data):
+        # A chunk is "empty" when every byte is zero; this mirrors the
+        # equivalent check in the synchronous uploader.
+        return not any(bytearray(chunk_data))
+
+    async def _upload_chunk(self, chunk_offset, chunk_data):
+        # avoid uploading the empty pages
+        if not self._is_chunk_empty(chunk_data):
+            chunk_end = chunk_offset + len(chunk_data) - 1
+            content_range = f'bytes={chunk_offset}-{chunk_end}'
+            computed_md5 = None
+            self.response_headers = await self.service.upload_pages(
+                body=chunk_data,
+                content_length=len(chunk_data),
+                transactional_content_md5=computed_md5,
+                range=content_range,
+                cls=return_response_headers,
+                data_stream_total=self.total_size,
+                upload_stream_current=self.progress_total,
+                **self.request_options)
+
+            if not self.parallel and self.request_options.get('modified_access_conditions'):
+                self.request_options['modified_access_conditions'].if_match = self.response_headers['etag']
+
+    async def _upload_substream_block(self, index, block_stream):
+        pass
+
+
+class AppendBlobChunkUploader(_ChunkUploader):
+
+    def __init__(self, *args, **kwargs):
+        super(AppendBlobChunkUploader, self).__init__(*args, **kwargs)
+        self.current_length = None
+
+    async def _upload_chunk(self, chunk_offset, chunk_data):
+        if self.current_length is None:
+            self.response_headers = await self.service.append_block(
+                body=chunk_data,
+                content_length=len(chunk_data),
+                cls=return_response_headers,
+                data_stream_total=self.total_size,
+                upload_stream_current=self.progress_total,
+                **self.request_options)
+            self.current_length = int(self.response_headers['blob_append_offset'])
+        else:
+            self.request_options['append_position_access_conditions'].append_position = \
+                self.current_length + chunk_offset
+            self.response_headers = await self.service.append_block(
+                body=chunk_data,
+                content_length=len(chunk_data),
+                cls=return_response_headers,
+                data_stream_total=self.total_size,
+                upload_stream_current=self.progress_total,
+                **self.request_options)
+
+    async def _upload_substream_block(self, index, block_stream):
+        pass
+
+
+class DataLakeFileChunkUploader(_ChunkUploader):
+
+    async def _upload_chunk(self, chunk_offset, chunk_data):
+        self.response_headers = await self.service.append_data(
+            body=chunk_data,
+            position=chunk_offset,
+            content_length=len(chunk_data),
+            cls=return_response_headers,
+            data_stream_total=self.total_size,
+            upload_stream_current=self.progress_total,
+            **self.request_options
+        )
+
+        if not self.parallel and self.request_options.get('modified_access_conditions'):
+            self.request_options['modified_access_conditions'].if_match = self.response_headers['etag']
+
+    async def _upload_substream_block(self, index, block_stream):
+        try:
+            await self.service.append_data(
+                body=block_stream,
+                position=index,
+                content_length=len(block_stream),
+                cls=return_response_headers,
+                data_stream_total=self.total_size,
+                upload_stream_current=self.progress_total,
+                **self.request_options
+            )
+        finally:
+            block_stream.close()
+
+
+class FileChunkUploader(_ChunkUploader):
+
+    async def _upload_chunk(self, chunk_offset, chunk_data):
+        length = len(chunk_data)
+        chunk_end = chunk_offset + length - 1
+        response = await self.service.upload_range(
+            chunk_data,
+            chunk_offset,
+            length,
+            data_stream_total=self.total_size,
+            upload_stream_current=self.progress_total,
+            **self.request_options
+        )
+        range_id = f'bytes={chunk_offset}-{chunk_end}'
+        return range_id, response
+
+    # TODO: Implement this method.
+    async def _upload_substream_block(self, index, block_stream):
+        pass
+
+
+class AsyncIterStreamer:
+    """
+    File-like streaming object for AsyncGenerators.
+    """
+    def __init__(self, generator: AsyncGenerator[Union[bytes, str], None], encoding: str = "UTF-8"):
+        self.iterator = generator.__aiter__()
+        self.leftover = b""
+        self.encoding = encoding
+
+    def seekable(self):
+        return False
+
+    def tell(self, *args, **kwargs):
+        raise UnsupportedOperation("Data generator does not support tell.")
+
+    def seek(self, *args, **kwargs):
+        raise UnsupportedOperation("Data generator is not seekable.")
+
+    async def read(self, size: int) -> bytes:
+        data = self.leftover
+        count = len(self.leftover)
+        try:
+            while count < size:
+                chunk = await self.iterator.__anext__()
+                if isinstance(chunk, str):
+                    chunk = chunk.encode(self.encoding)
+                data += chunk
+                count += len(chunk)
+        # This means count < size and what's leftover will be returned in this call.
+        except StopAsyncIteration:
+            self.leftover = b""
+
+        if count >= size:
+            self.leftover = data[size:]
+
+        return data[:size]
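+
+
+# Illustrative sketch (not part of the SDK): AsyncIterStreamer adapts an async
+# generator to a read(size) interface; `_demo_async_iter_streamer` is hypothetical.
+async def _demo_async_iter_streamer():
+    async def chunks():
+        for piece in (b"abc", b"defg", b"h"):
+            yield piece
+
+    stream = AsyncIterStreamer(chunks())
+    # Oversized chunks are split; the remainder is carried into the next read.
+    return await stream.read(5), await stream.read(5)  # (b"abcde", b"fgh")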
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_shared_access_signature.py b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_shared_access_signature.py
new file mode 100644
index 00000000..12b63e83
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_shared_access_signature.py
@@ -0,0 +1,462 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+# pylint: disable=docstring-keyword-should-match-keyword-only
+
+from typing import (
+    Any, Callable, Optional, Union,
+    TYPE_CHECKING
+)
+from urllib.parse import parse_qs
+
+from azure.storage.blob import generate_account_sas as generate_blob_account_sas
+from azure.storage.blob import generate_container_sas, generate_blob_sas
+from ._shared.models import Services
+from ._shared.shared_access_signature import QueryStringConstants
+
+
+if TYPE_CHECKING:
+    from datetime import datetime
+    from ._models import (
+        AccountSasPermissions,
+        DirectorySasPermissions,
+        FileSasPermissions,
+        FileSystemSasPermissions,
+        ResourceTypes,
+        UserDelegationKey
+    )
+
+
+def generate_account_sas(
+    account_name: str,
+    account_key: str,
+    resource_types: Union["ResourceTypes", str],
+    permission: Union["AccountSasPermissions", str],
+    expiry: Union["datetime", str],
+    *,
+    services: Union[Services, str] = Services(blob=True),
+    sts_hook: Optional[Callable[[str], None]] = None,
+    **kwargs: Any
+) -> str:
+    """Generates a shared access signature for the DataLake service.
+
+    Use the returned signature as the credential parameter of any DataLakeServiceClient,
+    FileSystemClient, DataLakeDirectoryClient or DataLakeFileClient.
+
+    :param str account_name:
+        The storage account name used to generate the shared access signature.
+    :param str account_key:
+        The access key to generate the shared access signature.
+    :param resource_types:
+        Specifies the resource types that are accessible with the account SAS.
+    :type resource_types: str or ~azure.storage.filedatalake.ResourceTypes
+    :param permission:
+        The permissions associated with the shared access signature. The
+        user is restricted to operations allowed by the permissions.
+    :type permission: str or ~azure.storage.filedatalake.AccountSasPermissions
+    :param expiry:
+        The time at which the shared access signature becomes invalid.
+        The provided datetime will always be interpreted as UTC.
+    :type expiry: ~datetime.datetime or str
+    :keyword start:
+        The time at which the shared access signature becomes valid. If
+        omitted, start time for this call is assumed to be the time when the
+        storage service receives the request. The provided datetime will always
+        be interpreted as UTC.
+    :paramtype start: ~datetime.datetime or str
+    :keyword str ip:
+        Specifies an IP address or a range of IP addresses from which to accept requests.
+        If the IP address from which the request originates does not match the IP address
+        or address range specified on the SAS token, the request is not authenticated.
+        For example, specifying ip=168.1.5.65 or ip=168.1.5.60-168.1.5.70 on the SAS
+        restricts the request to those IP addresses.
+    :keyword Union[Services, str] services:
+        Specifies the services that the Shared Access Signature (sas) token can be used with.
+        Defaults to the blob service (which backs Data Lake) if not provided.
+    :keyword str protocol:
+        Specifies the protocol permitted for a request made. The default value is https.
+    :keyword str encryption_scope:
+        Specifies the encryption scope for a request made so that all write operations will be service encrypted.
+    :keyword sts_hook:
+        For debugging purposes only. If provided, the hook is called with the string to sign
+        that was used to generate the SAS.
+    :paramtype sts_hook: Optional[Callable[[str], None]]
+    :return: A Shared Access Signature (sas) token.
+    :rtype: str
+    """
+    return generate_blob_account_sas(
+        account_name=account_name,
+        account_key=account_key,
+        resource_types=resource_types,
+        permission=permission,
+        expiry=expiry,
+        services=services,
+        sts_hook=sts_hook,
+        **kwargs
+    )
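+
+
+# Usage sketch (illustrative only): the account name and key below are dummy
+# placeholders, and the permission/resource flags are one plausible choice;
+# `_example_account_sas` is not part of the public API.
+def _example_account_sas():
+    from datetime import datetime, timedelta
+
+    from azure.storage.filedatalake import AccountSasPermissions, ResourceTypes
+
+    return generate_account_sas(
+        account_name="myaccount",
+        account_key="MDAwMDAwMDAwMDAwMDAwMA==",  # dummy Base64 key
+        resource_types=ResourceTypes(file_system=True, object=True),
+        permission=AccountSasPermissions(read=True, list=True),
+        expiry=datetime.utcnow() + timedelta(hours=1),
+    )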
+
+
+def generate_file_system_sas(
+    account_name,  # type: str
+    file_system_name,  # type: str
+    credential,  # type: Union[str, UserDelegationKey]
+    permission=None,  # type: Optional[Union[FileSystemSasPermissions, str]]
+    expiry=None,  # type: Optional[Union[datetime, str]]
+    *,
+    sts_hook=None,  # type: Optional[Callable[[str], None]]
+    **kwargs  # type: Any
+):
+    # type: (...) -> str
+    """Generates a shared access signature for a file system.
+
+    Use the returned signature with the credential parameter of any DataLakeServiceClient,
+    FileSystemClient, DataLakeDirectoryClient or DataLakeFileClient.
+
+    :param str account_name:
+        The storage account name used to generate the shared access signature.
+    :param str file_system_name:
+        The name of the file system.
+    :param credential:
+        The credential may be either an account key or a user delegation key.
+        If an account key is used, the credential should be a str.
+        Instead of an account key, the user may also pass in a user delegation key.
+        A user delegation key can be obtained from the service by authenticating with an AAD identity;
+        this can be accomplished
+        by calling :func:`~azure.storage.filedatalake.DataLakeServiceClient.get_user_delegation_key`.
+        When present, the SAS is signed with the user delegation key instead.
+    :type credential: str or ~azure.storage.filedatalake.UserDelegationKey
+    :param permission:
+        The permissions associated with the shared access signature. The
+        user is restricted to operations allowed by the permissions.
+        Permissions must be ordered racwdlmeop.
+        Required unless an id is given referencing a stored access policy
+        which contains this field. This field must be omitted if it has been
+        specified in an associated stored access policy.
+    :type permission: str or ~azure.storage.filedatalake.FileSystemSasPermissions
+    :param expiry:
+        The time at which the shared access signature becomes invalid.
+        Required unless an id is given referencing a stored access policy
+        which contains this field. This field must be omitted if it has
+        been specified in an associated stored access policy. Azure will always
+        convert values to UTC. If a date is passed in without timezone info, it
+        is assumed to be UTC.
+    :type expiry: datetime or str
+    :keyword start:
+        The time at which the shared access signature becomes valid. If
+        omitted, start time for this call is assumed to be the time when the
+        storage service receives the request. The provided datetime will always
+        be interpreted as UTC.
+    :paramtype start: datetime or str
+    :keyword str policy_id:
+        A unique value up to 64 characters in length that correlates to a
+        stored access policy. To create a stored access policy, use
+        :func:`~azure.storage.filedatalake.FileSystemClient.set_file_system_access_policy`.
+    :keyword str ip:
+        Specifies an IP address or a range of IP addresses from which to accept requests.
+        If the IP address from which the request originates does not match the IP address
+        or address range specified on the SAS token, the request is not authenticated.
+        For example, specifying ip=168.1.5.65 or ip=168.1.5.60-168.1.5.70 on the SAS
+        restricts the request to those IP addresses.
+    :keyword str protocol:
+        Specifies the protocol permitted for a request made. The default value is https.
+    :keyword str cache_control:
+        Response header value for Cache-Control when resource is accessed
+        using this shared access signature.
+    :keyword str content_disposition:
+        Response header value for Content-Disposition when resource is accessed
+        using this shared access signature.
+    :keyword str content_encoding:
+        Response header value for Content-Encoding when resource is accessed
+        using this shared access signature.
+    :keyword str content_language:
+        Response header value for Content-Language when resource is accessed
+        using this shared access signature.
+    :keyword str content_type:
+        Response header value for Content-Type when resource is accessed
+        using this shared access signature.
+    :keyword str preauthorized_agent_object_id:
+        The AAD object ID of a user assumed to be authorized by the owner of the user delegation key to perform
+        the action granted by the SAS token. The service will validate the SAS token and ensure that the owner of the
+        user delegation key has the required permissions before granting access but no additional permission check for
+        the agent object id will be performed.
+    :keyword str agent_object_id:
+        The AAD object ID of a user assumed to be unauthorized by the owner of the user delegation key to
+        perform the action granted by the SAS token. The service will validate the SAS token and ensure that the owner
+        of the user delegation key has the required permissions before granting access and the service will perform an
+        additional POSIX ACL check to determine if this user is authorized to perform the requested operation.
+    :keyword str correlation_id:
+        The correlation id to correlate the storage audit logs with the audit logs used by the principal
+        generating and distributing the SAS.
+    :keyword str encryption_scope:
+        Specifies the encryption scope for a request made so that all write operations will be service encrypted.
+    :keyword sts_hook:
+        For debugging purposes only. If provided, the hook is called with the string to sign
+        that was used to generate the SAS.
+    :paramtype sts_hook: Optional[Callable[[str], None]]
+    :return: A Shared Access Signature (sas) token.
+    :rtype: str
+    """
+    return generate_container_sas(
+        account_name=account_name,
+        container_name=file_system_name,
+        account_key=credential if isinstance(credential, str) else None,
+        user_delegation_key=credential if not isinstance(credential, str) else None,
+        permission=permission,
+        expiry=expiry,
+        sts_hook=sts_hook,
+        **kwargs
+    )
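+
+
+# Usage sketch (illustrative only): account, file system and key are dummy
+# placeholders; `_example_file_system_sas` is not part of the public API.
+def _example_file_system_sas():
+    from datetime import datetime, timedelta
+
+    from azure.storage.filedatalake import FileSystemSasPermissions
+
+    return generate_file_system_sas(
+        account_name="myaccount",
+        file_system_name="myfilesystem",
+        credential="MDAwMDAwMDAwMDAwMDAwMA==",  # dummy account key
+        permission=FileSystemSasPermissions(read=True, list=True),
+        expiry=datetime.utcnow() + timedelta(hours=1),
+    )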
+
+
+def generate_directory_sas(
+    account_name,  # type: str
+    file_system_name,  # type: str
+    directory_name,  # type: str
+    credential,  # type: Union[str, UserDelegationKey]
+    permission=None,  # type: Optional[Union[DirectorySasPermissions, str]]
+    expiry=None,  # type: Optional[Union[datetime, str]]
+    *,
+    sts_hook=None,  # type: Optional[Callable[[str], None]]
+    **kwargs  # type: Any
+):
+    # type: (...) -> str
+    """Generates a shared access signature for a directory.
+
+    Use the returned signature with the credential parameter of any DataLakeServiceClient,
+    FileSystemClient, DataLakeDirectoryClient or DataLakeFileClient.
+
+    :param str account_name:
+        The storage account name used to generate the shared access signature.
+    :param str file_system_name:
+        The name of the file system.
+    :param str directory_name:
+        The name of the directory.
+    :param credential:
+        The credential may be either an account key or a user delegation key.
+        If an account key is used, the credential should be a str.
+        Instead of an account key, the user may also pass in a user delegation key.
+        A user delegation key can be obtained from the service by authenticating with an AAD identity;
+        this can be accomplished
+        by calling :func:`~azure.storage.filedatalake.DataLakeServiceClient.get_user_delegation_key`.
+        When present, the SAS is signed with the user delegation key instead.
+    :type credential: str or ~azure.storage.filedatalake.UserDelegationKey
+    :param permission:
+        The permissions associated with the shared access signature. The
+        user is restricted to operations allowed by the permissions.
+        Permissions must be ordered racwdlmeop.
+        Required unless an id is given referencing a stored access policy
+        which contains this field. This field must be omitted if it has been
+        specified in an associated stored access policy.
+    :type permission: str or ~azure.storage.filedatalake.DirectorySasPermissions
+    :param expiry:
+        The time at which the shared access signature becomes invalid.
+        Required unless an id is given referencing a stored access policy
+        which contains this field. This field must be omitted if it has
+        been specified in an associated stored access policy. Azure will always
+        convert values to UTC. If a date is passed in without timezone info, it
+        is assumed to be UTC.
+    :type expiry: ~datetime.datetime or str
+    :keyword start:
+        The time at which the shared access signature becomes valid. If
+        omitted, start time for this call is assumed to be the time when the
+        storage service receives the request. The provided datetime will always
+        be interpreted as UTC.
+    :paramtype start: ~datetime.datetime or str
+    :keyword str policy_id:
+        A unique value up to 64 characters in length that correlates to a
+        stored access policy. To create a stored access policy, use
+        :func:`~azure.storage.filedatalake.FileSystemClient.set_file_system_access_policy`.
+    :keyword str ip:
+        Specifies an IP address or a range of IP addresses from which to accept requests.
+        If the IP address from which the request originates does not match the IP address
+        or address range specified on the SAS token, the request is not authenticated.
+        For example, specifying ip=168.1.5.65 or ip=168.1.5.60-168.1.5.70 on the SAS
+        restricts the request to those IP addresses.
+    :keyword str protocol:
+        Specifies the protocol permitted for a request made. The default value is https.
+    :keyword str cache_control:
+        Response header value for Cache-Control when resource is accessed
+        using this shared access signature.
+    :keyword str content_disposition:
+        Response header value for Content-Disposition when resource is accessed
+        using this shared access signature.
+    :keyword str content_encoding:
+        Response header value for Content-Encoding when resource is accessed
+        using this shared access signature.
+    :keyword str content_language:
+        Response header value for Content-Language when resource is accessed
+        using this shared access signature.
+    :keyword str content_type:
+        Response header value for Content-Type when resource is accessed
+        using this shared access signature.
+    :keyword str preauthorized_agent_object_id:
+        The AAD object ID of a user assumed to be authorized by the owner of the user delegation key to perform
+        the action granted by the SAS token. The service will validate the SAS token and ensure that the owner of the
+        user delegation key has the required permissions before granting access but no additional permission check for
+        the agent object id will be performed.
+    :keyword str agent_object_id:
+        The AAD object ID of a user assumed to be unauthorized by the owner of the user delegation key to
+        perform the action granted by the SAS token. The service will validate the SAS token and ensure that the owner
+        of the user delegation key has the required permissions before granting access and the service will perform an
+        additional POSIX ACL check to determine if this user is authorized to perform the requested operation.
+    :keyword str correlation_id:
+        The correlation id to correlate the storage audit logs with the audit logs used by the principal
+        generating and distributing the SAS.
+    :keyword str encryption_scope:
+        Specifies the encryption scope for a request made so that all write operations will be service encrypted.
+    :keyword sts_hook:
+        For debugging purposes only. If provided, the hook is called with the string to sign
+        that was used to generate the SAS.
+    :paramtype sts_hook: Optional[Callable[[str], None]]
+    :return: A Shared Access Signature (sas) token.
+    :rtype: str
+    """
+    depth = len(directory_name.strip("/").split("/"))
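+    # e.g. "/dir1/dir2/dir3/" -> ["dir1", "dir2", "dir3"] -> depth 3, which is
+    # passed to the service as the signed directory depth ("sdd") parameter.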
+    return generate_blob_sas(
+        account_name=account_name,
+        container_name=file_system_name,
+        blob_name=directory_name,
+        account_key=credential if isinstance(credential, str) else None,
+        user_delegation_key=credential if not isinstance(credential, str) else None,
+        permission=permission,
+        expiry=expiry,
+        sdd=depth,
+        is_directory=True,
+        sts_hook=sts_hook,
+        **kwargs
+    )
+
+
+def generate_file_sas(
+    account_name,  # type: str
+    file_system_name,  # type: str
+    directory_name,  # type: str
+    file_name,  # type: str
+    credential,  # type: Union[str, UserDelegationKey]
+    permission=None,  # type: Optional[Union[FileSasPermissions, str]]
+    expiry=None,  # type: Optional[Union[datetime, str]]
+    *,
+    sts_hook=None,  # type: Optional[Callable[[str], None]]
+    **kwargs  # type: Any
+):
+    # type: (...) -> str
+    """Generates a shared access signature for a file.
+
+    Use the returned signature with the credential parameter of any DataLakeServiceClient,
+    FileSystemClient, DataLakeDirectoryClient or DataLakeFileClient.
+
+    :param str account_name:
+        The storage account name used to generate the shared access signature.
+    :param str file_system_name:
+        The name of the file system.
+    :param str directory_name:
+        The name of the directory.
+    :param str file_name:
+        The name of the file.
+    :param credential:
+        The credential may be either an account key or a user delegation key.
+        If an account key is used, the credential should be a str.
+        Instead of an account key, the user may also pass in a user delegation key.
+        A user delegation key can be obtained from the service by authenticating with an AAD identity;
+        this can be accomplished
+        by calling :func:`~azure.storage.filedatalake.DataLakeServiceClient.get_user_delegation_key`.
+        When present, the SAS is signed with the user delegation key instead.
+    :type credential: str or ~azure.storage.filedatalake.UserDelegationKey
+    :param permission:
+        The permissions associated with the shared access signature. The
+        user is restricted to operations allowed by the permissions.
+        Permissions must be ordered racwdlmeop.
+        Required unless an id is given referencing a stored access policy
+        which contains this field. This field must be omitted if it has been
+        specified in an associated stored access policy.
+    :type permission: str or ~azure.storage.filedatalake.FileSasPermissions
+    :param expiry:
+        The time at which the shared access signature becomes invalid.
+        Required unless an id is given referencing a stored access policy
+        which contains this field. This field must be omitted if it has
+        been specified in an associated stored access policy. Azure will always
+        convert values to UTC. If a date is passed in without timezone info, it
+        is assumed to be UTC.
+    :type expiry: ~datetime.datetime or str
+    :keyword start:
+        The time at which the shared access signature becomes valid. If
+        omitted, start time for this call is assumed to be the time when the
+        storage service receives the request. The provided datetime will always
+        be interpreted as UTC.
+    :paramtype start: ~datetime.datetime or str
+    :keyword str policy_id:
+        A unique value up to 64 characters in length that correlates to a
+        stored access policy. To create a stored access policy, use
+        :func:`~azure.storage.filedatalake.FileSystemClient.set_file_system_access_policy`.
+    :keyword str ip:
+        Specifies an IP address or a range of IP addresses from which to accept requests.
+        If the IP address from which the request originates does not match the IP address
+        or address range specified on the SAS token, the request is not authenticated.
+        For example, specifying ip=168.1.5.65 or ip=168.1.5.60-168.1.5.70 on the SAS
+        restricts the request to those IP addresses.
+    :keyword str protocol:
+        Specifies the protocol permitted for a request made. The default value is https.
+    :keyword str cache_control:
+        Response header value for Cache-Control when resource is accessed
+        using this shared access signature.
+    :keyword str content_disposition:
+        Response header value for Content-Disposition when resource is accessed
+        using this shared access signature.
+    :keyword str content_encoding:
+        Response header value for Content-Encoding when resource is accessed
+        using this shared access signature.
+    :keyword str content_language:
+        Response header value for Content-Language when resource is accessed
+        using this shared access signature.
+    :keyword str content_type:
+        Response header value for Content-Type when resource is accessed
+        using this shared access signature.
+    :keyword str preauthorized_agent_object_id:
+        The AAD object ID of a user assumed to be authorized by the owner of the user delegation key to perform
+        the action granted by the SAS token. The service will validate the SAS token and ensure that the owner of the
+        user delegation key has the required permissions before granting access but no additional permission check for
+        the agent object id will be performed.
+    :keyword str agent_object_id:
+        The AAD object ID of a user assumed to be unauthorized by the owner of the user delegation key to
+        perform the action granted by the SAS token. The service will validate the SAS token and ensure that the owner
+        of the user delegation key has the required permissions before granting access and the service will perform an
+        additional POSIX ACL check to determine if this user is authorized to perform the requested operation.
+    :keyword str correlation_id:
+        The correlation id to correlate the storage audit logs with the audit logs used by the principal
+        generating and distributing the SAS. This can only be used when generating a SAS with delegation key.
+    :keyword str encryption_scope:
+        Specifies the encryption scope for a request made so that all write operations will be service encrypted.
+    :keyword sts_hook:
+        For debugging purposes only. If provided, the hook is called with the string to sign
+        that was used to generate the SAS.
+    :paramtype sts_hook: Optional[Callable[[str], None]]
+    :return: A Shared Access Signature (SAS) token.
+    :rtype: str
+    """
+    if directory_name:
+        path = directory_name.rstrip('/') + "/" + file_name
+    else:
+        path = file_name
+    return generate_blob_sas(
+        account_name=account_name,
+        container_name=file_system_name,
+        blob_name=path,
+        account_key=credential if isinstance(credential, str) else None,
+        user_delegation_key=credential if not isinstance(credential, str) else None,
+        permission=permission,
+        expiry=expiry,
+        sts_hook=sts_hook,
+        **kwargs
+    )
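+
+# --- Illustrative usage (editor's sketch, not part of the SDK source) ---
+# A hedged example of generate_file_sas with an account key; all names below
+# are placeholders.
+#
+#   from datetime import datetime, timedelta, timezone
+#   from azure.storage.filedatalake import generate_file_sas
+#
+#   sas = generate_file_sas(
+#       account_name="myaccount",
+#       file_system_name="myfs",
+#       directory_name="raw/2025",
+#       file_name="data.csv",
+#       credential="<account-key>",
+#       permission="r",
+#       expiry=datetime.now(timezone.utc) + timedelta(hours=1),
+#   )
+#   url = f"https://myaccount.dfs.core.windows.net/myfs/raw/2025/data.csv?{sas}"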
+
+def _is_credential_sastoken(credential: Any) -> bool:
+    if not credential or not isinstance(credential, str):
+        return False
+
+    sas_values = QueryStringConstants.to_list()
+    parsed_query = parse_qs(credential.lstrip("?"))
+    if parsed_query and all(k in sas_values for k in parsed_query):
+        return True
+    return False
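+
+# Editor's note (illustrative): a credential string such as
+# "?sv=2025-01-05&se=...&sp=r&sig=..." parses entirely into known SAS query
+# keys and is classified as a SAS token above, while an account key (an opaque
+# base64 string with no query structure) is not.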
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_upload_helper.py b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_upload_helper.py
new file mode 100644
index 00000000..6cd89540
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_upload_helper.py
@@ -0,0 +1,105 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+
+from azure.core.exceptions import HttpResponseError
+
+from ._deserialize import process_storage_error
+from ._shared.response_handlers import return_response_headers
+from ._shared.uploads import (
+    upload_data_chunks,
+    upload_substream_blocks,
+    DataLakeFileChunkUploader,
+)
+
+
+def _any_conditions(modified_access_conditions=None, **kwargs):  # pylint: disable=unused-argument
+    # Callers are expected to pass a ModifiedAccessConditions instance; returns
+    # True if any of its precondition fields is set.
+    return any([
+        modified_access_conditions.if_modified_since,
+        modified_access_conditions.if_unmodified_since,
+        modified_access_conditions.if_none_match,
+        modified_access_conditions.if_match
+    ])
+
+
+def upload_datalake_file(
+        client=None,
+        stream=None,
+        length=None,
+        overwrite=None,
+        validate_content=None,
+        max_concurrency=None,
+        file_settings=None,
+        **kwargs):
+    try:
+        if length == 0:
+            return {}
+        properties = kwargs.pop('properties', None)
+        umask = kwargs.pop('umask', None)
+        permissions = kwargs.pop('permissions', None)
+        path_http_headers = kwargs.pop('path_http_headers', None)
+        modified_access_conditions = kwargs.pop('modified_access_conditions', None)
+        chunk_size = kwargs.pop('chunk_size', 100 * 1024 * 1024)
+        encryption_context = kwargs.pop('encryption_context', None)
+
+        if not overwrite:
+            # If the caller did not specify any access conditions, require that
+            # the file does not already exist so data is not flushed onto it.
+            if not _any_conditions(modified_access_conditions):
+                modified_access_conditions.if_none_match = '*'
+            if properties or umask or permissions:
+                raise ValueError("metadata, umask and permissions can be set only when overwrite is enabled")
+
+        if overwrite:
+            response = client.create(
+                resource='file',
+                path_http_headers=path_http_headers,
+                properties=properties,
+                modified_access_conditions=modified_access_conditions,
+                umask=umask,
+                permissions=permissions,
+                encryption_context=encryption_context,
+                cls=return_response_headers,
+                **kwargs)
+
+            # These modified_access_conditions are applied to flush_data to ensure
+            # no other flush occurs between the create call and the current flush.
+            modified_access_conditions.if_match = response['etag']
+            modified_access_conditions.if_none_match = None
+            modified_access_conditions.if_modified_since = None
+            modified_access_conditions.if_unmodified_since = None
+
+        use_original_upload_path = (
+            file_settings.use_byte_buffer
+            or validate_content
+            or chunk_size < file_settings.min_large_chunk_upload_threshold
+            or (hasattr(stream, 'seekable') and not stream.seekable())
+            or not hasattr(stream, 'seek')
+            or not hasattr(stream, 'tell')
+        )
+
+        if use_original_upload_path:
+            upload_data_chunks(
+                service=client,
+                uploader_class=DataLakeFileChunkUploader,
+                total_size=length,
+                chunk_size=chunk_size,
+                stream=stream,
+                max_concurrency=max_concurrency,
+                validate_content=validate_content,
+                **kwargs)
+        else:
+            upload_substream_blocks(
+                service=client,
+                uploader_class=DataLakeFileChunkUploader,
+                total_size=length,
+                chunk_size=chunk_size,
+                max_concurrency=max_concurrency,
+                stream=stream,
+                validate_content=validate_content,
+                **kwargs
+            )
+
+        return client.flush_data(position=length,
+                                 path_http_headers=path_http_headers,
+                                 modified_access_conditions=modified_access_conditions,
+                                 close=True,
+                                 cls=return_response_headers,
+                                 **kwargs)
+    except HttpResponseError as error:
+        process_storage_error(error)
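+
+# --- Illustrative context (editor's sketch, not part of the SDK source) ---
+# This helper backs the public DataLakeFileClient.upload_data() call; a hedged
+# example of that entry point, with placeholder names:
+#
+#   with open("local.bin", "rb") as data:
+#       file_client.upload_data(data, overwrite=True, max_concurrency=4)
+#
+# With overwrite=True the file is created first, the returned etag is pinned
+# as an if-match condition, the chunks are uploaded, and the final
+# flush_data(position=length, close=True) commits the upload.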
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_version.py b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_version.py
new file mode 100644
index 00000000..de61a38b
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/_version.py
@@ -0,0 +1,7 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+
+VERSION = "12.19.0"
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/aio/__init__.py b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/aio/__init__.py
new file mode 100644
index 00000000..c24dde8d
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/aio/__init__.py
@@ -0,0 +1,24 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+
+from ._download_async import StorageStreamDownloader
+from .._shared.policies_async import ExponentialRetry, LinearRetry
+from ._data_lake_file_client_async import DataLakeFileClient
+from ._data_lake_directory_client_async import DataLakeDirectoryClient
+from ._file_system_client_async import FileSystemClient
+from ._data_lake_service_client_async import DataLakeServiceClient
+from ._data_lake_lease_async import DataLakeLeaseClient
+
+__all__ = [
+    'DataLakeServiceClient',
+    'FileSystemClient',
+    'DataLakeDirectoryClient',
+    'DataLakeFileClient',
+    'DataLakeLeaseClient',
+    'ExponentialRetry',
+    'LinearRetry',
+    'StorageStreamDownloader'
+]
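+
+# --- Illustrative usage (editor's sketch, not part of the SDK source) ---
+# A minimal async entry point; the account URL and credential are placeholders.
+#
+#   import asyncio
+#   from azure.storage.filedatalake.aio import DataLakeServiceClient
+#
+#   async def main():
+#       async with DataLakeServiceClient(
+#           "https://myaccount.dfs.core.windows.net", credential="<account-key>"
+#       ) as service:
+#           file_system = service.get_file_system_client("myfs")
+#           ...
+#
+#   asyncio.run(main())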
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/aio/_data_lake_directory_client_async.py b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/aio/_data_lake_directory_client_async.py
new file mode 100644
index 00000000..578f896e
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/aio/_data_lake_directory_client_async.py
@@ -0,0 +1,721 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+# pylint: disable=invalid-overridden-method, docstring-keyword-should-match-keyword-only
+
+import functools
+from typing import (
+    Any, Dict, Optional, Union,
+    TYPE_CHECKING
+)
+
+from urllib.parse import quote, unquote
+
+from azure.core.async_paging import AsyncItemPaged
+from azure.core.pipeline import AsyncPipeline
+from azure.core.tracing.decorator import distributed_trace
+from azure.core.tracing.decorator_async import distributed_trace_async
+from .._data_lake_directory_client import DataLakeDirectoryClient as DataLakeDirectoryClientBase
+from .._deserialize import deserialize_dir_properties
+from .._models import DirectoryProperties, FileProperties
+from .._shared.base_client_async import AsyncTransportWrapper
+from ._data_lake_file_client_async import DataLakeFileClient
+from ._list_paths_helper import PathPropertiesPaged
+from ._path_client_async import PathClient
+
+if TYPE_CHECKING:
+    from azure.core.credentials import AzureNamedKeyCredential, AzureSasCredential
+    from azure.core.credentials_async import AsyncTokenCredential
+    from datetime import datetime
+    from .._models import PathProperties
+
+
+class DataLakeDirectoryClient(PathClient, DataLakeDirectoryClientBase):
+    """A client to interact with the DataLake directory, even if the directory may not yet exist.
+
+    For operations relating to a specific subdirectory or file under the directory, a directory client or file client
+    can be retrieved using the :func:`~get_sub_directory_client` or :func:`~get_file_client` functions.
+
+    :ivar str url:
+        The full endpoint URL to the file system, including SAS token if used.
+    :ivar str primary_endpoint:
+        The full primary endpoint URL.
+    :ivar str primary_hostname:
+        The hostname of the primary endpoint.
+    :param str account_url:
+        The URI to the storage account.
+    :param file_system_name:
+        The file system for the directory or files.
+    :type file_system_name: str
+    :param directory_name:
+        The whole path of the directory, e.g. {directory under file system}/{directory to interact with}.
+    :type directory_name: str
+    :param credential:
+        The credentials with which to authenticate. This is optional if the
+        account URL already has a SAS token. The value can be a SAS token string,
+        an instance of a AzureSasCredential or AzureNamedKeyCredential from azure.core.credentials,
+        an account shared access key, or an instance of a TokenCredentials class from azure.identity.
+        If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential
+        - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError.
+        If using an instance of AzureNamedKeyCredential, "name" should be the storage account name, and "key"
+        should be the storage account key.
+    :type credential:
+        ~azure.core.credentials.AzureNamedKeyCredential or
+        ~azure.core.credentials.AzureSasCredential or
+        ~azure.core.credentials_async.AsyncTokenCredential or
+        str or dict[str, str] or None
+    :keyword str api_version:
+        The Storage API version to use for requests. Default value is the most recent service version that is
+        compatible with the current SDK. Setting to an older version may result in reduced feature compatibility.
+    :keyword str audience: The audience to use when requesting tokens for Azure Active Directory
+        authentication. Only has an effect when credential is of type TokenCredential. The value could be
+        https://storage.azure.com/ (default) or https://<account>.blob.core.windows.net.
+
+    .. admonition:: Example:
+
+        .. literalinclude:: ../samples/datalake_samples_instantiate_client_async.py
+            :start-after: [START instantiate_directory_client_from_conn_str]
+            :end-before: [END instantiate_directory_client_from_conn_str]
+            :language: python
+            :dedent: 4
+            :caption: Creating the DataLakeDirectoryClient from connection string.
+    """
+
+    def __init__(
+        self, account_url: str,
+        file_system_name: str,
+        directory_name: str,
+        credential: Optional[Union[str, Dict[str, str], "AzureNamedKeyCredential", "AzureSasCredential", "AsyncTokenCredential"]] = None,  # pylint: disable=line-too-long
+        **kwargs: Any
+    ) -> None:
+        super(DataLakeDirectoryClient, self).__init__(account_url, file_system_name, directory_name, # pylint: disable=specify-parameter-names-in-call
+                                                      credential=credential, **kwargs)
+
+    @distributed_trace_async
+    async def create_directory(self, metadata=None,  # type: Optional[Dict[str, str]]
+                               **kwargs):
+        # type: (...) -> Dict[str, Union[str, datetime]]
+        """
+        Create a new directory.
+
+        :param metadata:
+            Name-value pairs associated with the directory as metadata.
+        :type metadata: dict(str, str)
+        :keyword ~azure.storage.filedatalake.ContentSettings content_settings:
+            ContentSettings object used to set path properties.
+        :keyword lease:
+            Required if the directory has an active lease. Value can be a DataLakeLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str
+        :keyword str umask:
+            Optional and only valid if Hierarchical Namespace is enabled for the account.
+            When creating a file or directory and the parent folder does not have a default ACL,
+            the umask restricts the permissions of the file or directory to be created.
+            The resulting permission is given by p & ^u, where p is the permission and u is the umask.
+            For example, if p is 0777 and u is 0057, then the resulting permission is 0720.
+            The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027.
+            The umask must be specified in 4-digit octal notation (e.g. 0766).
+        :keyword str owner:
+            The owner of the file or directory.
+        :keyword str group:
+            The owning group of the file or directory.
+        :keyword str acl:
+            Sets POSIX access control rights on files and directories. The value is a
+            comma-separated list of access control entries. Each access control entry (ACE) consists of a
+            scope, a type, a user or group identifier, and permissions in the format
+            "[scope:][type]:[id]:[permissions]".
+        :keyword str lease_id:
+            Proposed lease ID, in a GUID string format. The DataLake service returns
+            400 (Invalid request) if the proposed lease ID is not in the correct format.
+        :keyword int lease_duration:
+            Specifies the duration of the lease, in seconds, or negative one
+            (-1) for a lease that never expires. A non-infinite lease can be
+            between 15 and 60 seconds. A lease duration cannot be changed
+            using renew or change.
+        :keyword str permissions:
+            Optional and only valid if Hierarchical Namespace
+            is enabled for the account. Sets POSIX access permissions for the file
+            owner, the file owning group, and others. Each class may be granted
+            read, write, or execute permission.  The sticky bit is also supported.
+            Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are
+            supported.
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword ~azure.storage.filedatalake.CustomerProvidedEncryptionKey cpk:
+            Encrypts the data on the service-side with the given key.
+            Use of customer-provided keys must be done over HTTPS.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :return: A dictionary of response headers.
+        :rtype: dict[str, str] or dict[str, ~datetime.datetime]
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_directory_async.py
+                :start-after: [START create_directory]
+                :end-before: [END create_directory]
+                :language: python
+                :dedent: 8
+                :caption: Create directory.
+        """
+        return await self._create('directory', metadata=metadata, **kwargs)
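+
+    # Illustrative usage (editor's sketch, not part of the SDK source): the
+    # returned dict carries the response headers, including the new etag.
+    #
+    #   headers = await directory_client.create_directory(metadata={"env": "dev"})
+    #   etag = headers.get("etag")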
+
+    @distributed_trace_async
+    async def exists(self, **kwargs):
+        # type: (**Any) -> bool
+        """
+        Returns True if a directory exists and returns False otherwise.
+
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :returns: True if a directory exists, False otherwise.
+        :rtype: bool
+        """
+        return await self._exists(**kwargs)
+
+    @distributed_trace_async
+    async def delete_directory(self, **kwargs):
+        # type: (...) -> None
+        """
+        Marks the specified directory for deletion.
+
+        :keyword lease:
+            Required if the directory has an active lease. Value can be a DataLakeLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :returns: None.
+        :rtype: None
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_directory_async.py
+                :start-after: [START delete_directory]
+                :end-before: [END delete_directory]
+                :language: python
+                :dedent: 4
+                :caption: Delete directory.
+        """
+        return await self._delete(recursive=True, **kwargs)
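+
+    # Illustrative usage (editor's sketch, not part of the SDK source):
+    # deletion of a directory through this method is always recursive.
+    #
+    #   await directory_client.delete_directory()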
+
+    @distributed_trace_async
+    async def get_directory_properties(self, **kwargs):
+        # type: (**Any) -> DirectoryProperties
+        """Returns all user-defined metadata, standard HTTP properties, and
+        system properties for the directory. It does not return the content of the directory.
+
+        :keyword lease:
+            Required if the directory or file has an active lease. Value can be a DataLakeLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword ~azure.storage.filedatalake.CustomerProvidedEncryptionKey cpk:
+            Decrypts the data on the service-side with the given key.
+            Use of customer-provided keys must be done over HTTPS.
+            Required if the directory was created with a customer-provided key.
+        :keyword bool upn:
+            If True, the user identity values returned in the x-ms-owner, x-ms-group,
+            and x-ms-acl response headers will be transformed from Azure Active Directory Object IDs to User
+            Principal Names in the owner, group, and acl fields of
+            :class:`~azure.storage.filedatalake.DirectoryProperties`. If False, the values will be returned
+            as Azure Active Directory Object IDs. The default value is False. Note that group and application
+            Object IDs are not translated because they do not have unique friendly names.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :returns:
+            Information including user-defined metadata, standard HTTP properties,
+            and system properties for the file or directory.
+        :rtype: DirectoryProperties
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_directory_async.py
+                :start-after: [START get_directory_properties]
+                :end-before: [END get_directory_properties]
+                :language: python
+                :dedent: 4
+                :caption: Getting the properties for a file/directory.
+        """
+        upn = kwargs.pop('upn', None)
+        if upn:
+            headers = kwargs.pop('headers', {})
+            headers['x-ms-upn'] = str(upn)
+            kwargs['headers'] = headers
+        return await self._get_path_properties(cls=deserialize_dir_properties, **kwargs)
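+
+    # Illustrative usage (editor's sketch, not part of the SDK source):
+    #
+    #   props = await directory_client.get_directory_properties()
+    #   print(props.name, props.last_modified, props.metadata)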
+
+    @distributed_trace_async
+    async def rename_directory(self, new_name,  # type: str
+                               **kwargs):
+        # type: (...) -> DataLakeDirectoryClient
+        """
+        Rename the source directory.
+
+        :param str new_name:
+            The new directory name to which the user wants to rename.
+            The value must have the following format: "{filesystem}/{directory}/{subdirectory}".
+        :keyword source_lease:
+            A lease ID for the source path. If specified,
+            the source path must have an active lease and the lease ID must
+            match.
+        :paramtype source_lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str
+        :keyword lease:
+            Required if the file/directory has an active lease. Value can be a DataLakeLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword ~datetime.datetime source_if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime source_if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str source_etag:
+            The source ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions source_match_condition:
+            The source match condition to use upon the etag.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :returns: DataLakeDirectoryClient containing the renamed directory.
+        :rtype: ~azure.storage.filedatalake.aio.DataLakeDirectoryClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_directory_async.py
+                :start-after: [START rename_directory]
+                :end-before: [END rename_directory]
+                :language: python
+                :dedent: 4
+                :caption: Rename the source directory.
+        """
+        new_file_system, new_path, new_dir_sas = self._parse_rename_path(new_name)
+
+        new_directory_client = DataLakeDirectoryClient(
+            f"{self.scheme}://{self.primary_hostname}", new_file_system, directory_name=new_path,
+            credential=self._raw_credential or new_dir_sas,
+            _hosts=self._hosts, _configuration=self._config, _pipeline=self._pipeline)
+        await new_directory_client._rename_path(  # pylint: disable=protected-access
+            f'/{quote(unquote(self.file_system_name))}/{quote(unquote(self.path_name))}{self._query_str}', **kwargs)
+        return new_directory_client
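+
+    # Illustrative usage (editor's sketch, not part of the SDK source): note
+    # that the new name is prefixed with its target file system.
+    #
+    #   renamed = await directory_client.rename_directory("myfs/archive/dir2")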
+
+    @distributed_trace_async
+    async def create_sub_directory(self, sub_directory,  # type: Union[DirectoryProperties, str]
+                                   metadata=None,  # type: Optional[Dict[str, str]]
+                                   **kwargs):
+        # type: (...) -> DataLakeDirectoryClient
+        """
+        Create a subdirectory and return the subdirectory client to be interacted with.
+
+        :param sub_directory:
+            The directory with which to interact. This can either be the name of the directory,
+            or an instance of DirectoryProperties.
+        :type sub_directory: str or ~azure.storage.filedatalake.DirectoryProperties
+        :param metadata:
+            Name-value pairs associated with the file as metadata.
+        :type metadata: dict(str, str)
+        :keyword ~azure.storage.filedatalake.ContentSettings content_settings:
+            ContentSettings object used to set path properties.
+        :keyword lease:
+            Required if the file has an active lease. Value can be a DataLakeLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str
+        :keyword str umask:
+            Optional and only valid if Hierarchical Namespace is enabled for the account.
+            When creating a file or directory and the parent folder does not have a default ACL,
+            the umask restricts the permissions of the file or directory to be created.
+            The resulting permission is given by p & ^u, where p is the permission and u is the umask.
+            For example, if p is 0777 and u is 0057, then the resulting permission is 0720.
+            The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027.
+            The umask must be specified in 4-digit octal notation (e.g. 0766).
+        :keyword str owner:
+            The owner of the file or directory.
+        :keyword str group:
+            The owning group of the file or directory.
+        :keyword str acl:
+            Sets POSIX access control rights on files and directories. The value is a
+            comma-separated list of access control entries. Each access control entry (ACE) consists of a
+            scope, a type, a user or group identifier, and permissions in the format
+            "[scope:][type]:[id]:[permissions]".
+        :keyword str lease_id:
+            Proposed lease ID, in a GUID string format. The DataLake service returns
+            400 (Invalid request) if the proposed lease ID is not in the correct format.
+        :keyword int lease_duration:
+            Specifies the duration of the lease, in seconds, or negative one
+            (-1) for a lease that never expires. A non-infinite lease can be
+            between 15 and 60 seconds. A lease duration cannot be changed
+            using renew or change.
+        :keyword str permissions:
+            Optional and only valid if Hierarchical Namespace
+            is enabled for the account. Sets POSIX access permissions for the file
+            owner, the file owning group, and others. Each class may be granted
+            read, write, or execute permission.  The sticky bit is also supported.
+            Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are
+            supported.
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword ~azure.storage.filedatalake.CustomerProvidedEncryptionKey cpk:
+            Encrypts the data on the service-side with the given key.
+            Use of customer-provided keys must be done over HTTPS.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :returns: DataLakeDirectoryClient for the subdirectory.
+        :rtype: ~azure.storage.filedatalake.aio.DataLakeDirectoryClient
+        """
+        subdir = self.get_sub_directory_client(sub_directory)
+        await subdir.create_directory(metadata=metadata, **kwargs)
+        return subdir
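+
+    # Illustrative usage (editor's sketch, not part of the SDK source):
+    #
+    #   subdir = await directory_client.create_sub_directory("nested")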
+
+    @distributed_trace_async
+    async def delete_sub_directory(self, sub_directory,  # type: Union[DirectoryProperties, str]
+                                   **kwargs):
+        # type: (...) -> DataLakeDirectoryClient
+        """
+        Marks the specified subdirectory for deletion.
+
+        :param sub_directory:
+            The directory with which to interact. This can either be the name of the directory,
+            or an instance of DirectoryProperties.
+        :type sub_directory: str or ~azure.storage.filedatalake.DirectoryProperties
+        :keyword lease:
+            Required if the directory has an active lease. Value can be a DataLakeLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :returns: DataLakeDirectoryClient for the subdirectory.
+        :rtype: ~azure.storage.filedatalake.aio.DataLakeDirectoryClient
+        """
+        subdir = self.get_sub_directory_client(sub_directory)
+        await subdir.delete_directory(**kwargs)
+        return subdir
+
+    @distributed_trace_async
+    async def create_file(self, file,  # type: Union[FileProperties, str]
+                          **kwargs):
+        # type: (...) -> DataLakeFileClient
+        """
+        Create a new file and return the file client to be interacted with.
+
+        :param file:
+            The file with which to interact. This can either be the name of the file,
+            or an instance of FileProperties.
+        :type file: str or ~azure.storage.filedatalake.FileProperties
+        :keyword ~azure.storage.filedatalake.ContentSettings content_settings:
+            ContentSettings object used to set path properties.
+        :keyword metadata:
+            Name-value pairs associated with the file as metadata.
+        :paramtype metadata: dict(str, str)
+        :keyword lease:
+            Required if the file has an active lease. Value can be a DataLakeLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str
+        :keyword str umask:
+            Optional and only valid if Hierarchical Namespace is enabled for the account.
+            When creating a file or directory and the parent folder does not have a default ACL,
+            the umask restricts the permissions of the file or directory to be created.
+            The resulting permission is given by p & ^u, where p is the permission and u is the umask.
+            For example, if p is 0777 and u is 0057, then the resulting permission is 0720.
+            The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027.
+            The umask must be specified in 4-digit octal notation (e.g. 0766).
+        :keyword str owner:
+            The owner of the file or directory.
+        :keyword str group:
+            The owning group of the file or directory.
+        :keyword str acl:
+            Sets POSIX access control rights on files and directories. The value is a
+            comma-separated list of access control entries. Each access control entry (ACE) consists of a
+            scope, a type, a user or group identifier, and permissions in the format
+            "[scope:][type]:[id]:[permissions]".
+        :keyword str lease_id:
+            Proposed lease ID, in a GUID string format. The DataLake service returns
+            400 (Invalid request) if the proposed lease ID is not in the correct format.
+        :keyword int lease_duration:
+            Specifies the duration of the lease, in seconds, or negative one
+            (-1) for a lease that never expires. A non-infinite lease can be
+            between 15 and 60 seconds. A lease duration cannot be changed
+            using renew or change.
+        :keyword expires_on:
+            The time at which the file expires.
+            If the type of expires_on is an int, expiration time will be set
+            as the number of milliseconds elapsed from creation time.
+            If the type of expires_on is datetime, expiration time will be set
+            absolute to the time provided. If no time zone info is provided, this
+            will be interpreted as UTC.
+        :paramtype expires_on: datetime or int
+        :keyword str permissions:
+            Optional and only valid if Hierarchical Namespace
+            is enabled for the account. Sets POSIX access permissions for the file
+            owner, the file owning group, and others. Each class may be granted
+            read, write, or execute permission.  The sticky bit is also supported.
+            Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are
+            supported.
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword ~azure.storage.filedatalake.CustomerProvidedEncryptionKey cpk:
+            Encrypts the data on the service-side with the given key.
+            Use of customer-provided keys must be done over HTTPS.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :returns: DataLakeFileClient with the new file.
+        :rtype: ~azure.storage.filedatalake.aio.DataLakeFileClient
+        """
+        file_client = self.get_file_client(file)
+        await file_client.create_file(**kwargs)
+        return file_client
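+
+    # Illustrative usage (editor's sketch, not part of the SDK source):
+    #
+    #   file_client = await directory_client.create_file("report.txt")
+    #   await file_client.append_data(b"hello", offset=0, length=5)
+    #   await file_client.flush_data(5)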
+
+    @distributed_trace
+    def get_paths(
+        self, *,
+        recursive: bool = True,
+        max_results: Optional[int] = None,
+        upn: Optional[bool] = None,
+        timeout: Optional[int] = None,
+        **kwargs: Any
+    ) -> AsyncItemPaged["PathProperties"]:
+        """Returns an async generator to list the paths under specified file system and directory.
+        The generator will lazily follow the continuation tokens returned by the service.
+
+        :keyword bool recursive: Set True for recursive, False for iterative. The default value is True.
+        :keyword Optional[int] max_results: An optional value that specifies the maximum
+            number of items to return per page. If omitted or greater than 5,000, the
+            response will include up to 5,000 items per page.
+        :keyword Optional[bool] upn:
+            If True, the user identity values returned in the x-ms-owner, x-ms-group,
+            and x-ms-acl response headers will be transformed from Azure Active Directory Object IDs to User
+            Principal Names in the owner, group, and acl fields of
+            :class:`~azure.storage.filedatalake.PathProperties`. If False, the values will be returned
+            as Azure Active Directory Object IDs. The default value is None. Note that group and application
+            Object IDs are not translated because they do not have unique friendly names.
+        :keyword Optional[int] timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timesouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_. The default value is None.
+        :returns: An async iterable (auto-paging) response of PathProperties.
+        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.storage.filedatalake.PathProperties]
+        """
+        hostname = self._hosts[self._location_mode]
+        url = f"{self.scheme}://{hostname}/{quote(self.file_system_name)}"
+        client = self._build_generated_client(url)
+        command = functools.partial(
+            client.file_system.list_paths,
+            path=self.path_name,
+            timeout=timeout,
+            **kwargs
+        )
+        return AsyncItemPaged(
+            command, recursive, path=self.path_name, max_results=max_results,
+            upn=upn, page_iterator_class=PathPropertiesPaged, **kwargs)
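+
+    # Illustrative usage (editor's sketch, not part of the SDK source): the
+    # pager is consumed with "async for".
+    #
+    #   async for path in directory_client.get_paths(recursive=False):
+    #       print(path.name, path.is_directory)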
+
+    def get_file_client(self, file  # type: Union[FileProperties, str]
+                        ):
+        # type: (...) -> DataLakeFileClient
+        """Get a client to interact with the specified file.
+
+        The file need not already exist.
+
+        :param file:
+            The file with which to interact. This can either be the name of the file,
+            or an instance of FileProperties, e.g. directory/subdirectory/file.
+        :type file: str or ~azure.storage.filedatalake.FileProperties
+        :returns: A DataLakeFileClient.
+        :rtype: ~azure.storage.filedatalake.aio.DataLakeFileClient
+        """
+        try:
+            file_path = file.get('name')
+        except AttributeError:
+            file_path = self.path_name + '/' + str(file)
+
+        _pipeline = AsyncPipeline(
+            transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access
+            policies=self._pipeline._impl_policies # pylint: disable = protected-access
+        )
+        return DataLakeFileClient(
+            self.url, self.file_system_name, file_path=file_path, credential=self._raw_credential,
+            api_version=self.api_version,
+            _hosts=self._hosts, _configuration=self._config, _pipeline=_pipeline)
+
+    def get_sub_directory_client(self, sub_directory  # type: Union[DirectoryProperties, str]
+                                 ):
+        # type: (...) -> DataLakeDirectoryClient
+        """Get a client to interact with the specified subdirectory of the current directory.
+
+        The subdirectory need not already exist.
+
+        :param sub_directory:
+            The directory with which to interact. This can either be the name of the directory,
+            or an instance of DirectoryProperties.
+        :type sub_directory: str or ~azure.storage.filedatalake.DirectoryProperties
+        :returns: A DataLakeDirectoryClient.
+        :rtype: ~azure.storage.filedatalake.aio.DataLakeDirectoryClient
+        """
+        try:
+            subdir_path = sub_directory.get('name')
+        except AttributeError:
+            subdir_path = self.path_name + '/' + str(sub_directory)
+
+        _pipeline = AsyncPipeline(
+            transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access
+            policies=self._pipeline._impl_policies # pylint: disable = protected-access
+        )
+        return DataLakeDirectoryClient(
+            self.url, self.file_system_name, directory_name=subdir_path, credential=self._raw_credential,
+            api_version=self.api_version,
+            _hosts=self._hosts, _configuration=self._config, _pipeline=_pipeline)
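+
+    # Illustrative usage (editor's sketch, not part of the SDK source): both
+    # getters construct clients locally, without any service call.
+    #
+    #   subdir = directory_client.get_sub_directory_client("nested")
+    #   file_client = subdir.get_file_client("data.json")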
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/aio/_data_lake_file_client_async.py b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/aio/_data_lake_file_client_async.py
new file mode 100644
index 00000000..9b00b0b6
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/aio/_data_lake_file_client_async.py
@@ -0,0 +1,735 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+# pylint: disable=invalid-overridden-method, docstring-keyword-should-match-keyword-only
+
+from datetime import datetime
+from typing import (
+    Any, AnyStr, AsyncIterable, Dict, IO, Iterable, Optional, Union,
+    TYPE_CHECKING)
+from urllib.parse import quote, unquote
+
+from azure.core.exceptions import HttpResponseError
+from azure.core.tracing.decorator_async import distributed_trace_async
+from ._download_async import StorageStreamDownloader
+from ._path_client_async import PathClient
+from .._data_lake_file_client import DataLakeFileClient as DataLakeFileClientBase
+from .._serialize import convert_datetime_to_rfc1123
+from .._deserialize import process_storage_error, deserialize_file_properties
+from .._models import FileProperties
+from ..aio._upload_helper import upload_datalake_file
+
+if TYPE_CHECKING:
+    from azure.core.credentials import AzureNamedKeyCredential, AzureSasCredential
+    from azure.core.credentials_async import AsyncTokenCredential
+    from .._models import ContentSettings
+
+
+class DataLakeFileClient(PathClient, DataLakeFileClientBase):
+    """A client to interact with the DataLake file, even if the file may not yet exist.
+
+    :ivar str url:
+        The full endpoint URL to the file system, including SAS token if used.
+    :ivar str primary_endpoint:
+        The full primary endpoint URL.
+    :ivar str primary_hostname:
+        The hostname of the primary endpoint.
+    :param str account_url:
+        The URI to the storage account.
+    :param file_system_name:
+        The file system for the directory or files.
+    :type file_system_name: str
+    :param file_path:
+        The whole file path, used to interact with a specific file,
+        e.g. "{directory}/{subdirectory}/{file}".
+    :type file_path: str
+    :param credential:
+        The credentials with which to authenticate. This is optional if the
+        account URL already has a SAS token. The value can be a SAS token string,
+        an instance of an AzureSasCredential or AzureNamedKeyCredential from azure.core.credentials,
+        an account shared access key, or an instance of a TokenCredentials class from azure.identity.
+        If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential
+        - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError.
+        If using an instance of AzureNamedKeyCredential, "name" should be the storage account name, and "key"
+        should be the storage account key.
+    :type credential:
+        ~azure.core.credentials.AzureNamedKeyCredential or
+        ~azure.core.credentials.AzureSasCredential or
+        ~azure.core.credentials_async.AsyncTokenCredential or
+        str or dict[str, str] or None
+    :keyword str api_version:
+        The Storage API version to use for requests. Default value is the most recent service version that is
+        compatible with the current SDK. Setting to an older version may result in reduced feature compatibility.
+    :keyword str audience: The audience to use when requesting tokens for Azure Active Directory
+        authentication. Only has an effect when credential is of type TokenCredential. The value could be
+        https://storage.azure.com/ (default) or https://<account>.blob.core.windows.net.
+
+    .. admonition:: Example:
+
+        .. literalinclude:: ../samples/datalake_samples_instantiate_client_async.py
+            :start-after: [START instantiate_file_client_from_conn_str]
+            :end-before: [END instantiate_file_client_from_conn_str]
+            :language: python
+            :dedent: 4
+            :caption: Creating the DataLakeFileClient from connection string.
+    """
+
+    def __init__(
+        self, account_url: str,
+        file_system_name: str,
+        file_path: str,
+        credential: Optional[Union[str, Dict[str, str], "AzureNamedKeyCredential", "AzureSasCredential", "AsyncTokenCredential"]] = None,  # pylint: disable=line-too-long
+        **kwargs: Any
+    ) -> None:
+        super(DataLakeFileClient, self).__init__(account_url, file_system_name, path_name=file_path,
+                                                 credential=credential, **kwargs)
+
+    @distributed_trace_async
+    async def create_file(self, content_settings=None,  # type: Optional[ContentSettings]
+                          metadata=None,  # type: Optional[Dict[str, str]]
+                          **kwargs):
+        # type: (...) -> Dict[str, Union[str, datetime]]
+        """
+        Create a new file.
+
+        :param ~azure.storage.filedatalake.ContentSettings content_settings:
+            ContentSettings object used to set path properties.
+        :param metadata:
+            Name-value pairs associated with the file as metadata.
+        :type metadata: Optional[dict[str, str]]
+        :keyword lease:
+            Required if the file has an active lease. Value can be a DataLakeLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str
+        :keyword str umask:
+            Optional and only valid if Hierarchical Namespace is enabled for the account.
+            When creating a file or directory and the parent folder does not have a default ACL,
+            the umask restricts the permissions of the file or directory to be created.
+            The resulting permission is given by p & ^u, where p is the permission and u is the umask.
+            For example, if p is 0777 and u is 0057, then the resulting permission is 0720.
+            The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027.
+            The umask must be specified in 4-digit octal notation (e.g. 0766).
+        :keyword str owner:
+            The owner of the file or directory.
+        :keyword str group:
+            The owning group of the file or directory.
+        :keyword str acl:
+            Sets POSIX access control rights on files and directories. The value is a
+            comma-separated list of access control entries. Each access control entry (ACE) consists of a
+            scope, a type, a user or group identifier, and permissions in the format
+            "[scope:][type]:[id]:[permissions]".
+        :keyword str lease_id:
+            Proposed lease ID, in a GUID string format. The DataLake service returns
+            400 (Invalid request) if the proposed lease ID is not in the correct format.
+        :keyword int lease_duration:
+            Specifies the duration of the lease, in seconds, or negative one
+            (-1) for a lease that never expires. A non-infinite lease can be
+            between 15 and 60 seconds. A lease duration cannot be changed
+            using renew or change.
+        :keyword expires_on:
+            The time to set the file to expiry.
+            If the type of expires_on is an int, expiration time will be set
+            as the number of milliseconds elapsed from creation time.
+            If the type of expires_on is datetime, expiration time will be set
+            absolute to the time provided. If no time zone info is provided, this
+            will be interpreted as UTC.
+        :paramtype expires_on: datetime or int
+        :keyword str permissions:
+            Optional and only valid if Hierarchical Namespace
+            is enabled for the account. Sets POSIX access permissions for the file
+            owner, the file owning group, and others. Each class may be granted
+            read, write, or execute permission.  The sticky bit is also supported.
+            Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are
+            supported.
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword ~azure.storage.filedatalake.CustomerProvidedEncryptionKey cpk:
+            Encrypts the data on the service-side with the given key.
+            Use of customer-provided keys must be done over HTTPS.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :keyword str encryption_context:
+            Specifies the encryption context to set on the file.
+        :returns: response dict (Etag and last modified).
+        :rtype: dict[str, str or ~datetime.datetime]
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_upload_download_async.py
+                :start-after: [START create_file]
+                :end-before: [END create_file]
+                :language: python
+                :dedent: 4
+                :caption: Create file.
+        """
+        return await self._create('file', content_settings=content_settings, metadata=metadata, **kwargs)
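+
+    # Illustrative usage sketch (assumes `file_client` is an instance of this
+    # class for a path that does not yet exist; the metadata is made up, and
+    # the response keys follow the "Etag and last modified" dict noted above):
+    #
+    #     response = await file_client.create_file(
+    #         metadata={"owner": "data-team"},
+    #         permissions="rwxr-x---",
+    #         umask="0027")
+    #     etag, modified = response["etag"], response["last_modified"]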
+
+    @distributed_trace_async
+    async def exists(self, **kwargs):
+        # type: (**Any) -> bool
+        """
+        Returns True if a file exists and returns False otherwise.
+
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :returns: True if a file exists, False otherwise.
+        :rtype: bool
+        """
+        return await self._exists(**kwargs)
+
+    @distributed_trace_async
+    async def delete_file(self, **kwargs):
+        # type: (...) -> None
+        """
+        Marks the specified file for deletion.
+
+        :keyword lease:
+            Required if the file has an active lease. Value can be a LeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :returns: None.
+        :rtype: None
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_upload_download_async.py
+                :start-after: [START delete_file]
+                :end-before: [END delete_file]
+                :language: python
+                :dedent: 4
+                :caption: Delete file.
+        """
+        return await self._delete(**kwargs)
+
+    @distributed_trace_async
+    async def get_file_properties(self, **kwargs):
+        # type: (**Any) -> FileProperties
+        """Returns all user-defined metadata, standard HTTP properties, and
+        system properties for the file. It does not return the content of the file.
+
+        :keyword lease:
+            Required if the directory or file has an active lease. Value can be a DataLakeLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword ~azure.storage.filedatalake.CustomerProvidedEncryptionKey cpk:
+            Decrypts the data on the service-side with the given key.
+            Use of customer-provided keys must be done over HTTPS.
+            Required if the file was created with a customer-provided key.
+        :keyword bool upn:
+            If True, the user identity values returned in the x-ms-owner, x-ms-group,
+            and x-ms-acl response headers will be transformed from Azure Active Directory Object IDs to User
+            Principal Names in the owner, group, and acl fields of
+            :class:`~azure.storage.filedatalake.FileProperties`. If False, the values will be returned
+            as Azure Active Directory Object IDs. The default value is False. Note that group and application
+            Object IDs are not translated because they do not have unique friendly names.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :returns: All user-defined metadata, standard HTTP properties, and system properties for the file.
+        :rtype: ~azure.storage.filedatalake.FileProperties
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_upload_download_async.py
+                :start-after: [START get_file_properties]
+                :end-before: [END get_file_properties]
+                :language: python
+                :dedent: 4
+                :caption: Getting the properties for a file.
+        """
+        return await self._get_path_properties(cls=deserialize_file_properties, **kwargs)
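+
+    # Illustrative sketch (assumes `file_client` targets an existing file;
+    # the attribute names are assumed from FileProperties):
+    #
+    #     props = await file_client.get_file_properties()
+    #     print(props.name, props.size, props.last_modified)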
+
+    @distributed_trace_async
+    async def set_file_expiry(self, expiry_options,  # type: str
+                              expires_on=None,  # type: Optional[Union[datetime, int]]
+                              **kwargs):
+        # type: (...) -> None
+        """Sets the time a file will expire and be deleted.
+
+        :param str expiry_options:
+            Required. Indicates mode of the expiry time.
+            Possible values include: 'NeverExpire', 'RelativeToCreation', 'RelativeToNow', 'Absolute'
+        :param datetime or int expires_on:
+            The time to set the file to expiry.
+            When expiry_options is RelativeTo*, expires_on should be an int in milliseconds.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :rtype: None
+        """
+        if isinstance(expires_on, datetime):
+            expires_on = convert_datetime_to_rfc1123(expires_on)
+        elif expires_on is not None:
+            expires_on = str(expires_on)
+        await self._datalake_client_for_blob_operation.path.set_expiry(expiry_options, expires_on=expires_on,
+                                                                       **kwargs)
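+
+    # Illustrative sketch of the two expires_on shapes described above
+    # (assumes `file_client` targets an existing file):
+    #
+    #     # Relative mode: expires_on is an int of milliseconds from now.
+    #     await file_client.set_file_expiry("RelativeToNow", expires_on=60_000)
+    #
+    #     # Absolute mode: a datetime, converted to RFC 1123 by this method.
+    #     from datetime import datetime, timedelta, timezone
+    #     await file_client.set_file_expiry(
+    #         "Absolute",
+    #         expires_on=datetime.now(timezone.utc) + timedelta(days=1))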
+
+    @distributed_trace_async
+    async def upload_data(
+            self, data: Union[bytes, str, Iterable[AnyStr], AsyncIterable[AnyStr], IO[AnyStr]],
+            length: Optional[int] = None,
+            overwrite: Optional[bool] = False,
+            **kwargs
+        ) -> Dict[str, Any]:
+        """
+        Upload data to a file.
+
+        :param data: Content to be uploaded to file
+        :type data: bytes, str, Iterable[AnyStr], AsyncIterable[AnyStr], or IO[AnyStr]
+        :param int length: Size of the data in bytes.
+        :param bool overwrite: Whether to overwrite an existing file.
+        :keyword ~azure.storage.filedatalake.ContentSettings content_settings:
+            ContentSettings object used to set path properties.
+        :keyword metadata:
+            Name-value pairs associated with the blob as metadata.
+        :paramtype metadata: dict[str, str] or None
+        :keyword ~azure.storage.filedatalake.DataLakeLeaseClient or str lease:
+            Required if the blob has an active lease. Value can be a DataLakeLeaseClient object
+            or the lease ID as a string.
+        :keyword str umask: Optional and only valid if Hierarchical Namespace is enabled for the account.
+            When creating a file or directory and the parent folder does not have a default ACL,
+            the umask restricts the permissions of the file or directory to be created.
+            The resulting permission is given by p & ^u, where p is the permission and u is the umask.
+            For example, if p is 0777 and u is 0057, then the resulting permission is 0720.
+            The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027.
+            The umask must be specified in 4-digit octal notation (e.g. 0766).
+        :keyword str permissions: Optional and only valid if Hierarchical Namespace
+         is enabled for the account. Sets POSIX access permissions for the file
+         owner, the file owning group, and others. Each class may be granted
+         read, write, or execute permission.  The sticky bit is also supported.
+         Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are
+         supported.
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword bool validate_content:
+            If true, calculates an MD5 hash for each chunk of the file. The storage
+            service checks the hash of the content that has arrived with the hash
+            that was sent. This is primarily valuable for detecting bitflips on
+            the wire if using http instead of https, as https (the default), will
+            already validate. Note that this MD5 hash is not stored with the
+            blob. Also note that if enabled, the memory-efficient upload algorithm
+            will not be used because computing the MD5 hash requires buffering
+            entire blocks, and doing so defeats the purpose of the memory-efficient algorithm.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword ~azure.storage.filedatalake.CustomerProvidedEncryptionKey cpk:
+            Encrypts the data on the service-side with the given key.
+            Use of customer-provided keys must be done over HTTPS.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_. This method may make multiple calls to the service and
+            the timeout will apply to each call individually.
+        :keyword int max_concurrency:
+            Maximum number of parallel connections to use when transferring the file in chunks.
+            This option does not affect the underlying connection pool, and may
+            require a separate configuration of the connection pool.
+        :keyword int chunk_size:
+            The maximum chunk size for uploading a file in chunks.
+            Defaults to 100*1024*1024, or 100MB.
+        :keyword str encryption_context:
+            Specifies the encryption context to set on the file.
+        :return: response dict (Etag and last modified).
+        :rtype: dict[str, Any]
+        """
+        options = self._upload_options(
+            data,
+            length=length,
+            overwrite=overwrite,
+            **kwargs)
+        return await upload_datalake_file(**options)
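+
+    # Illustrative sketch: single-call upload that creates or replaces the
+    # file (assumes `file_client` exists; the payload is made up):
+    #
+    #     payload = b"col_a,col_b\n1,2\n"
+    #     await file_client.upload_data(payload, length=len(payload), overwrite=True)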
+
+    @distributed_trace_async
+    async def append_data(self, data,  # type: Union[bytes, str, Iterable[AnyStr], IO[AnyStr]]
+                          offset,  # type: int
+                          length=None,  # type: Optional[int]
+                          **kwargs):
+        # type: (...) -> Dict[str, Union[str, datetime, int]]
+        """Append data to the file.
+
+        :param data: Content to be appended to file
+        :type data: bytes, str, Iterable[AnyStr], or IO[AnyStr]
+        :param int offset: The start position at which the data is to be appended.
+        :param length: Size of the data in bytes.
+        :type length: int or None
+        :keyword bool flush:
+            If true, will commit the data after it is appended.
+        :keyword bool validate_content:
+            If true, calculates an MD5 hash of the block content. The storage
+            service checks the hash of the content that has arrived
+            with the hash that was sent. This is primarily valuable for detecting
+            bitflips on the wire if using http instead of https, as https (the default)
+            will already validate. Note that this MD5 hash is not stored with the
+            file.
+        :keyword lease_action:
+            Used to perform lease operations along with appending data.
+
+            "acquire" - Acquire a lease.
+            "auto-renew" - Re-new an existing lease.
+            "release" - Release the lease once the operation is complete. Requires `flush=True`.
+            "acquire-release" - Acquire a lease and release it once the operations is complete. Requires `flush=True`.
+        :paramtype lease_action: Literal["acquire", "auto-renew", "release", "acquire-release"]
+        :keyword int lease_duration:
+            Valid if `lease_action` is set to "acquire" or "acquire-release".
+
+            Specifies the duration of the lease, in seconds, or negative one
+            (-1) for a lease that never expires. A non-infinite lease can be
+            between 15 and 60 seconds. A lease duration cannot be changed
+            using renew or change. Default is -1 (infinite lease).
+        :keyword lease:
+            Required if the file has an active lease or if `lease_action` is set to "acquire" or "acquire-release".
+            If the file has an existing lease, this will be used to access the file. If acquiring a new lease,
+            this will be used as the new lease id.
+            Value can be a DataLakeLeaseClient object or the lease ID as a string.
+        :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
+        :keyword ~azure.storage.filedatalake.CustomerProvidedEncryptionKey cpk:
+            Encrypts the data on the service-side with the given key.
+            Use of customer-provided keys must be done over HTTPS.
+        :returns: dict of the response headers.
+        :rtype: dict[str, str], dict[str, ~datetime.datetime], or dict[str, int]
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_upload_download_async.py
+                :start-after: [START append_data]
+                :end-before: [END append_data]
+                :language: python
+                :dedent: 4
+                :caption: Append data to the file.
+        """
+        options = self._append_data_options(
+            data=data,
+            offset=offset,
+            scheme=self.scheme,
+            length=length,
+            **kwargs)
+        try:
+            return await self._client.path.append_data(**options)
+        except HttpResponseError as error:
+            process_storage_error(error)
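+
+    # Illustrative sketch: stage bytes at the start of an empty file without
+    # committing them; see the flush_data sketch below for the commit step:
+    #
+    #     data = b"staged but not yet committed"
+    #     await file_client.append_data(data, offset=0, length=len(data))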
+
+    @distributed_trace_async
+    async def flush_data(self, offset,  # type: int
+                         retain_uncommitted_data=False,  # type: Optional[bool]
+                         **kwargs):
+        # type: (...) -> Dict[str, Union[str, datetime]]
+        """ Commit the previous appended data.
+
+        :param int offset: offset is equal to the length of the file after committing the
+            previously appended data.
+        :param bool retain_uncommitted_data: Valid only for flush operations.  If
+            "true", uncommitted data is retained after the flush operation
+            completes; otherwise, the uncommitted data is deleted after the flush
+            operation.  The default is false.  Data at offsets less than the
+            specified position are written to the file when flush succeeds, but
+            this optional parameter allows data after the flush position to be
+            retained for a future flush operation.
+        :keyword ~azure.storage.filedatalake.ContentSettings content_settings:
+            ContentSettings object used to set path properties.
+        :keyword bool close: Azure Storage Events allow applications to receive
+            notifications when files change. When Azure Storage Events are
+            enabled, a file changed event is raised. This event has a property
+            indicating whether this is the final change to distinguish the
+            difference between an intermediate flush to a file stream and the
+            final close of a file stream. The close query parameter is valid only
+            when the action is "flush" and change notifications are enabled. If
+            the value of close is "true" and the flush operation completes
+            successfully, the service raises a file change notification with a
+            property indicating that this is the final update (the file stream has
+            been closed). If "false" a change notification is raised indicating
+            the file has changed. The default is false. This query parameter is
+            set to true by the Hadoop ABFS driver to indicate that the file stream
+            has been closed.
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword lease_action:
+            Used to perform lease operations along with appending data.
+
+            "acquire" - Acquire a lease.
+            "auto-renew" - Re-new an existing lease.
+            "release" - Release the lease once the operation is complete.
+            "acquire-release" - Acquire a lease and release it once the operations is complete.
+        :paramtype lease_action: Literal["acquire", "auto-renew", "release", "acquire-release"]
+        :keyword int lease_duration:
+            Valid if `lease_action` is set to "acquire" or "acquire-release".
+
+            Specifies the duration of the lease, in seconds, or negative one
+            (-1) for a lease that never expires. A non-infinite lease can be
+            between 15 and 60 seconds. A lease duration cannot be changed
+            using renew or change. Default is -1 (infinite lease).
+        :keyword lease:
+            Required if the file has an active lease or if `lease_action` is set to "acquire" or "acquire-release".
+            If the file has an existing lease, this will be used to access the file. If acquiring a new lease,
+            this will be used as the new lease id.
+            Value can be a DataLakeLeaseClient object or the lease ID as a string.
+        :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
+        :keyword ~azure.storage.filedatalake.CustomerProvidedEncryptionKey cpk:
+            Encrypts the data on the service-side with the given key.
+            Use of customer-provided keys must be done over HTTPS.
+        :returns: response headers in a dict.
+        :rtype: dict[str, str] or dict[str, ~datetime.datetime]
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_file_system_async.py
+                :start-after: [START upload_file_to_file_system]
+                :end-before: [END upload_file_to_file_system]
+                :language: python
+                :dedent: 12
+                :caption: Commit the previous appended data.
+        """
+        options = self._flush_data_options(
+            offset,
+            self.scheme,
+            retain_uncommitted_data=retain_uncommitted_data, **kwargs)
+        try:
+            return await self._client.path.flush_data(**options)
+        except HttpResponseError as error:
+            process_storage_error(error)
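+
+    # Illustrative sketch: commit the bytes staged in the append_data sketch
+    # above; offset is the total file length after the commit:
+    #
+    #     await file_client.flush_data(offset=len(data))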
+
+    @distributed_trace_async
+    async def download_file(self, offset=None, length=None, **kwargs):
+        # type: (Optional[int], Optional[int], Any) -> StorageStreamDownloader
+        """Downloads a file to the StorageStreamDownloader. The readall() method must
+        be used to read all the content, or readinto() must be used to download the file into
+        a stream. Using chunks() returns an async iterator which allows the user to iterate over the content in chunks.
+
+        :param int offset:
+            Start of byte range to use for downloading a section of the file.
+            Must be set if length is provided.
+        :param int length:
+            Number of bytes to read from the stream. This is optional, but
+            should be supplied for optimal performance.
+        :keyword lease:
+            If specified, download only succeeds if the file's lease is active
+            and matches this ID. Required if the file has an active lease.
+        :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword ~azure.storage.filedatalake.CustomerProvidedEncryptionKey cpk:
+            Decrypts the data on the service-side with the given key.
+            Use of customer-provided keys must be done over HTTPS.
+            Required if the file was created with a Customer-Provided Key.
+        :keyword int max_concurrency:
+            Maximum number of parallel connections to use when transferring the file in chunks.
+            This option does not affect the underlying connection pool, and may
+            require a separate configuration of the connection pool.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_. This method may make multiple calls to the service and
+            the timeout will apply to each call individually.
+        :returns: A streaming object (StorageStreamDownloader)
+        :rtype: ~azure.storage.filedatalake.aio.StorageStreamDownloader
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_upload_download_async.py
+                :start-after: [START read_file]
+                :end-before: [END read_file]
+                :language: python
+                :dedent: 4
+                :caption: Return the downloaded data.
+        """
+        downloader = await self._blob_client.download_blob(offset=offset, length=length, **kwargs)
+        return StorageStreamDownloader(downloader)
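+
+    # Illustrative sketch of the read styles named above (assumes
+    # `file_client` targets an existing file; each style starts from a fresh
+    # downloader because the stream is consumed on read):
+    #
+    #     downloader = await file_client.download_file()
+    #     contents = await downloader.readall()
+    #
+    #     downloader = await file_client.download_file()
+    #     async for chunk in downloader.chunks():
+    #         handle(chunk)  # hypothetical per-chunk callback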
+
+    @distributed_trace_async
+    async def rename_file(self, new_name, **kwargs):
+        # type: (str, **Any) -> DataLakeFileClient
+        """
+        Rename the source file.
+
+        :param str new_name: The new name to rename the file to.
+            The value must have the following format: "{filesystem}/{directory}/{subdirectory}/{file}".
+        :keyword ~azure.storage.filedatalake.ContentSettings content_settings:
+            ContentSettings object used to set path properties.
+        :keyword source_lease: A lease ID for the source path. If specified,
+            the source path must have an active lease and the lease ID must
+            match.
+        :paramtype source_lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str
+        :keyword lease:
+            Required if the file/directory has an active lease. Value can be a LeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword ~datetime.datetime source_if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime source_if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str source_etag:
+            The source ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions source_match_condition:
+            The source match condition to use upon the etag.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :return: The renamed file client.
+        :rtype: DataLakeFileClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_upload_download_async.py
+                :start-after: [START rename_file]
+                :end-before: [END rename_file]
+                :language: python
+                :dedent: 4
+                :caption: Rename the source file.
+        """
+        new_file_system, new_path, new_file_sas = self._parse_rename_path(new_name)
+
+        new_file_client = DataLakeFileClient(
+            f"{self.scheme}://{self.primary_hostname}", new_file_system, file_path=new_path,
+            credential=self._raw_credential or new_file_sas,
+            _hosts=self._hosts, _configuration=self._config, _pipeline=self._pipeline,
+            _location_mode=self._location_mode)
+        await new_file_client._rename_path(  # pylint: disable=protected-access
+            f'/{quote(unquote(self.file_system_name))}/{quote(unquote(self.path_name))}{self._query_str}', **kwargs)
+        return new_file_client
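+
+    # Illustrative sketch: new_name is prefixed with the file system, as
+    # documented above (assumes `file_client` currently points at
+    # "fs1/dir/old.txt"):
+    #
+    #     renamed_client = await file_client.rename_file("fs1/dir/new.txt")
+    #     props = await renamed_client.get_file_properties()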
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/aio/_data_lake_lease_async.py b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/aio/_data_lake_lease_async.py
new file mode 100644
index 00000000..0dae4306
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/aio/_data_lake_lease_async.py
@@ -0,0 +1,269 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+# pylint: disable=invalid-overridden-method, docstring-keyword-should-match-keyword-only
+
+from typing import (
+    Union, Optional, Any,
+    TypeVar, TYPE_CHECKING
+)
+from azure.core.tracing.decorator_async import distributed_trace_async
+from azure.storage.blob.aio import BlobLeaseClient
+from .._data_lake_lease import DataLakeLeaseClient as DataLakeLeaseClientBase
+
+
+if TYPE_CHECKING:
+    FileSystemClient = TypeVar("FileSystemClient")
+    DataLakeDirectoryClient = TypeVar("DataLakeDirectoryClient")
+    DataLakeFileClient = TypeVar("DataLakeFileClient")
+
+
+class DataLakeLeaseClient(DataLakeLeaseClientBase):  # pylint: disable=client-accepts-api-version-keyword
+    """Creates a new DataLakeLeaseClient.
+
+    This client provides lease operations on a FileSystemClient, DataLakeDirectoryClient or DataLakeFileClient.
+
+    :ivar str id:
+        The ID of the lease currently being maintained. This will be `None` if no
+        lease has yet been acquired.
+    :ivar str etag:
+        The ETag of the lease currently being maintained. This will be `None` if no
+        lease has yet been acquired or modified.
+    :ivar ~datetime.datetime last_modified:
+        The last modified timestamp of the lease currently being maintained.
+        This will be `None` if no lease has yet been acquired or modified.
+
+    :param client:
+        The client of the file system, directory, or file to lease.
+    :type client: ~azure.storage.filedatalake.aio.FileSystemClient or
+        ~azure.storage.filedatalake.aio.DataLakeDirectoryClient or ~azure.storage.filedatalake.aio.DataLakeFileClient
+    :param str lease_id:
+        A string representing the lease ID of an existing lease. This value does not
+        need to be specified in order to acquire a new lease, or break one.
+    """
+    def __init__(
+            self, client, lease_id=None
+    ):  # pylint: disable=missing-client-constructor-parameter-credential,missing-client-constructor-parameter-kwargs
+        # type: (Union[FileSystemClient, DataLakeDirectoryClient, DataLakeFileClient], Optional[str]) -> None
+        super(DataLakeLeaseClient, self).__init__(client, lease_id)
+
+        if hasattr(client, '_blob_client'):
+            _client = client._blob_client  # type: ignore
+        elif hasattr(client, '_container_client'):
+            _client = client._container_client  # type: ignore
+        else:
+            raise TypeError("Lease must use one of FileSystemClient, DataLakeDirectoryClient, or DataLakeFileClient.")
+
+        self._blob_lease_client = BlobLeaseClient(_client, lease_id=lease_id)
+
+    def __enter__(self):
+        raise TypeError("Async lease must use 'async with'.")
+
+    def __exit__(self, *args):
+        self.release()
+
+    async def __aenter__(self):
+        return self
+
+    async def __aexit__(self, *args):
+        await self.release()
+
+    @distributed_trace_async
+    async def acquire(self, lease_duration=-1, **kwargs):
+        # type: (int, **Any) -> None
+        """Requests a new lease.
+
+        If the file/file system does not have an active lease, the DataLake service creates a
+        lease on the file/file system and returns a new lease ID.
+
+        :param int lease_duration:
+            Specifies the duration of the lease, in seconds, or negative one
+            (-1) for a lease that never expires. A non-infinite lease can be
+            between 15 and 60 seconds. A lease duration cannot be changed
+            using renew or change. Default is -1 (infinite lease).
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :rtype: None
+        """
+        await self._blob_lease_client.acquire(lease_duration=lease_duration, **kwargs)
+        self._update_lease_client_attributes()
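+
+    # Illustrative sketch: acquire a short lease and release it via the async
+    # context manager defined above (assumes `file_client` is a
+    # DataLakeFileClient from this package):
+    #
+    #     async with DataLakeLeaseClient(file_client) as lease:
+    #         await lease.acquire(lease_duration=15)
+    #         props = await file_client.get_file_properties(lease=lease)
+    #     # __aexit__ releases the lease on exit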
+
+    @distributed_trace_async
+    async def renew(self, **kwargs):
+        # type: (Any) -> None
+        """Renews the lease.
+
+        The lease can be renewed if the lease ID specified in the
+        lease client matches that associated with the file system or file. Note that
+        the lease may be renewed even if it has expired as long as the file system
+        or file has not been leased again since the expiration of that lease. When you
+        renew a lease, the lease duration clock resets.
+
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :return: None
+        """
+        await self._blob_lease_client.renew(**kwargs)
+        self._update_lease_client_attributes()
+
+    @distributed_trace_async
+    async def release(self, **kwargs):
+        # type: (Any) -> None
+        """Release the lease.
+
+        The lease may be released if the client lease id specified matches
+        that associated with the file system or file. Releasing the lease allows another client
+        to immediately acquire the lease for the file system or file as soon as the release is complete.
+
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :return: None
+        """
+        await self._blob_lease_client.release(**kwargs)
+        self._update_lease_client_attributes()
+
+    @distributed_trace_async
+    async def change(self, proposed_lease_id, **kwargs):
+        # type: (str, Any) -> None
+        """Change the lease ID of an active lease.
+
+        :param str proposed_lease_id:
+            Proposed lease ID, in a GUID string format. The DataLake service returns 400
+            (Invalid request) if the proposed lease ID is not in the correct format.
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :return: None
+        """
+        await self._blob_lease_client.change(proposed_lease_id=proposed_lease_id, **kwargs)
+        self._update_lease_client_attributes()
+
+    @distributed_trace_async
+    async def break_lease(self, lease_break_period=None, **kwargs):
+        # type: (Optional[int], Any) -> int
+        """Break the lease, if the file system or file has an active lease.
+
+        Once a lease is broken, it cannot be renewed. Any authorized request can break the lease;
+        the request is not required to specify a matching lease ID. When a lease
+        is broken, the lease break period is allowed to elapse, during which time
+        no lease operation except break and release can be performed on the file system or file.
+        When a lease is successfully broken, the response indicates the interval
+        in seconds until a new lease can be acquired.
+
+        :param int lease_break_period:
+            This is the proposed duration of seconds that the lease
+            should continue before it is broken, between 0 and 60 seconds. This
+            break period is only used if it is shorter than the time remaining
+            on the lease. If longer, the time remaining on the lease is used.
+            A new lease will not be available before the break period has
+            expired, but the lease may be held for longer than the break
+            period. If this header does not appear with a break
+            operation, a fixed-duration lease breaks after the remaining lease
+            period elapses, and an infinite lease breaks immediately.
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :return: Approximate time remaining in the lease period, in seconds.
+        :rtype: int
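+
+        .. admonition:: Example:
+
+            A minimal usage sketch, assuming ``file_system_client`` is an
+            authenticated ``FileSystemClient`` (hypothetical variable name)::
+
+                lease = await file_system_client.acquire_lease()
+                # Any authorized request may break the lease; no matching ID is required.
+                await lease.break_lease(lease_break_period=10)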
+        """
+        return await self._blob_lease_client.break_lease(lease_break_period=lease_break_period, **kwargs)
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/aio/_data_lake_service_client_async.py b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/aio/_data_lake_service_client_async.py
new file mode 100644
index 00000000..093aad92
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/aio/_data_lake_service_client_async.py
@@ -0,0 +1,570 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+# pylint: disable=invalid-overridden-method, docstring-keyword-should-match-keyword-only
+
+from typing import Any, Dict, Optional, Union, TYPE_CHECKING
+
+from azure.core.async_paging import AsyncItemPaged
+from azure.core.pipeline import AsyncPipeline
+from azure.core.tracing.decorator import distributed_trace
+from azure.core.tracing.decorator_async import distributed_trace_async
+
+from azure.storage.blob.aio import BlobServiceClient
+from .._serialize import get_api_version
+from .._generated.aio import AzureDataLakeStorageRESTAPI
+from .._deserialize import get_datalake_service_properties
+from .._shared.base_client_async import AsyncTransportWrapper, AsyncStorageAccountHostsMixin
+from ._file_system_client_async import FileSystemClient
+from .._data_lake_service_client import DataLakeServiceClient as DataLakeServiceClientBase
+from .._shared.policies_async import ExponentialRetry
+from ._data_lake_directory_client_async import DataLakeDirectoryClient
+from ._data_lake_file_client_async import DataLakeFileClient
+from ._models import FileSystemPropertiesPaged
+from .._models import UserDelegationKey, LocationMode
+
+if TYPE_CHECKING:
+    from datetime import datetime
+    from azure.core.credentials import AzureNamedKeyCredential, AzureSasCredential
+    from azure.core.credentials_async import AsyncTokenCredential
+    from .._models import DirectoryProperties, FileProperties, FileSystemProperties, PublicAccess
+
+
+class DataLakeServiceClient(AsyncStorageAccountHostsMixin, DataLakeServiceClientBase):
+    """A client to interact with the DataLake Service at the account level.
+
+    This client provides operations to retrieve and configure the account properties
+    as well as list, create and delete file systems within the account.
+    For operations relating to a specific file system, directory or file, clients for those entities
+    can also be retrieved using the `get_client` functions.
+
+    :ivar str url:
+        The full endpoint URL to the datalake service endpoint.
+    :ivar str primary_endpoint:
+        The full primary endpoint URL.
+    :ivar str primary_hostname:
+        The hostname of the primary endpoint.
+    :param str account_url:
+        The URL to the DataLake storage account. Any other entities included
+        in the URL path (e.g. file system or file) will be discarded. This URL can be optionally
+        authenticated with a SAS token.
+    :param credential:
+        The credentials with which to authenticate. This is optional if the
+        account URL already has a SAS token. The value can be a SAS token string,
+        an instance of an AzureSasCredential or AzureNamedKeyCredential from azure.core.credentials,
+        an account shared access key, or an instance of a TokenCredentials class from azure.identity.
+        If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential
+        - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError.
+        If using an instance of AzureNamedKeyCredential, "name" should be the storage account name, and "key"
+        should be the storage account key.
+    :type credential:
+        ~azure.core.credentials.AzureNamedKeyCredential or
+        ~azure.core.credentials.AzureSasCredential or
+        ~azure.core.credentials_async.AsyncTokenCredential or
+        str or dict[str, str] or None
+    :keyword str api_version:
+        The Storage API version to use for requests. Default value is the most recent service version that is
+        compatible with the current SDK. Setting to an older version may result in reduced feature compatibility.
+    :keyword str audience: The audience to use when requesting tokens for Azure Active Directory
+        authentication. Only has an effect when credential is of type TokenCredential. The value could be
+        https://storage.azure.com/ (default) or https://<account>.blob.core.windows.net.
+
+    .. admonition:: Example:
+
+        .. literalinclude:: ../samples/datalake_samples_service_async.py
+            :start-after: [START create_datalake_service_client]
+            :end-before: [END create_datalake_service_client]
+            :language: python
+            :dedent: 4
+            :caption: Creating the DataLakeServiceClient from connection string.
+
+        .. literalinclude:: ../samples/datalake_samples_service_async.py
+            :start-after: [START create_datalake_service_client_oauth]
+            :end-before: [END create_datalake_service_client_oauth]
+            :language: python
+            :dedent: 4
+            :caption: Creating the DataLakeServiceClient with Azure Identity credentials.
+    """
+
+    def __init__(
+            self, account_url: str,
+            credential: Optional[Union[str, Dict[str, str], "AzureNamedKeyCredential", "AzureSasCredential", "AsyncTokenCredential"]] = None,  # pylint: disable=line-too-long
+            **kwargs: Any
+        ) -> None:
+        kwargs['retry_policy'] = kwargs.get('retry_policy') or ExponentialRetry(**kwargs)
+        super(DataLakeServiceClient, self).__init__(
+            account_url,
+            credential=credential,
+            **kwargs
+        )
+        self._blob_service_client = BlobServiceClient(self._blob_account_url, credential, **kwargs)
+        self._blob_service_client._hosts[LocationMode.SECONDARY] = ""
+        self._client = AzureDataLakeStorageRESTAPI(self.url, base_url=self.url, pipeline=self._pipeline)
+        self._client._config.version = get_api_version(kwargs)
+        self._loop = kwargs.get('loop', None)
+
+    async def __aenter__(self):
+        await self._blob_service_client.__aenter__()
+        return self
+
+    async def __aexit__(self, *args):
+        await self._blob_service_client.close()
+        await super(DataLakeServiceClient, self).__aexit__(*args)
+
+    async def close(self):
+        # type: () -> None
+        """ This method is to close the sockets opened by the client.
+        It need not be used when using with a context manager.
+        """
+        await self.__aexit__()
+
+    @distributed_trace_async
+    async def get_user_delegation_key(self, key_start_time,  # type: datetime
+                                      key_expiry_time,  # type: datetime
+                                      **kwargs  # type: Any
+                                      ):
+        # type: (...) -> UserDelegationKey
+        """
+        Obtain a user delegation key for the purpose of signing SAS tokens.
+        A token credential must be present on the service object for this request to succeed.
+
+        :param ~datetime.datetime key_start_time:
+            A DateTime value. Indicates when the key becomes valid.
+        :param ~datetime.datetime key_expiry_time:
+            A DateTime value. Indicates when the key stops being valid.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :return: The user delegation key.
+        :rtype: ~azure.storage.filedatalake.UserDelegationKey
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_service_async.py
+                :start-after: [START get_user_delegation_key]
+                :end-before: [END get_user_delegation_key]
+                :language: python
+                :dedent: 8
+                :caption: Get user delegation key from datalake service client.
+        """
+        delegation_key = await self._blob_service_client.get_user_delegation_key(
+            key_start_time=key_start_time,
+            key_expiry_time=key_expiry_time,
+            **kwargs)
+        return UserDelegationKey._from_generated(delegation_key)  # pylint: disable=protected-access
+
+    @distributed_trace
+    def list_file_systems(self, name_starts_with=None,  # type: Optional[str]
+                          include_metadata=None,  # type: Optional[bool]
+                          **kwargs):
+        # type: (...) -> AsyncItemPaged[FileSystemProperties]
+        """Returns a generator to list the file systems under the specified account.
+
+        The generator will lazily follow the continuation tokens returned by
+        the service and stop when all file systems have been returned.
+
+        :param str name_starts_with:
+            Filters the results to return only file systems whose names
+            begin with the specified prefix.
+        :param bool include_metadata:
+            Specifies that file system metadata be returned in the response.
+            The default value is `False`.
+        :keyword int results_per_page:
+            The maximum number of file system names to retrieve per API
+            call. If the request does not specify a value, the server will return up to 5,000 items per page.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :keyword bool include_deleted:
+            Specifies that deleted file systems be returned in the response. This is only
+            supported for accounts with file system restore enabled. The default value is `False`.
+
+            .. versionadded:: 12.3.0
+
+        :keyword bool include_system:
+            Flag specifying that system filesystems should be included.
+
+            .. versionadded:: 12.6.0
+
+        :returns: An async iterable (auto-paging) of FileSystemProperties.
+        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.storage.filedatalake.FileSystemProperties]
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_service_async.py
+                :start-after: [START list_file_systems]
+                :end-before: [END list_file_systems]
+                :language: python
+                :dedent: 8
+                :caption: Listing the file systems in the datalake service.
+        """
+        item_paged = self._blob_service_client.list_containers(name_starts_with=name_starts_with,
+                                                               include_metadata=include_metadata,
+                                                               **kwargs)
+        item_paged._page_iterator_class = FileSystemPropertiesPaged  # pylint: disable=protected-access
+        return item_paged
+
+    @distributed_trace_async
+    async def create_file_system(self, file_system,  # type: Union[FileSystemProperties, str]
+                                 metadata=None,  # type: Optional[Dict[str, str]]
+                                 public_access=None,  # type: Optional[PublicAccess]
+                                 **kwargs):
+        # type: (...) -> FileSystemClient
+        """Creates a new file system under the specified account.
+
+        If the file system with the same name already exists, a ResourceExistsError will
+        be raised. This method returns a client with which to interact with the newly
+        created file system.
+
+        :param str file_system:
+            The name of the file system to create.
+        :param metadata:
+            A dict with name-value pairs to associate with the
+            file system as metadata. Example: `{'Category':'test'}`
+        :type metadata: dict(str, str)
+        :param public_access:
+            To specify whether data in the file system may be accessed publicly and the level of access.
+            Possible values include: 'file system', 'file'.
+        :type public_access: ~azure.storage.filedatalake.PublicAccess
+        :keyword encryption_scope_options:
+            Specifies the default encryption scope to set on the file system and use for
+            all future writes.
+
+            .. versionadded:: 12.9.0
+
+        :paramtype encryption_scope_options: dict or ~azure.storage.filedatalake.EncryptionScopeOptions
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :returns: FileSystemClient under the specified account.
+        :rtype: ~azure.storage.filedatalake.FileSystemClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_service_async.py
+                :start-after: [START create_file_system_from_service_client]
+                :end-before: [END create_file_system_from_service_client]
+                :language: python
+                :dedent: 8
+                :caption: Creating a file system in the datalake service.
+        """
+        file_system_client = self.get_file_system_client(file_system)
+        await file_system_client.create_file_system(metadata=metadata, public_access=public_access, **kwargs)
+        return file_system_client
+
+    async def _rename_file_system(self, name, new_name, **kwargs):
+        # type: (str, str, **Any) -> FileSystemClient
+        """Renames a filesystem.
+
+        Operation is successful only if the source filesystem exists.
+
+        :param str name:
+            The name of the filesystem to rename.
+        :param str new_name:
+            The new filesystem name the user wants to rename to.
+        :keyword lease:
+            Specify this to perform only if the lease ID given
+            matches the active lease ID of the source filesystem.
+        :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :returns: FileSystemClient with the newly specified name.
+        :rtype: ~azure.storage.filedatalake.FileSystemClient
+        """
+        await self._blob_service_client._rename_container(name, new_name, **kwargs)   # pylint: disable=protected-access
+        renamed_file_system = self.get_file_system_client(new_name)
+        return renamed_file_system
+
+    @distributed_trace_async
+    async def undelete_file_system(self, name, deleted_version, **kwargs):
+        # type: (str, str, **Any) -> FileSystemClient
+        """Restores soft-deleted filesystem.
+
+        Operation will only be successful if used within the specified number of days
+        set in the delete retention policy.
+
+        .. versionadded:: 12.3.0
+            This operation was introduced in API version '2019-12-12'.
+
+        :param str name:
+            Specifies the name of the deleted filesystem to restore.
+        :param str deleted_version:
+            Specifies the version of the deleted filesystem to restore.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :returns: The FileSystemClient of the restored soft-deleted filesystem.
+        :rtype: ~azure.storage.filedatalake.FileSystemClient
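+
+        .. admonition:: Example:
+
+            A minimal usage sketch, assuming ``service_client`` is an authenticated
+            ``DataLakeServiceClient`` on a restore-enabled account, and assuming the
+            properties of deleted filesystems expose ``deleted``, ``name`` and ``version``::
+
+                async for fs in service_client.list_file_systems(include_deleted=True):
+                    if fs.deleted and fs.name == "my-filesystem":
+                        restored = await service_client.undelete_file_system(fs.name, fs.version)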
+        """
+        new_name = kwargs.pop('new_name', None)
+        await self._blob_service_client.undelete_container(name, deleted_version, new_name=new_name, **kwargs)
+        file_system = self.get_file_system_client(new_name or name)
+        return file_system
+
+    @distributed_trace_async
+    async def delete_file_system(self, file_system,  # type: Union[FileSystemProperties, str]
+                                 **kwargs):
+        # type: (...) -> FileSystemClient
+        """Marks the specified file system for deletion.
+
+        The file system and any files contained within it are later deleted during garbage collection.
+        If the file system is not found, a ResourceNotFoundError will be raised.
+
+        :param file_system:
+            The file system to delete. This can either be the name of the file system,
+            or an instance of FileSystemProperties.
+        :type file_system: str or ~azure.storage.filedatalake.FileSystemProperties
+        :keyword lease:
+            If specified, delete_file_system only succeeds if the
+            file system's lease is active and matches this ID.
+            Required if the file system has an active lease.
+        :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :returns: FileSystemClient after marking the specified file system for deletion.
+        :rtype: ~azure.storage.filedatalake.aio.FileSystemClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_service_async.py
+                :start-after: [START delete_file_system_from_service_client]
+                :end-before: [END delete_file_system_from_service_client]
+                :language: python
+                :dedent: 8
+                :caption: Deleting a file system in the datalake service.
+        """
+        file_system_client = self.get_file_system_client(file_system)
+        await file_system_client.delete_file_system(**kwargs)
+        return file_system_client
+
+    def get_file_system_client(self, file_system  # type: Union[FileSystemProperties, str]
+                               ):
+        # type: (...) -> FileSystemClient
+        """Get a client to interact with the specified file system.
+
+        The file system need not already exist.
+
+        :param file_system:
+            The file system. This can either be the name of the file system,
+            or an instance of FileSystemProperties.
+        :type file_system: str or ~azure.storage.filedatalake.FileSystemProperties
+        :returns: A FileSystemClient.
+        :rtype: ~azure.storage.filedatalake.aio.FileSystemClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_file_system_async.py
+                :start-after: [START create_file_system_client_from_service]
+                :end-before: [END create_file_system_client_from_service]
+                :language: python
+                :dedent: 8
+                :caption: Getting the file system client to interact with a specific file system.
+        """
+        try:
+            file_system_name = file_system.name
+        except AttributeError:
+            file_system_name = file_system
+
+        _pipeline = AsyncPipeline(
+            transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access
+            policies=self._pipeline._impl_policies # pylint: disable = protected-access
+        )
+        return FileSystemClient(self.url, file_system_name, credential=self._raw_credential,
+                                api_version=self.api_version,
+                                _configuration=self._config,
+                                _pipeline=_pipeline, _hosts=self._hosts)
+
+    def get_directory_client(self, file_system,  # type: Union[FileSystemProperties, str]
+                             directory  # type: Union[DirectoryProperties, str]
+                             ):
+        # type: (...) -> DataLakeDirectoryClient
+        """Get a client to interact with the specified directory.
+
+        The directory need not already exist.
+
+        :param file_system:
+            The file system that the directory is in. This can either be the name of the file system,
+            or an instance of FileSystemProperties.
+        :type file_system: str or ~azure.storage.filedatalake.FileSystemProperties
+        :param directory:
+            The directory with which to interact. This can either be the name of the directory,
+            or an instance of DirectoryProperties.
+        :type directory: str or ~azure.storage.filedatalake.DirectoryProperties
+        :returns: A DataLakeDirectoryClient.
+        :rtype: ~azure.storage.filedatalake.aio.DataLakeDirectoryClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_service_async.py
+                :start-after: [START get_directory_client_from_service_client]
+                :end-before: [END get_directory_client_from_service_client]
+                :language: python
+                :dedent: 8
+                :caption: Getting the directory client to interact with a specific directory.
+        """
+        try:
+            file_system_name = file_system.name
+        except AttributeError:
+            file_system_name = file_system
+        try:
+            directory_name = directory.name
+        except AttributeError:
+            directory_name = directory
+
+        _pipeline = AsyncPipeline(
+            transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access
+            policies=self._pipeline._impl_policies # pylint: disable = protected-access
+        )
+        return DataLakeDirectoryClient(self.url, file_system_name, directory_name=directory_name,
+                                       credential=self._raw_credential,
+                                       api_version=self.api_version,
+                                       _configuration=self._config, _pipeline=_pipeline,
+                                       _hosts=self._hosts)
+
+    def get_file_client(self, file_system,  # type: Union[FileSystemProperties, str]
+                        file_path  # type: Union[FileProperties, str]
+                        ):
+        # type: (...) -> DataLakeFileClient
+        """Get a client to interact with the specified file.
+
+        The file need not already exist.
+
+        :param file_system:
+            The file system that the file is in. This can either be the name of the file system,
+            or an instance of FileSystemProperties.
+        :type file_system: str or ~azure.storage.filedatalake.FileSystemProperties
+        :param file_path:
+            The file with which to interact. This can either be the full path of the file (from the
+            root directory), e.g. 'directory/subdirectory/file', or an instance of FileProperties.
+        :type file_path: str or ~azure.storage.filedatalake.FileProperties
+        :returns: A DataLakeFileClient.
+        :rtype: ~azure.storage.filedatalake.aio.DataLakeFileClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_service_async.py
+                :start-after: [START get_file_client_from_service_client]
+                :end-before: [END get_file_client_from_service_client]
+                :language: python
+                :dedent: 8
+                :caption: Getting the file client to interact with a specific file.
+        """
+        try:
+            file_system_name = file_system.name
+        except AttributeError:
+            file_system_name = file_system
+        try:
+            file_path = file_path.name
+        except AttributeError:
+            pass
+
+        _pipeline = AsyncPipeline(
+            transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access
+            policies=self._pipeline._impl_policies # pylint: disable = protected-access
+        )
+        return DataLakeFileClient(
+            self.url, file_system_name, file_path=file_path, credential=self._raw_credential,
+            api_version=self.api_version,
+            _hosts=self._hosts, _configuration=self._config, _pipeline=_pipeline)
+
+    @distributed_trace_async
+    async def set_service_properties(self, **kwargs):
+        # type: (**Any) -> None
+        """Sets the properties of a storage account's Datalake service, including
+        Azure Storage Analytics.
+
+        If an element (e.g. analytics_logging) is left as None, the
+        existing settings on the service for that functionality are preserved.
+
+        .. versionadded:: 12.4.0
+            This operation was introduced in API version '2020-06-12'.
+
+        :keyword analytics_logging:
+            Groups the Azure Analytics Logging settings.
+        :type analytics_logging: ~azure.storage.filedatalake.AnalyticsLogging
+        :keyword hour_metrics:
+            The hour metrics settings provide a summary of request
+            statistics grouped by API in hourly aggregates.
+        :type hour_metrics: ~azure.storage.filedatalake.Metrics
+        :keyword minute_metrics:
+            The minute metrics settings provide request statistics
+            for each minute.
+        :type minute_metrics: ~azure.storage.filedatalake.Metrics
+        :keyword cors:
+            You can include up to five CorsRule elements in the
+            list. If an empty list is specified, all CORS rules will be deleted,
+            and CORS will be disabled for the service.
+        :type cors: list[~azure.storage.filedatalake.CorsRule]
+        :keyword str target_version:
+            Indicates the default version to use for requests if an incoming
+            request's version is not specified.
+        :keyword delete_retention_policy:
+            The delete retention policy specifies whether to retain deleted files/directories.
+            It also specifies the number of days and versions of file/directory to keep.
+        :type delete_retention_policy: ~azure.storage.filedatalake.RetentionPolicy
+        :keyword static_website:
+            Specifies whether the static website feature is enabled,
+            and if yes, indicates the index document and 404 error document to use.
+        :type static_website: ~azure.storage.filedatalake.StaticWebsite
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
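+
+        .. admonition:: Example:
+
+            A minimal usage sketch, assuming ``service_client`` is an authenticated
+            ``DataLakeServiceClient`` (hypothetical name; properties left unset are preserved)::
+
+                from azure.storage.filedatalake import CorsRule
+
+                # Allow GET requests from one origin; all other settings stay unchanged.
+                cors_rule = CorsRule(['www.xyz.com'], ['GET'])
+                await service_client.set_service_properties(cors=[cors_rule])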
+        """
+        await self._blob_service_client.set_service_properties(**kwargs)
+
+    @distributed_trace_async
+    async def get_service_properties(self, **kwargs):
+        # type: (**Any) -> Dict[str, Any]
+        """Gets the properties of a storage account's datalake service, including
+        Azure Storage Analytics.
+
+        .. versionadded:: 12.4.0
+            This operation was introduced in API version '2020-06-12'.
+
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :returns: An object containing datalake service properties such as
+            analytics logging, hour/minute metrics, cors rules, etc.
+        :rtype: dict[str, Any]
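+
+        .. admonition:: Example:
+
+            A minimal usage sketch, assuming ``service_client`` is an authenticated
+            ``DataLakeServiceClient`` and assuming the returned dict carries a
+            ``'delete_retention_policy'`` entry::
+
+                props = await service_client.get_service_properties()
+                retention = props['delete_retention_policy']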
+        """
+        props = await self._blob_service_client.get_service_properties(**kwargs)
+        return get_datalake_service_properties(props)
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/aio/_download_async.py b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/aio/_download_async.py
new file mode 100644
index 00000000..e22a9df9
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/aio/_download_async.py
@@ -0,0 +1,82 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+from typing import AsyncIterator, IO, Optional
+
+from .._deserialize import from_blob_properties
+
+
+class StorageStreamDownloader(object):
+    """A streaming object to download from Azure Storage.
+
+    :ivar str name:
+        The name of the file being downloaded.
+    :ivar ~azure.storage.filedatalake.FileProperties properties:
+        The properties of the file being downloaded. If only a range of the data is being
+        downloaded, this will be reflected in the properties.
+    :ivar int size:
+        The size of the total data in the stream. This will be the byte range if specified,
+        otherwise the total size of the file.
+    """
+
+    def __init__(self, downloader):
+        self._downloader = downloader
+        self.name = self._downloader.name
+
+        # Parse additional Datalake-only properties
+        encryption_context = self._downloader._response.response.headers.get('x-ms-encryption-context')
+        acl = self._downloader._response.response.headers.get('x-ms-acl')
+
+        self.properties = from_blob_properties(
+            self._downloader.properties,
+            encryption_context=encryption_context,
+            acl=acl)
+        self.size = self._downloader.size
+
+    def __len__(self):
+        return self.size
+
+    def chunks(self) -> AsyncIterator[bytes]:
+        """Iterate over chunks in the download stream.
+
+        :returns: An async iterator over the chunks in the download stream.
+        :rtype: AsyncIterator[bytes]
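+
+        .. admonition:: Example:
+
+            A minimal usage sketch, assuming ``file_client`` is an authenticated
+            ``DataLakeFileClient`` (hypothetical variable name)::
+
+                downloader = await file_client.download_file()
+                async for chunk in downloader.chunks():
+                    print(len(chunk))  # process each chunk as it arrives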
+        """
+        return self._downloader.chunks()
+
+    async def read(self, size: Optional[int] = -1) -> bytes:
+        """
+        Read up to size bytes from the stream and return them. If size
+        is unspecified or is -1, all bytes will be read.
+
+        :param Optional[int] size:
+            The number of bytes to download from the stream. Leave unspecified
+            or set to -1 to download all bytes.
+        :returns:
+            The requested data as bytes. If the return value is empty, there is no more data to read.
+        :rtype: bytes
+        """
+        return await self._downloader.read(size)
+
+    async def readall(self) -> bytes:
+        """Download the contents of this file.
+
+        This operation is blocking until all data is downloaded.
+
+        :returns: The contents of the file.
+        :rtype: bytes
+        """
+        return await self._downloader.readall()
+
+    async def readinto(self, stream: IO[bytes]) -> int:
+        """Download the contents of this file to a stream.
+
+        :param IO[bytes] stream:
+            The stream to download to. This can be an open file-handle,
+            or any writable stream. The stream must be seekable if the download
+            uses more than one parallel connection.
+        :returns: The number of bytes read.
+        :rtype: int
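+
+        .. admonition:: Example:
+
+            A minimal usage sketch, assuming ``file_client`` is an authenticated
+            ``DataLakeFileClient`` and ``output.bin`` is a hypothetical local path::
+
+                downloader = await file_client.download_file()
+                with open("output.bin", "wb") as local_file:
+                    bytes_read = await downloader.readinto(local_file)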
+        """
+        return await self._downloader.readinto(stream)
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/aio/_file_system_client_async.py b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/aio/_file_system_client_async.py
new file mode 100644
index 00000000..9c3122a1
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/aio/_file_system_client_async.py
@@ -0,0 +1,1004 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+# pylint: disable=invalid-overridden-method, too-many-lines, docstring-keyword-should-match-keyword-only
+
+import functools
+from typing import (  # pylint: disable=unused-import
+    Union, Optional, Any, Dict, List, Tuple,
+    TYPE_CHECKING
+)
+
+from azure.core.exceptions import HttpResponseError
+from azure.core.tracing.decorator import distributed_trace
+
+from azure.core.pipeline import AsyncPipeline
+from azure.core.async_paging import AsyncItemPaged
+
+from azure.core.tracing.decorator_async import distributed_trace_async
+from azure.storage.blob.aio import ContainerClient
+from .._serialize import get_api_version
+from .._deserialize import process_storage_error, is_file_path
+from .._generated.models import ListBlobsIncludeItem
+
+from ._data_lake_file_client_async import DataLakeFileClient
+from ._data_lake_directory_client_async import DataLakeDirectoryClient
+from ._data_lake_lease_async import DataLakeLeaseClient
+from .._file_system_client import FileSystemClient as FileSystemClientBase
+from .._generated.aio import AzureDataLakeStorageRESTAPI
+from .._shared.base_client_async import AsyncTransportWrapper, AsyncStorageAccountHostsMixin
+from .._shared.policies_async import ExponentialRetry
+from .._models import FileSystemProperties, PublicAccess, DirectoryProperties, FileProperties, DeletedPathProperties
+from ._list_paths_helper import DeletedPathPropertiesPaged, PathPropertiesPaged
+
+
+if TYPE_CHECKING:
+    from azure.core.credentials import AzureNamedKeyCredential, AzureSasCredential
+    from azure.core.credentials_async import AsyncTokenCredential
+    from datetime import datetime
+    from .._models import PathProperties
+
+
+class FileSystemClient(AsyncStorageAccountHostsMixin, FileSystemClientBase):
+    """A client to interact with a specific file system, even if that file system
+    may not yet exist.
+
+    For operations relating to a specific directory or file within this file system, a directory client or file client
+    can be retrieved using the :func:`~get_directory_client` or :func:`~get_file_client` functions.
+
+    :ivar str url:
+        The full endpoint URL to the file system, including SAS token if used.
+    :ivar str primary_endpoint:
+        The full primary endpoint URL.
+    :ivar str primary_hostname:
+        The hostname of the primary endpoint.
+    :param str account_url:
+        The URI to the storage account.
+    :param file_system_name:
+        The file system for the directory or files.
+    :type file_system_name: str
+    :param credential:
+        The credentials with which to authenticate. This is optional if the
+        account URL already has a SAS token. The value can be a SAS token string,
+        an instance of an AzureSasCredential or AzureNamedKeyCredential from azure.core.credentials,
+        an account shared access key, or an instance of a TokenCredentials class from azure.identity.
+        If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential
+        - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError.
+        If using an instance of AzureNamedKeyCredential, "name" should be the storage account name, and "key"
+        should be the storage account key.
+    :type credential:
+        ~azure.core.credentials.AzureNamedKeyCredential or
+        ~azure.core.credentials.AzureSasCredential or
+        ~azure.core.credentials_async.AsyncTokenCredential or
+        str or dict[str, str] or None
+    :keyword str api_version:
+        The Storage API version to use for requests. Default value is the most recent service version that is
+        compatible with the current SDK. Setting to an older version may result in reduced feature compatibility.
+    :keyword str audience: The audience to use when requesting tokens for Azure Active Directory
+        authentication. Only has an effect when credential is of type TokenCredential. The value could be
+        https://storage.azure.com/ (default) or https://<account>.blob.core.windows.net.
+
+    .. admonition:: Example:
+
+        .. literalinclude:: ../samples/datalake_samples_file_system_async.py
+            :start-after: [START create_file_system_client_from_service]
+            :end-before: [END create_file_system_client_from_service]
+            :language: python
+            :dedent: 8
+            :caption: Get a FileSystemClient from an existing DataLakeServiceClient.
+     """
+
+    def __init__(
+        self, account_url: str,
+        file_system_name: str,
+        credential: Optional[Union[str, Dict[str, str], "AzureNamedKeyCredential", "AzureSasCredential", "AsyncTokenCredential"]] = None,  # pylint: disable=line-too-long
+        **kwargs: Any
+    ) -> None:
+        kwargs['retry_policy'] = kwargs.get('retry_policy') or ExponentialRetry(**kwargs)
+        super(FileSystemClient, self).__init__(
+            account_url,
+            file_system_name=file_system_name,
+            credential=credential,
+            **kwargs)
+        # Override the synchronous _container_client class field with an async ContainerClient
+        kwargs.pop('_hosts', None)
+        self._container_client = ContainerClient(self._blob_account_url, self.file_system_name,
+                                                 credential=credential,
+                                                 _hosts=self._container_client._hosts,
+                                                 **kwargs)  # type: ignore
+        self._client = AzureDataLakeStorageRESTAPI(self.url, base_url=self.url,
+                                                   file_system=self.file_system_name, pipeline=self._pipeline)
+        self._datalake_client_for_blob_operation = AzureDataLakeStorageRESTAPI(self._container_client.url,
+                                                                               base_url=self._container_client.url,
+                                                                               file_system=self.file_system_name,
+                                                                               pipeline=self._pipeline)
+        api_version = get_api_version(kwargs)
+        self._client._config.version = api_version
+        self._datalake_client_for_blob_operation._config.version = api_version
+
+        self._loop = kwargs.get('loop', None)
+
+    async def __aexit__(self, *args):
+        await self._container_client.close()
+        await self._datalake_client_for_blob_operation.close()
+        await super(FileSystemClient, self).__aexit__(*args)
+
+    async def close(self):
+        # type: () -> None
+        """ This method is to close the sockets opened by the client.
+        It need not be used when using with a context manager.
+        """
+        await self.__aexit__()
+
+    @distributed_trace_async
+    async def acquire_lease(
+            self, lease_duration=-1,  # type: int
+            lease_id=None,  # type: Optional[str]
+            **kwargs
+    ):
+        # type: (...) -> DataLakeLeaseClient
+        """
+        Requests a new lease. If the file system does not have an active lease,
+        the DataLake service creates a lease on the file system and returns a new
+        lease ID.
+
+        :param int lease_duration:
+            Specifies the duration of the lease, in seconds, or negative one
+            (-1) for a lease that never expires. A non-infinite lease can be
+            between 15 and 60 seconds. A lease duration cannot be changed
+            using renew or change. Default is -1 (infinite lease).
+        :param str lease_id:
+            Proposed lease ID, in a GUID string format. The DataLake service returns
+            400 (Invalid request) if the proposed lease ID is not in the correct format.
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :returns: A DataLakeLeaseClient object, that can be run in a context manager.
+        :rtype: ~azure.storage.filedatalake.aio.DataLakeLeaseClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_file_system_async.py
+                :start-after: [START acquire_lease_on_file_system]
+                :end-before: [END acquire_lease_on_file_system]
+                :language: python
+                :dedent: 12
+                :caption: Acquiring a lease on the file_system.
+        """
+        lease = DataLakeLeaseClient(self, lease_id=lease_id)
+        await lease.acquire(lease_duration=lease_duration, **kwargs)
+        return lease
+
+    @distributed_trace_async
+    async def create_file_system(self, metadata=None,  # type: Optional[Dict[str, str]]
+                                 public_access=None,  # type: Optional[PublicAccess]
+                                 **kwargs):
+        # type: (...) -> Dict[str, Union[str, datetime]]
+        """Creates a new file system under the specified account.
+
+        If the file system with the same name already exists, a ResourceExistsError will
+        be raised. This method returns a client with which to interact with the newly
+        created file system.
+
+        :param metadata:
+            A dict with name-value pairs to associate with the
+            file system as metadata. Example: `{'Category':'test'}`
+        :type metadata: dict(str, str)
+        :param public_access:
+            To specify whether data in the file system may be accessed publicly and the level of access.
+        :type public_access: ~azure.storage.filedatalake.PublicAccess
+        :keyword encryption_scope_options:
+            Specifies the default encryption scope to set on the file system and use for
+            all future writes.
+
+            .. versionadded:: 12.9.0
+
+        :paramtype encryption_scope_options: dict or ~azure.storage.filedatalake.EncryptionScopeOptions
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :returns: A dictionary of response headers.
+        :rtype: dict[str, Union[str, datetime]]
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_file_system_async.py
+                :start-after: [START create_file_system]
+                :end-before: [END create_file_system]
+                :language: python
+                :dedent: 16
+                :caption: Creating a file system in the datalake service.
+        """
+        encryption_scope_options = kwargs.pop('encryption_scope_options', None)
+        return await self._container_client.create_container(metadata=metadata,
+                                                             public_access=public_access,
+                                                             container_encryption_scope=encryption_scope_options,
+                                                             **kwargs)
+
+    @distributed_trace_async
+    async def exists(self, **kwargs):
+        # type: (**Any) -> bool
+        """
+        Returns True if a file system exists and returns False otherwise.
+
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :returns: True if a file system exists, False otherwise.
+        :rtype: bool
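+
+        .. admonition:: Example:
+
+            A minimal usage sketch, assuming ``file_system_client`` is an
+            authenticated ``FileSystemClient`` (hypothetical variable name)::
+
+                if not await file_system_client.exists():
+                    await file_system_client.create_file_system()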
+        """
+        return await self._container_client.exists(**kwargs)
+
+    @distributed_trace_async
+    async def _rename_file_system(self, new_name, **kwargs):
+        # type: (str, **Any) -> FileSystemClient
+        """Renames a filesystem.
+
+        Operation is successful only if the source filesystem exists.
+
+        :param str new_name:
+            The new filesystem name the user wants to rename to.
+        :keyword lease:
+            Specify this to perform only if the lease ID given
+            matches the active lease ID of the source filesystem.
+        :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :returns: FileSystemClient with renamed properties.
+        :rtype: ~azure.storage.filedatalake.FileSystemClient
+        """
+        await self._container_client._rename_container(new_name, **kwargs)   # pylint: disable=protected-access
+        renamed_file_system = FileSystemClient(
+                f"{self.scheme}://{self.primary_hostname}", file_system_name=new_name,
+                credential=self._raw_credential, api_version=self.api_version, _configuration=self._config,
+                _pipeline=self._pipeline, _location_mode=self._location_mode, _hosts=self._hosts)
+        return renamed_file_system
+
+    @distributed_trace_async
+    async def delete_file_system(self, **kwargs):
+        # type: (Any) -> None
+        """Marks the specified file system for deletion.
+
+        The file system and any files contained within it are later deleted during garbage collection.
+        If the file system is not found, a ResourceNotFoundError will be raised.
+
+        :keyword lease:
+            If specified, delete_file_system only succeeds if the
+            file system's lease is active and matches this ID.
+            Required if the file system has an active lease.
+        :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :rtype: None
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_file_system_async.py
+                :start-after: [START delete_file_system]
+                :end-before: [END delete_file_system]
+                :language: python
+                :dedent: 16
+                :caption: Deleting a file system in the datalake service.
+        """
+        await self._container_client.delete_container(**kwargs)
+
+    @distributed_trace_async
+    async def get_file_system_properties(self, **kwargs):
+        # type: (Any) -> FileSystemProperties
+        """Returns all user-defined metadata and system properties for the specified
+        file system. The data returned does not include the file system's list of paths.
+
+        :keyword lease:
+            If specified, get_file_system_properties only succeeds if the
+            file system's lease is active and matches this ID.
+        :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :return: Properties for the specified file system within a file system object.
+        :rtype: ~azure.storage.filedatalake.FileSystemProperties
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_file_system_async.py
+                :start-after: [START get_file_system_properties]
+                :end-before: [END get_file_system_properties]
+                :language: python
+                :dedent: 16
+                :caption: Getting properties on the file system.
+        """
+        container_properties = await self._container_client.get_container_properties(**kwargs)
+        return FileSystemProperties._convert_from_container_props(container_properties)  # pylint: disable=protected-access
+
+    @distributed_trace_async
+    async def set_file_system_metadata(  # type: ignore
+            self, metadata,  # type: Dict[str, str]
+            **kwargs
+    ):
+        # type: (...) -> Dict[str, Union[str, datetime]]
+        """Sets one or more user-defined name-value pairs for the specified
+        file system. Each call to this operation replaces all existing metadata
+        attached to the file system. To remove all metadata from the file system,
+        call this operation with an empty metadata dict.
+
+        :param metadata:
+            A dict containing name-value pairs to associate with the file system as
+            metadata. Example: {'category':'test'}
+        :type metadata: dict[str, str]
+        :keyword lease:
+            If specified, set_file_system_metadata only succeeds if the
+            file system's lease is active and matches this ID.
+        :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :returns: file system-updated property dict (Etag and last modified).
+        :rtype: dict[str, str] or dict[str, ~datetime.datetime]
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_file_system_async.py
+                :start-after: [START set_file_system_metadata]
+                :end-before: [END set_file_system_metadata]
+                :language: python
+                :dedent: 16
+                :caption: Setting metadata on the file system.
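+
+        .. admonition:: Example:
+
+            A minimal sketch (not a shipped sample); assumes an existing
+            ``file_system_client``.
+
+            .. code-block:: python
+
+                # Each call replaces all existing metadata, so include every
+                # pair that should be kept.
+                await file_system_client.set_file_system_metadata(
+                    metadata={'category': 'test', 'owner': 'team-a'})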
+        """
+        return await self._container_client.set_container_metadata(metadata=metadata, **kwargs)
+
+    @distributed_trace_async
+    async def set_file_system_access_policy(
+            self, signed_identifiers,  # type: Dict[str, AccessPolicy]
+            public_access=None,  # type: Optional[Union[str, PublicAccess]]
+            **kwargs
+    ):  # type: (...) -> Dict[str, Union[str, datetime]]
+        """Sets the permissions for the specified file system or stored access
+        policies that may be used with Shared Access Signatures. The permissions
+        indicate whether files in a file system may be accessed publicly.
+
+        :param signed_identifiers:
+            A dictionary of access policies to associate with the file system. The
+            dictionary may contain up to 5 elements. An empty dictionary
+            will clear the access policies set on the service.
+        :type signed_identifiers: dict[str, ~azure.storage.filedatalake.AccessPolicy]
+        :param ~azure.storage.filedatalake.PublicAccess public_access:
+            To specify whether data in the file system may be accessed publicly and the level of access.
+        :keyword lease:
+            Required if the file system has an active lease. Value can be a DataLakeLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A datetime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified date/time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A datetime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :returns: file system-updated property dict (Etag and last modified).
+        :rtype: dict[str, str or ~datetime.datetime]
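+
+        .. admonition:: Example:
+
+            A minimal sketch (not a shipped sample); assumes an existing
+            ``file_system_client`` and a hypothetical policy name.
+
+            .. code-block:: python
+
+                # Store one access policy granting read access for an hour.
+                from datetime import datetime, timedelta, timezone
+                from azure.storage.filedatalake import AccessPolicy, FileSystemSasPermissions
+
+                policy = AccessPolicy(
+                    permission=FileSystemSasPermissions(read=True),
+                    expiry=datetime.now(timezone.utc) + timedelta(hours=1))
+                await file_system_client.set_file_system_access_policy(
+                    signed_identifiers={'read-only': policy})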
+        """
+        return await self._container_client.set_container_access_policy(signed_identifiers,
+                                                                        public_access=public_access, **kwargs)
+
+    @distributed_trace_async
+    async def get_file_system_access_policy(self, **kwargs):
+        # type: (Any) -> Dict[str, Any]
+        """Gets the permissions for the specified file system.
+        The permissions indicate whether file system data may be accessed publicly.
+
+        :keyword lease:
+            If specified, get_file_system_access_policy only succeeds if the
+            file system's lease is active and matches this ID.
+        :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :returns: Access policy information in a dict.
+        :rtype: dict[str, Any]
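+
+        .. admonition:: Example:
+
+            A minimal sketch (not a shipped sample); assumes an existing
+            ``file_system_client``. The two keys match those built by this method.
+
+            .. code-block:: python
+
+                policy_info = await file_system_client.get_file_system_access_policy()
+                print(policy_info['public_access'])
+                for identifier in policy_info['signed_identifiers']:
+                    print(identifier.id)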
+        """
+        access_policy = await self._container_client.get_container_access_policy(**kwargs)
+        return {
+            'public_access': PublicAccess._from_generated(access_policy['public_access']),  # pylint: disable=protected-access
+            'signed_identifiers': access_policy['signed_identifiers']
+        }
+
+    @distributed_trace
+    def get_paths(
+        self, path: Optional[str] = None,
+        recursive: Optional[bool] = True,
+        max_results: Optional[int] = None,
+        **kwargs: Any
+    ) -> AsyncItemPaged["PathProperties"]:
+        """Returns a generator to list the paths(could be files or directories) under the specified file system.
+        The generator will lazily follow the continuation tokens returned by
+        the service.
+
+        :param str path:
+            Filters the results to return only paths under the specified path.
+        :param Optional[bool] recursive:
+            Set to True to list paths recursively, or False to list only the paths
+            directly under the specified path. Defaults to True.
+        :param int max_results:
+            An optional value that specifies the maximum
+            number of items to return per page. If omitted or greater than 5,000, the
+            response will include up to 5,000 items per page.
+        :keyword bool upn:
+            If True, the user identity values returned in the x-ms-owner, x-ms-group,
+            and x-ms-acl response headers will be transformed from Azure Active Directory Object IDs to User
+            Principal Names in the owner, group, and acl fields of
+            :class:`~azure.storage.filedatalake.PathProperties`. If False, the values will be returned
+            as Azure Active Directory Object IDs. The default value is False. Note that group and application
+            Object IDs are not translated because they do not have unique friendly names.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :returns: An iterable (auto-paging) response of PathProperties.
+        :rtype: ~azure.core.paging.AsyncItemPaged[~azure.storage.filedatalake.PathProperties]
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_file_system_async.py
+                :start-after: [START get_paths_in_file_system]
+                :end-before: [END get_paths_in_file_system]
+                :language: python
+                :dedent: 12
+                :caption: List the paths in the file system.
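+
+        .. admonition:: Example:
+
+            A minimal sketch (not a shipped sample); assumes an existing
+            ``file_system_client`` and a hypothetical 'my-directory' path.
+
+            .. code-block:: python
+
+                # List only the immediate children of 'my-directory',
+                # translating Object IDs to User Principal Names.
+                async for path in file_system_client.get_paths(
+                        path='my-directory', recursive=False, upn=True):
+                    print(path.name, path.is_directory)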
+        """
+        timeout = kwargs.pop('timeout', None)
+        command = functools.partial(
+            self._client.file_system.list_paths,
+            path=path,
+            timeout=timeout,
+            **kwargs)
+        return AsyncItemPaged(
+            command, recursive, path=path, max_results=max_results,
+            page_iterator_class=PathPropertiesPaged, **kwargs)
+
+    @distributed_trace_async
+    async def create_directory(self, directory,  # type: Union[DirectoryProperties, str]
+                               metadata=None,  # type: Optional[Dict[str, str]]
+                               **kwargs):
+        # type: (...) -> DataLakeDirectoryClient
+        """
+        Create a new directory.
+
+        :param directory:
+            The directory with which to interact. This can either be the name of the directory,
+            or an instance of DirectoryProperties.
+        :type directory: str or ~azure.storage.filedatalake.DirectoryProperties
+        :param metadata:
+            Name-value pairs associated with the file as metadata.
+        :type metadata: dict(str, str)
+        :keyword ~azure.storage.filedatalake.ContentSettings content_settings:
+            ContentSettings object used to set path properties.
+        :keyword lease:
+            Required if the directory has an active lease. Value can be a DataLakeLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str
+        :keyword str umask:
+            Optional and only valid if Hierarchical Namespace is enabled for the account.
+            When creating a file or directory and the parent folder does not have a default ACL,
+            the umask restricts the permissions of the file or directory to be created.
+            The resulting permission is given by p & ^u, where p is the permission and u is the umask.
+            For example, if p is 0777 and u is 0057, then the resulting permission is 0720.
+            The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027.
+            The umask must be specified in 4-digit octal notation (e.g. 0766).
+        :keyword str owner:
+            The owner of the file or directory.
+        :keyword str group:
+            The owning group of the file or directory.
+        :keyword str acl:
+            Sets POSIX access control rights on files and directories. The value is a
+            comma-separated list of access control entries. Each access control entry (ACE) consists of a
+            scope, a type, a user or group identifier, and permissions in the format
+            "[scope:][type]:[id]:[permissions]".
+        :keyword str lease_id:
+            Proposed lease ID, in a GUID string format. The DataLake service returns
+            400 (Invalid request) if the proposed lease ID is not in the correct format.
+        :keyword int lease_duration:
+            Specifies the duration of the lease, in seconds, or negative one
+            (-1) for a lease that never expires. A non-infinite lease can be
+            between 15 and 60 seconds. A lease duration cannot be changed
+            using renew or change.
+        :keyword str permissions:
+            Optional and only valid if Hierarchical Namespace
+            is enabled for the account. Sets POSIX access permissions for the file
+            owner, the file owning group, and others. Each class may be granted
+            read, write, or execute permission.  The sticky bit is also supported.
+            Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are
+            supported.
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :returns: DataLakeDirectoryClient with new directory and metadata.
+        :rtype: ~azure.storage.filedatalake.aio.DataLakeDirectoryClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_file_system_async.py
+                :start-after: [START create_directory_from_file_system]
+                :end-before: [END create_directory_from_file_system]
+                :language: python
+                :dedent: 12
+                :caption: Create directory in the file system.
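+
+        .. admonition:: Example:
+
+            A minimal sketch (not a shipped sample); assumes an existing
+            ``file_system_client`` and hypothetical names.
+
+            .. code-block:: python
+
+                # With permissions 0777 and umask 0027, the effective
+                # permission is 0777 & ~0027 == 0750.
+                directory_client = await file_system_client.create_directory(
+                    'my-directory', metadata={'project': 'demo'},
+                    permissions='0777', umask='0027')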
+        """
+        directory_client = self.get_directory_client(directory)
+        await directory_client.create_directory(metadata=metadata, **kwargs)
+        return directory_client
+
+    @distributed_trace_async
+    async def delete_directory(self, directory,  # type: Union[DirectoryProperties, str]
+                               **kwargs):
+        # type: (...) -> DataLakeDirectoryClient
+        """
+        Marks the specified path for deletion.
+
+        :param directory:
+            The directory with which to interact. This can either be the name of the directory,
+            or an instance of DirectoryProperties.
+        :type directory: str or ~azure.storage.filedatalake.DirectoryProperties
+        :keyword lease:
+            Required if the directory has an active lease. Value can be a DataLakeLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :returns: DataLakeDirectoryClient after deleting specified directory.
+        :rtype: ~azure.storage.filedatalake.aio.DataLakeDirectoryClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_file_system_async.py
+                :start-after: [START delete_directory_from_file_system]
+                :end-before: [END delete_directory_from_file_system]
+                :language: python
+                :dedent: 12
+                :caption: Delete directory in the file system.
+        """
+        directory_client = self.get_directory_client(directory)
+        await directory_client.delete_directory(**kwargs)
+        return directory_client
+
+    @distributed_trace_async
+    async def create_file(self, file,  # type: Union[FileProperties, str]
+                          **kwargs):
+        # type: (...) -> DataLakeFileClient
+        """
+        Create a new file.
+
+        :param file:
+            The file with which to interact. This can either be the name of the file,
+            or an instance of FileProperties.
+        :type file: str or ~azure.storage.filedatalake.FileProperties
+        :keyword ~azure.storage.filedatalake.ContentSettings content_settings:
+            ContentSettings object used to set path properties.
+        :keyword metadata:
+            Name-value pairs associated with the file as metadata.
+        :paramtype metadata: dict[str, str]
+        :keyword lease:
+            Required if the file has an active lease. Value can be a DataLakeLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str
+        :keyword str umask:
+            Optional and only valid if Hierarchical Namespace is enabled for the account.
+            When creating a file or directory and the parent folder does not have a default ACL,
+            the umask restricts the permissions of the file or directory to be created.
+            The resulting permission is given by p & ^u, where p is the permission and u is the umask.
+            For example, if p is 0777 and u is 0057, then the resulting permission is 0720.
+            The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027.
+            The umask must be specified in 4-digit octal notation (e.g. 0766).
+        :keyword str owner:
+            The owner of the file or directory.
+        :keyword str group:
+            The owning group of the file or directory.
+        :keyword str acl:
+            Sets POSIX access control rights on files and directories. The value is a
+            comma-separated list of access control entries. Each access control entry (ACE) consists of a
+            scope, a type, a user or group identifier, and permissions in the format
+            "[scope:][type]:[id]:[permissions]".
+        :keyword str lease_id:
+            Proposed lease ID, in a GUID string format. The DataLake service returns
+            400 (Invalid request) if the proposed lease ID is not in the correct format.
+        :keyword int lease_duration:
+            Specifies the duration of the lease, in seconds, or negative one
+            (-1) for a lease that never expires. A non-infinite lease can be
+            between 15 and 60 seconds. A lease duration cannot be changed
+            using renew or change.
+        :keyword expires_on:
+            The time at which the file is set to expire.
+            If the type of expires_on is an int, expiration time will be set
+            as the number of milliseconds elapsed from creation time.
+            If the type of expires_on is datetime, expiration time will be set
+            absolute to the time provided. If no time zone info is provided, this
+            will be interpreted as UTC.
+        :paramtype expires_on: datetime or int
+        :keyword str permissions:
+            Optional and only valid if Hierarchical Namespace
+            is enabled for the account. Sets POSIX access permissions for the file
+            owner, the file owning group, and others. Each class may be granted
+            read, write, or execute permission.  The sticky bit is also supported.
+            Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are
+            supported.
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :returns: DataLakeFileClient with new file created.
+        :rtype: ~azure.storage.filedatalake.aio.DataLakeFileClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_file_system_async.py
+                :start-after: [START create_file_from_file_system]
+                :end-before: [END create_file_from_file_system]
+                :language: python
+                :dedent: 12
+                :caption: Create file in the file system.
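+
+        .. admonition:: Example:
+
+            A minimal sketch (not a shipped sample); assumes an existing
+            ``file_system_client`` and a hypothetical file name.
+
+            .. code-block:: python
+
+                # A datetime expiry is absolute; an int would instead mean
+                # milliseconds elapsed from creation time.
+                from datetime import datetime, timedelta, timezone
+
+                file_client = await file_system_client.create_file(
+                    'my-file.txt',
+                    expires_on=datetime.now(timezone.utc) + timedelta(days=1))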
+        """
+        file_client = self.get_file_client(file)
+        await file_client.create_file(**kwargs)
+        return file_client
+
+    @distributed_trace_async
+    async def delete_file(self, file,  # type: Union[FileProperties, str]
+                          **kwargs):
+        # type: (...) -> DataLakeFileClient
+        """
+        Marks the specified file for deletion.
+
+        :param file:
+            The file with which to interact. This can either be the name of the file,
+            or an instance of FileProperties.
+        :type file: str or ~azure.storage.filedatalake.FileProperties
+        :keyword lease:
+            Required if the file has an active lease. Value can be a DataLakeLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :return: DataLakeFileClient after deleting specified file.
+        :rtype: ~azure.storage.filedatalake.aio.DataLakeFileClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_file_system_async.py
+                :start-after: [START delete_file_from_file_system]
+                :end-before: [END delete_file_from_file_system]
+                :language: python
+                :dedent: 12
+                :caption: Delete file in the file system.
+        """
+        file_client = self.get_file_client(file)
+        await file_client.delete_file(**kwargs)
+        return file_client
+
+    @distributed_trace_async
+    async def _undelete_path(self, deleted_path_name, deletion_id, **kwargs):
+        # type: (str, str, **Any) -> Union[DataLakeDirectoryClient, DataLakeFileClient]
+        """Restores soft-deleted path.
+
+        Operation will only be successful if used within the specified number of days
+        set in the delete retention policy.
+
+        .. versionadded:: 12.4.0
+            This operation was introduced in API version '2020-06-12'.
+
+        :param str deleted_path_name:
+            Specifies the name of the deleted path to restore.
+        :param str deletion_id:
+            Specifies the version of the deleted path to restore.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :returns: Returns the DataLake client for the restored soft-deleted path.
+        :rtype: ~azure.storage.filedatalake.aio.DataLakeDirectoryClient
+                or ~azure.storage.filedatalake.aio.DataLakeFileClient
+        """
+        _, url, undelete_source = self._undelete_path_options(deleted_path_name, deletion_id)
+
+        pipeline = AsyncPipeline(
+            transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access
+            policies=self._pipeline._impl_policies # pylint: disable = protected-access
+        )
+        path_client = AzureDataLakeStorageRESTAPI(
+            url, filesystem=self.file_system_name, path=deleted_path_name, pipeline=pipeline)
+        try:
+            is_file = await path_client.path.undelete(undelete_source=undelete_source, cls=is_file_path, **kwargs)
+            if is_file:
+                return self.get_file_client(deleted_path_name)
+            return self.get_directory_client(deleted_path_name)
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    def _get_root_directory_client(self):
+        # type: () -> DataLakeDirectoryClient
+        """Get a client to interact with the root directory.
+
+        :returns: A DataLakeDirectoryClient.
+        :rtype: ~azure.storage.filedatalake.aio.DataLakeDirectoryClient
+        """
+        return self.get_directory_client('/')
+
+    def get_directory_client(self, directory  # type: Union[DirectoryProperties, str]
+                             ):
+        # type: (...) -> DataLakeDirectoryClient
+        """Get a client to interact with the specified directory.
+
+        The directory need not already exist.
+
+        :param directory:
+            The directory with which to interact. This can either be the name of the directory,
+            or an instance of DirectoryProperties.
+        :type directory: str or ~azure.storage.filedatalake.DirectoryProperties
+        :returns: A DataLakeDirectoryClient.
+        :rtype: ~azure.storage.filedatalake.aio.DataLakeDirectoryClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_file_system_async.py
+                :start-after: [START get_directory_client_from_file_system]
+                :end-before: [END get_directory_client_from_file_system]
+                :language: python
+                :dedent: 12
+                :caption: Getting the directory client to interact with a specific directory.
+        """
+        try:
+            directory_name = directory.get('name')
+        except AttributeError:
+            directory_name = str(directory)
+        _pipeline = AsyncPipeline(
+            transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access
+            policies=self._pipeline._impl_policies # pylint: disable = protected-access
+        )
+        return DataLakeDirectoryClient(self.url, self.file_system_name, directory_name=directory_name,
+                                       credential=self._raw_credential,
+                                       api_version=self.api_version,
+                                       _configuration=self._config, _pipeline=_pipeline,
+                                       _hosts=self._hosts,
+                                       loop=self._loop)
+
+    def get_file_client(self, file_path  # type: Union[FileProperties, str]
+                        ):
+        # type: (...) -> DataLakeFileClient
+        """Get a client to interact with the specified file.
+
+        The file need not already exist.
+
+        :param file_path:
+            The file with which to interact. This can either be the path of the file relative to the
+            root directory (e.g. "directory/subdirectory/file"), or an instance of FileProperties.
+        :type file_path: str or ~azure.storage.filedatalake.FileProperties
+        :returns: A DataLakeFileClient.
+        :rtype: ~azure.storage.filedatalake.aio.DataLakeFileClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_file_system_async.py
+                :start-after: [START get_file_client_from_file_system]
+                :end-before: [END get_file_client_from_file_system]
+                :language: python
+                :dedent: 12
+                :caption: Getting the file client to interact with a specific file.
+        """
+        try:
+            file_path = file_path.get('name')
+        except AttributeError:
+            file_path = str(file_path)
+        _pipeline = AsyncPipeline(
+            transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access
+            policies=self._pipeline._impl_policies # pylint: disable = protected-access
+        )
+        return DataLakeFileClient(
+            self.url, self.file_system_name, file_path=file_path, credential=self._raw_credential,
+            api_version=self.api_version,
+            _hosts=self._hosts, _configuration=self._config, _pipeline=_pipeline, loop=self._loop)
+
+    @distributed_trace
+    def list_deleted_paths(self, **kwargs):
+        # type: (Any) -> AsyncItemPaged[DeletedPathProperties]
+        """Returns a generator to list the deleted (file or directory) paths under the specified file system.
+        The generator will lazily follow the continuation tokens returned by
+        the service.
+
+        .. versionadded:: 12.4.0
+            This operation was introduced in API version '2020-06-12'.
+
+        :keyword str path_prefix:
+            Filters the results to return only paths under the specified path.
+        :keyword int results_per_page:
+            An optional value that specifies the maximum number of items to return per page.
+            If omitted or greater than 5,000, the response will include up to 5,000 items per page.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :returns: An iterable (auto-paging) response of DeletedPathProperties.
+        :rtype:
+            ~azure.core.paging.AsyncItemPaged[~azure.storage.filedatalake.DeletedPathProperties]
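+
+        .. admonition:: Example:
+
+            A minimal sketch (not a shipped sample); assumes an existing
+            ``file_system_client`` and a hypothetical 'my-directory/' prefix.
+
+            .. code-block:: python
+
+                async for deleted in file_system_client.list_deleted_paths(
+                        path_prefix='my-directory/'):
+                    print(deleted.name, deleted.deletion_id)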
+        """
+        path_prefix = kwargs.pop('path_prefix', None)
+        timeout = kwargs.pop('timeout', None)
+        results_per_page = kwargs.pop('results_per_page', None)
+        command = functools.partial(
+            self._datalake_client_for_blob_operation.file_system.list_blob_hierarchy_segment,
+            showonly=ListBlobsIncludeItem.deleted,
+            timeout=timeout,
+            **kwargs)
+        return AsyncItemPaged(
+            command, prefix=path_prefix, page_iterator_class=DeletedPathPropertiesPaged,
+            results_per_page=results_per_page, **kwargs)
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/aio/_list_paths_helper.py b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/aio/_list_paths_helper.py
new file mode 100644
index 00000000..4d802635
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/aio/_list_paths_helper.py
@@ -0,0 +1,176 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+# pylint: disable=too-few-public-methods
+from azure.core.exceptions import HttpResponseError
+from azure.core.async_paging import AsyncPageIterator
+
+from .._deserialize import process_storage_error, get_deleted_path_properties_from_generated_code, \
+    return_headers_and_deserialized_path_list
+from .._generated.models import BlobItemInternal, BlobPrefix as GenBlobPrefix
+
+from .._shared.models import DictMixin
+from .._shared.response_handlers import return_context_and_deserialized
+from .._generated.models import Path
+from .._models import PathProperties
+
+
+class DeletedPathPropertiesPaged(AsyncPageIterator):
+    """An Iterable of deleted path properties.
+
+    :ivar str service_endpoint: The service URL.
+    :ivar str prefix: A path name prefix being used to filter the list.
+    :ivar str marker: The continuation token of the current page of results.
+    :ivar int results_per_page: The maximum number of results retrieved per API call.
+    :ivar str continuation_token: The continuation token to retrieve the next page of results.
+    :ivar str location_mode: The location mode being used to list results. The available
+        options include "primary" and "secondary".
+    :ivar current_page: The current page of listed results.
+    :vartype current_page: list(~azure.storage.filedatalake.DeletedPathProperties)
+    :ivar str container: The container that the paths are listed from.
+    :ivar str delimiter: A delimiting character used for hierarchy listing.
+
+    :param callable command: Function to retrieve the next page of items.
+    """
+    def __init__(
+            self, command,
+            container=None,
+            prefix=None,
+            results_per_page=None,
+            continuation_token=None,
+            delimiter=None,
+            location_mode=None):
+        super(DeletedPathPropertiesPaged, self).__init__(
+            get_next=self._get_next_cb,
+            extract_data=self._extract_data_cb,
+            continuation_token=continuation_token or ""
+        )
+        self._command = command
+        self.service_endpoint = None
+        self.prefix = prefix
+        self.marker = None
+        self.results_per_page = results_per_page
+        self.container = container
+        self.delimiter = delimiter
+        self.current_page = None
+        self.location_mode = location_mode
+
+    async def _get_next_cb(self, continuation_token):
+        try:
+            return await self._command(
+                prefix=self.prefix,
+                marker=continuation_token or None,
+                max_results=self.results_per_page,
+                cls=return_context_and_deserialized,
+                use_location=self.location_mode)
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    async def _extract_data_cb(self, get_next_return):
+        self.location_mode, self._response = get_next_return
+        self.service_endpoint = self._response.service_endpoint
+        self.prefix = self._response.prefix
+        self.marker = self._response.marker
+        self.results_per_page = self._response.max_results
+        self.container = self._response.container_name
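+        # A page consists of directory-style prefixes followed by the deleted blob items.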
+        self.current_page = self._response.segment.blob_prefixes + self._response.segment.blob_items
+        self.current_page = [self._build_item(item) for item in self.current_page]
+        self.delimiter = self._response.delimiter
+
+        return self._response.next_marker or None, self.current_page
+
+    def _build_item(self, item):
+        if isinstance(item, BlobItemInternal):
+            file_props = get_deleted_path_properties_from_generated_code(item)
+            file_props.file_system = self.container
+            return file_props
+        if isinstance(item, GenBlobPrefix):
+            return DirectoryPrefix(
+                container=self.container,
+                prefix=item.name,
+                results_per_page=self.results_per_page,
+                location_mode=self.location_mode)
+        return item
+
+
+class DirectoryPrefix(DictMixin):
+    """Directory prefix.
+
+    :ivar str name: Name of the deleted directory.
+    :ivar int results_per_page: The maximum number of results retrieved per API call.
+    :ivar str location_mode: The location mode being used to list results. The available
+        options include "primary" and "secondary".
+    :ivar str file_system: The file system that the deleted paths are listed from.
+    :ivar str delimiter: A delimiting character used for hierarchy listing.
+    """
+    def __init__(self, **kwargs):
+        self.name = kwargs.get('prefix')
+        self.results_per_page = kwargs.get('results_per_page')
+        self.file_system = kwargs.get('container')
+        self.delimiter = kwargs.get('delimiter')
+        self.location_mode = kwargs.get('location_mode')
+
+
+class PathPropertiesPaged(AsyncPageIterator):
+    """An Iterable of Path properties.
+
+    :ivar str path: Filters the results to return only paths under the specified path.
+    :ivar int results_per_page: The maximum number of results retrieved per API call.
+    :ivar str continuation_token: The continuation token to retrieve the next page of results.
+    :ivar list(~azure.storage.filedatalake.PathProperties) current_page: The current page of listed results.
+
+    :param callable command: Function to retrieve the next page of items.
+    :param str path: Filters the results to return only paths under the specified path.
+    :param int max_results: The maximum number of paths to retrieve per
+        call.
+    :param str continuation_token: An opaque continuation token.
+    """
+
+    def __init__(
+            self, command,
+            recursive,
+            path=None,
+            max_results=None,
+            continuation_token=None,
+            upn=None):
+        super(PathPropertiesPaged, self).__init__(
+            get_next=self._get_next_cb,
+            extract_data=self._extract_data_cb,
+            continuation_token=continuation_token or ""
+        )
+        self._command = command
+        self.recursive = recursive
+        self.results_per_page = max_results
+        self.path = path
+        self.upn = upn
+        self.current_page = None
+        self.path_list = None
+
+    async def _get_next_cb(self, continuation_token):
+        try:
+            return await self._command(
+                self.recursive,
+                continuation=continuation_token or None,
+                path=self.path,
+                max_results=self.results_per_page,
+                upn=self.upn,
+                cls=return_headers_and_deserialized_path_list)
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    async def _extract_data_cb(self, get_next_return):
+        self.path_list, self._response = get_next_return
+        self.current_page = [self._build_item(item) for item in self.path_list]
+
+        return self._response['continuation'] or None, self.current_page
+
+    @staticmethod
+    def _build_item(item):
+        if isinstance(item, PathProperties):
+            return item
+        if isinstance(item, Path):
+            path = PathProperties._from_generated(item)  # pylint: disable=protected-access
+            return path
+        return item
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/aio/_models.py b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/aio/_models.py
new file mode 100644
index 00000000..923cbb61
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/aio/_models.py
@@ -0,0 +1,40 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+# pylint: disable=too-few-public-methods
+from azure.storage.blob.aio._models import ContainerPropertiesPaged
+from .._models import FileSystemProperties
+
+
+class FileSystemPropertiesPaged(ContainerPropertiesPaged):
+    """An Iterable of File System properties.
+
+    :ivar str service_endpoint: The service URL.
+    :ivar str prefix: A file system name prefix being used to filter the list.
+    :ivar str marker: The continuation token of the current page of results.
+    :ivar int results_per_page: The maximum number of results retrieved per API call.
+    :ivar str continuation_token: The continuation token to retrieve the next page of results.
+    :ivar str location_mode: The location mode being used to list results. The available
+        options include "primary" and "secondary".
+    :ivar current_page: The current page of listed results.
+    :vartype current_page: list(~azure.storage.filedatalake.FileSystemProperties)
+
+    :param callable command: Function to retrieve the next page of items.
+    :param str prefix: Filters the results to return only file systems whose names
+        begin with the specified prefix.
+    :param int results_per_page: The maximum number of file system names to retrieve per
+        call.
+    :param str continuation_token: An opaque continuation token.
+    """
+
+    def __init__(self, *args, **kwargs):
+        super(FileSystemPropertiesPaged, self).__init__(
+            *args,
+            **kwargs
+        )
+
+    @staticmethod
+    def _build_item(item):
+        return FileSystemProperties._from_generated(item)  # pylint: disable=protected-access
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/aio/_path_client_async.py b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/aio/_path_client_async.py
new file mode 100644
index 00000000..774f687d
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/aio/_path_client_async.py
@@ -0,0 +1,901 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+# pylint: disable=invalid-overridden-method, docstring-keyword-should-match-keyword-only
+
+from datetime import datetime
+from typing import (
+    Any, Dict, Optional, Union,
+    TYPE_CHECKING
+)
+
+from azure.core.exceptions import AzureError, HttpResponseError
+from azure.core.tracing.decorator_async import distributed_trace_async
+from azure.storage.blob.aio import BlobClient
+from .._serialize import get_api_version, compare_api_versions
+from .._shared.base_client_async import AsyncStorageAccountHostsMixin
+from .._path_client import PathClient as PathClientBase
+from .._models import DirectoryProperties, AccessControlChangeResult, AccessControlChangeFailure, \
+    AccessControlChangeCounters, AccessControlChanges
+from .._generated.aio import AzureDataLakeStorageRESTAPI
+from ._data_lake_lease_async import DataLakeLeaseClient
+from .._deserialize import process_storage_error
+from .._shared.policies_async import ExponentialRetry
+
+if TYPE_CHECKING:
+    from azure.core.credentials import AzureNamedKeyCredential, AzureSasCredential
+    from azure.core.credentials_async import AsyncTokenCredential
+    from .._models import ContentSettings, FileProperties
+
+
+class PathClient(AsyncStorageAccountHostsMixin, PathClientBase):
+    """A base client for interacting with a DataLake file/directory, even if the file/directory may not
+    yet exist.
+
+    :param str account_url:
+        The URI to the storage account.
+    :param str file_system_name:
+        The file system for the directory or files.
+    :param str file_path:
+        The whole file path, used to interact with a specific file,
+        e.g. "{directory}/{subdirectory}/{file}".
+    :param credential:
+        The credentials with which to authenticate. This is optional if the
+        account URL already has a SAS token. The value can be a SAS token string,
+        an instance of an AzureSasCredential or AzureNamedKeyCredential from azure.core.credentials,
+        an account shared access key, or an instance of a TokenCredentials class from azure.identity.
+        If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential
+        - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError.
+        If using an instance of AzureNamedKeyCredential, "name" should be the storage account name, and "key"
+        should be the storage account key.
+    :type credential:
+        ~azure.core.credentials.AzureNamedKeyCredential or
+        ~azure.core.credentials.AzureSasCredential or
+        ~azure.core.credentials_async.AsyncTokenCredential or
+        str or dict[str, str] or None
+    :keyword str api_version:
+        The Storage API version to use for requests. Default value is the most recent service version that is
+        compatible with the current SDK. Setting to an older version may result in reduced feature compatibility.
+    :keyword str audience: The audience to use when requesting tokens for Azure Active Directory
+        authentication. Only has an effect when credential is of type TokenCredential. The value could be
+        https://storage.azure.com/ (default) or https://<account>.blob.core.windows.net.
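+
+    .. admonition:: Example:
+
+        A minimal construction sketch (not a shipped sample); PathClient is normally
+        used through its subclasses, shown here with hypothetical account details.
+
+        .. code-block:: python
+
+            from azure.identity.aio import DefaultAzureCredential
+            from azure.storage.filedatalake.aio import DataLakeDirectoryClient
+
+            directory_client = DataLakeDirectoryClient(
+                account_url="https://<my-account>.dfs.core.windows.net",
+                file_system_name="my-file-system",
+                directory_name="my-directory",
+                credential=DefaultAzureCredential())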
+    """
+    def __init__(
+            self, account_url: str,
+            file_system_name: str,
+            path_name: str,
+            credential: Optional[Union[str, Dict[str, str], "AzureNamedKeyCredential", "AzureSasCredential", "AsyncTokenCredential"]] = None,  # pylint: disable=line-too-long
+            **kwargs: Any
+        ) -> None:
+        kwargs['retry_policy'] = kwargs.get('retry_policy') or ExponentialRetry(**kwargs)
+
+        super(PathClient, self).__init__(account_url,  # pylint: disable=specify-parameter-names-in-call
+                                         file_system_name, path_name,
+                                         credential=credential,
+                                         **kwargs)  # type: ignore
+
+        kwargs.pop('_hosts', None)
+
+        self._blob_client = BlobClient(account_url=self._blob_account_url, container_name=self.file_system_name,
+                                       blob_name=self.path_name,
+                                       credential=credential,
+                                       _hosts=self._blob_client._hosts,
+                                       **kwargs)
+        self._api_version = get_api_version(kwargs)
+        self._client = self._build_generated_client(self.url)
+        self._datalake_client_for_blob_operation = self._build_generated_client(self._blob_client.url)
+        self._loop = kwargs.get('loop', None)
+
+    def _build_generated_client(self, url: str) -> AzureDataLakeStorageRESTAPI:
+        client = AzureDataLakeStorageRESTAPI(
+            url,
+            base_url=url,
+            file_system=self.file_system_name,
+            path=self.path_name,
+            pipeline=self._pipeline
+        )
+        client._config.version = self._api_version  # pylint: disable=protected-access
+        return client
+
+    async def __aexit__(self, *args):
+        await self._blob_client.close()
+        await self._datalake_client_for_blob_operation.close()
+        await super(PathClient, self).__aexit__(*args)
+
+    async def close(self):
+        # type: () -> None
+        """ This method is to close the sockets opened by the client.
+        It need not be used when using with a context manager.
+        """
+        await self.__aexit__()
+
+    async def _create(self, resource_type, content_settings=None, metadata=None, **kwargs):
+        # type: (...) -> Dict[str, Union[str, datetime]]
+        """
+        Create directory or file
+
+        :param resource_type:
+            Required for Create File and Create Directory.
+            The value must be "file" or "directory". Possible values include:
+            'directory', 'file'
+        :type resource_type: str
+        :param ~azure.storage.filedatalake.ContentSettings content_settings:
+            ContentSettings object used to set path properties.
+        :param metadata:
+            Name-value pairs associated with the file/directory as metadata.
+        :type metadata: dict(str, str)
+        :keyword lease:
+            Required if the file/directory has an active lease. Value can be a DataLakeLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str
+        :keyword str umask:
+            Optional and only valid if Hierarchical Namespace is enabled for the account.
+            When creating a file or directory and the parent folder does not have a default ACL,
+            the umask restricts the permissions of the file or directory to be created.
+            The resulting permission is given by p & ^u, where p is the permission and u is the umask.
+            For example, if p is 0777 and u is 0057, then the resulting permission is 0720.
+            The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027.
+            The umask must be specified in 4-digit octal notation (e.g. 0766).
+        :keyword str owner:
+            The owner of the file or directory.
+        :keyword str group:
+            The owning group of the file or directory.
+        :keyword str acl:
+            Sets POSIX access control rights on files and directories. The value is a
+            comma-separated list of access control entries. Each access control entry (ACE) consists of a
+            scope, a type, a user or group identifier, and permissions in the format
+            "[scope:][type]:[id]:[permissions]".
+        :keyword str lease_id:
+            Proposed lease ID, in a GUID string format. The DataLake service returns
+            400 (Invalid request) if the proposed lease ID is not in the correct format.
+        :keyword int lease_duration:
+            Specifies the duration of the lease, in seconds, or negative one
+            (-1) for a lease that never expires. A non-infinite lease can be
+            between 15 and 60 seconds. A lease duration cannot be changed
+            using renew or change.
+        :keyword expires_on:
+            The time to set the file to expiry.
+            If the type of expires_on is an int, expiration time will be set
+            as the number of milliseconds elapsed from creation time.
+            If the type of expires_on is datetime, expiration time will be set
+            absolute to the time provided. If no time zone info is provided, this
+            will be interpreted as UTC.
+        :paramtype expires_on: datetime or int
+        :keyword permissions:
+            Optional and only valid if Hierarchical Namespace
+            is enabled for the account. Sets POSIX access permissions for the file
+            owner, the file owning group, and others. Each class may be granted
+            read, write, or execute permission.  The sticky bit is also supported.
+            Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are
+            supported.
+        :paramtype permissions: str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword ~azure.storage.filedatalake.CustomerProvidedEncryptionKey cpk:
+            Encrypts the data on the service-side with the given key.
+            Use of customer-provided keys must be done over HTTPS.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :keyword str encryption_context:
+            Specifies the encryption context to set on the file.
+        :return: A dictionary of response headers.
+        :rtype: dict[str, Union[str, datetime]]
+        """
+        lease_id = kwargs.get('lease_id', None)
+        lease_duration = kwargs.get('lease_duration', None)
+        if lease_id and not lease_duration:
+            raise ValueError("A lease_duration must be specified when providing a lease_id.")
+        if lease_duration and not lease_id:
+            raise ValueError("A lease_id must be specified when providing a lease_duration.")
+        options = self._create_path_options(
+            resource_type,
+            content_settings=content_settings,
+            metadata=metadata,
+            **kwargs)
+        try:
+            return await self._client.path.create(**options)
+        except HttpResponseError as error:
+            process_storage_error(error)
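+
+    # A worked example of the umask arithmetic described above (a sketch of
+    # the service-side rule, not an SDK call): the resulting permission is
+    # p & ^u, so with p=0777 and u=0057:
+    #
+    #     >>> oct(0o777 & ~0o057)
+    #     '0o720'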
+
+    async def _delete(self, **kwargs):
+        # type: (**Any) -> Dict[str, Any]
+        """
+        Marks the specified path for deletion.
+
+        :keyword lease:
+            Required if the file/directory has an active lease. Value can be a LeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :returns: A dictionary containing information about the deleted path.
+        :rtype: dict[str, Any]
+        """
+        # Perform paginated delete only if using OAuth, deleting a directory, and api version is 2023-08-03 or later
+        # The pagination is only for ACL checks, the final request remains the atomic delete operation
+        paginated = None
+        if (compare_api_versions(self.api_version, '2023-08-03') >= 0 and
+            hasattr(self.credential, 'get_token') and
+            kwargs.get('recursive')):  # Directory delete will always specify recursive
+            paginated = True
+
+        options = self._delete_path_options(paginated, **kwargs)
+        try:
+            response_headers = await self._client.path.delete(**options)
+            # Loop until continuation token is None for paginated delete
+            while response_headers['continuation']:
+                response_headers = await self._client.path.delete(
+                    continuation=response_headers['continuation'],
+                    **options)
+
+            return response_headers
+        except HttpResponseError as error:
+            process_storage_error(error)
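+
+    # This internal method backs the public delete helpers. A hedged usage
+    # sketch (assumes an authenticated async DataLakeDirectoryClient named
+    # `dir_client`; with OAuth and a recent API version the ACL checks are
+    # paginated as above while the final request stays an atomic delete):
+    #
+    #     await dir_client.delete_directory()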
+
+    @distributed_trace_async
+    async def set_access_control(self, owner=None,  # type: Optional[str]
+                                 group=None,  # type: Optional[str]
+                                 permissions=None,  # type: Optional[str]
+                                 acl=None,  # type: Optional[str]
+                                 **kwargs):
+        # type: (...) -> Dict[str, Union[str, datetime]]
+        """
+        Set the owner, group, permissions, or access control list for a path.
+
+        :param owner:
+            Optional. The owner of the file or directory.
+        :type owner: str
+        :param group:
+            Optional. The owning group of the file or directory.
+        :type group: str
+        :param permissions:
+            Optional and only valid if Hierarchical Namespace
+            is enabled for the account. Sets POSIX access permissions for the file
+            owner, the file owning group, and others. Each class may be granted
+            read, write, or execute permission.  The sticky bit is also supported.
+            Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are
+            supported.
+            permissions and acl are mutually exclusive.
+        :type permissions: str
+        :param acl:
+            Sets POSIX access control rights on files and directories.
+            The value is a comma-separated list of access control entries. Each
+            access control entry (ACE) consists of a scope, a type, a user or
+            group identifier, and permissions in the format
+            "[scope:][type]:[id]:[permissions]".
+            permissions and acl are mutually exclusive.
+        :type acl: str
+        :keyword lease:
+            Required if the file/directory has an active lease. Value can be a LeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :returns: A dict containing the updated resource properties (Etag and last modified).
+        :rtype: dict[str, str] or dict[str, ~datetime.datetime]
+        """
+        options = self._set_access_control_options(owner=owner, group=group, permissions=permissions, acl=acl, **kwargs)
+        try:
+            return await self._client.path.set_access_control(**options)
+        except HttpResponseError as error:
+            process_storage_error(error)
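+
+    # A hypothetical usage sketch for the ACE format described above (assumes
+    # an authenticated async client named `path_client`):
+    #
+    #     await path_client.set_access_control(
+    #         acl='user::rwx,group::r-x,other::---')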
+
+    @distributed_trace_async
+    async def get_access_control(self, upn=None,  # type: Optional[bool]
+                                 **kwargs):
+        # type: (...) -> Dict[str, Any]
+        """
+        Get the owner, group, permissions, or access control list for a path.
+
+        :param upn:
+            Optional. Valid only when Hierarchical Namespace is
+            enabled for the account. If "true", the user identity values returned
+            in the x-ms-owner, x-ms-group, and x-ms-acl response headers will be
+            transformed from Azure Active Directory Object IDs to User Principal
+            Names.  If "false", the values will be returned as Azure Active
+            Directory Object IDs. The default value is false. Note that group and
+            application Object IDs are not translated because they do not have
+            unique friendly names.
+        :type upn: bool
+        :keyword lease:
+            Required if the file/directory has an active lease. Value can be a LeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :returns: A dict containing the access control properties (owner, group, permissions, acl), plus Etag and last modified.
+        :rtype: dict[str, str] or dict[str, ~datetime.datetime]
+        """
+        options = self._get_access_control_options(upn=upn, **kwargs)
+        try:
+            return await self._client.path.get_properties(**options)
+        except HttpResponseError as error:
+            process_storage_error(error)
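+
+    # A hypothetical usage sketch (the client name is illustrative): request
+    # User Principal Names instead of raw AAD Object IDs in the returned
+    # values:
+    #
+    #     access = await path_client.get_access_control(upn=True)
+    #     print(access['owner'], access['acl'])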
+
+    @distributed_trace_async
+    async def set_access_control_recursive(self, acl, **kwargs):
+        # type: (str, **Any) -> AccessControlChangeResult
+        """
+        Sets the Access Control on a path and sub-paths.
+
+        :param acl:
+            Sets POSIX access control rights on files and directories.
+            The value is a comma-separated list of access control entries. Each
+            access control entry (ACE) consists of a scope, a type, a user or
+            group identifier, and permissions in the format
+            "[scope:][type]:[id]:[permissions]".
+        :type acl: str
+        :keyword func(~azure.storage.filedatalake.AccessControlChanges) progress_hook:
+            Callback where the caller can track progress of the operation
+            as well as collect paths that failed to change Access Control.
+        :keyword str continuation_token:
+            Optional continuation token that can be used to resume a previously stopped operation.
+        :keyword int batch_size:
+            Optional. If the data set size exceeds the batch size, the operation will be split into multiple
+            requests so that progress can be tracked. The batch size should be between 1 and 2000.
+            The default when unspecified is 2000.
+        :keyword int max_batches:
+            Optional. Defines the maximum number of batches that a single change Access Control operation can execute.
+            If the maximum is reached before all sub-paths are processed,
+            the continuation token can be used to resume the operation.
+            An empty value indicates that the maximum number of batches is unbounded and the operation continues to the end.
+        :keyword bool continue_on_failure:
+            If set to False, the operation will terminate quickly on encountering user errors (4XX).
+            If True, the operation will ignore user errors and proceed with the operation on other sub-entities of
+            the directory.
+            A continuation token will only be returned in case of user errors when continue_on_failure is True.
+            If not set, the default value is False.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :return: A summary of the recursive operations, including the count of successes and failures,
+            as well as a continuation token in case the operation was terminated prematurely.
+        :rtype: ~azure.storage.filedatalake.AccessControlChangeResult
+        :raises ~azure.core.exceptions.AzureError:
+            The user can restart the operation using the continuation_token field of AzureError if the token is available.
+        """
+        if not acl:
+            raise ValueError("The Access Control List must be set for this operation")
+
+        progress_hook = kwargs.pop('progress_hook', None)
+        max_batches = kwargs.pop('max_batches', None)
+        options = self._set_access_control_recursive_options(mode='set', acl=acl, **kwargs)
+        return await self._set_access_control_internal(options=options, progress_hook=progress_hook,
+                                                       max_batches=max_batches)
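+
+    # A hedged progress-tracking sketch for the batched operation described
+    # above (all names are illustrative):
+    #
+    #     async def on_progress(changes):
+    #         done = changes.aggregate_counters
+    #         print(f"{done.directories_successful} dirs, "
+    #               f"{done.files_successful} files updated")
+    #
+    #     result = await dir_client.set_access_control_recursive(
+    #         acl='user::rwx,group::r-x,other::---',
+    #         progress_hook=on_progress,
+    #         batch_size=500)
+    #     print(result.counters.failure_count, "failures")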
+
+    @distributed_trace_async
+    async def update_access_control_recursive(self, acl, **kwargs):
+        # type: (str, **Any) -> AccessControlChangeResult
+        """
+        Modifies the Access Control on a path and sub-paths.
+
+        :param acl:
+            Modifies POSIX access control rights on files and directories.
+            The value is a comma-separated list of access control entries. Each
+            access control entry (ACE) consists of a scope, a type, a user or
+            group identifier, and permissions in the format
+            "[scope:][type]:[id]:[permissions]".
+        :type acl: str
+        :keyword func(~azure.storage.filedatalake.AccessControlChanges) progress_hook:
+            Callback where the caller can track progress of the operation
+            as well as collect paths that failed to change Access Control.
+        :keyword str continuation_token:
+            Optional continuation token that can be used to resume a previously stopped operation.
+        :keyword int batch_size:
+            Optional. If the data set size exceeds the batch size, the operation will be split into multiple
+            requests so that progress can be tracked. The batch size should be between 1 and 2000.
+            The default when unspecified is 2000.
+        :keyword int max_batches:
+            Optional. Defines the maximum number of batches that a single
+            change Access Control operation can execute.
+            If the maximum is reached before all sub-paths are processed,
+            the continuation token can be used to resume the operation.
+            An empty value indicates that the maximum number of batches is unbounded and the operation continues to the end.
+        :keyword bool continue_on_failure:
+            If set to False, the operation will terminate quickly on encountering user errors (4XX).
+            If True, the operation will ignore user errors and proceed with the operation on other sub-entities of
+            the directory.
+            A continuation token will only be returned in case of user errors when continue_on_failure is True.
+            If not set, the default value is False.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :return: A summary of the recursive operations, including the count of successes and failures,
+            as well as a continuation token in case the operation was terminated prematurely.
+        :rtype: ~azure.storage.filedatalake.AccessControlChangeResult
+        :raises ~azure.core.exceptions.AzureError:
+            The user can restart the operation using the continuation_token field of AzureError if the token is available.
+        """
+        if not acl:
+            raise ValueError("The Access Control List must be set for this operation")
+
+        progress_hook = kwargs.pop('progress_hook', None)
+        max_batches = kwargs.pop('max_batches', None)
+        options = self._set_access_control_recursive_options(mode='modify', acl=acl, **kwargs)
+        return await self._set_access_control_internal(options=options, progress_hook=progress_hook,
+                                                       max_batches=max_batches)
+
+    @distributed_trace_async
+    async def remove_access_control_recursive(self,
+                                              acl,
+                                              **kwargs):
+        # type: (str, **Any) -> AccessControlChangeResult
+        """
+        Removes the Access Control on a path and sub-paths.
+
+        :param acl:
+            Removes POSIX access control rights on files and directories.
+            The value is a comma-separated list of access control entries. Each
+            access control entry (ACE) consists of a scope, a type, and a user or
+            group identifier in the format "[scope:][type]:[id]".
+        :type acl: str
+        :keyword func(~azure.storage.filedatalake.AccessControlChanges) progress_hook:
+            Callback where the caller can track progress of the operation
+            as well as collect paths that failed to change Access Control.
+        :keyword str continuation_token:
+            Optional continuation token that can be used to resume a previously stopped operation.
+        :keyword int batch_size:
+            Optional. If the data set size exceeds the batch size, the operation will be split into multiple
+            requests so that progress can be tracked. The batch size should be between 1 and 2000.
+            The default when unspecified is 2000.
+        :keyword int max_batches:
+            Optional. Defines the maximum number of batches that a single change Access Control operation can execute.
+            If the maximum is reached before all sub-paths are processed,
+            the continuation token can be used to resume the operation.
+            An empty value indicates that the maximum number of batches is unbounded and the operation continues to the end.
+        :keyword bool continue_on_failure:
+            If set to False, the operation will terminate quickly on encountering user errors (4XX).
+            If True, the operation will ignore user errors and proceed with the operation on other sub-entities of
+            the directory.
+            A continuation token will only be returned in case of user errors when continue_on_failure is True.
+            If not set, the default value is False.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :return: A summary of the recursive operations, including the count of successes and failures,
+            as well as a continuation token in case the operation was terminated prematurely.
+        :rtype: ~azure.storage.filedatalake.AccessControlChangeResult
+        :raises ~azure.core.exceptions.AzureError:
+            The user can restart the operation using the continuation_token field of AzureError if the token is available.
+        """
+        if not acl:
+            raise ValueError("The Access Control List must be set for this operation")
+
+        progress_hook = kwargs.pop('progress_hook', None)
+        max_batches = kwargs.pop('max_batches', None)
+        options = self._set_access_control_recursive_options(mode='remove', acl=acl, **kwargs)
+        return await self._set_access_control_internal(options=options, progress_hook=progress_hook,
+                                                       max_batches=max_batches)
+
+    async def _set_access_control_internal(self, options, progress_hook, max_batches=None):
+        try:
+            continue_on_failure = options.get('force_flag')
+            total_directories_successful = 0
+            total_files_success = 0
+            total_failure_count = 0
+            batch_count = 0
+            last_continuation_token = None
+            current_continuation_token = None
+            continue_operation = True
+            while continue_operation:
+                headers, resp = await self._client.path.set_access_control_recursive(**options)
+
+                # make a running tally so that we can report the final results
+                total_directories_successful += resp.directories_successful
+                total_files_success += resp.files_successful
+                total_failure_count += resp.failure_count
+                batch_count += 1
+                current_continuation_token = headers['continuation']
+
+                if current_continuation_token is not None:
+                    last_continuation_token = current_continuation_token
+
+                if progress_hook is not None:
+                    await progress_hook(AccessControlChanges(
+                        batch_counters=AccessControlChangeCounters(
+                            directories_successful=resp.directories_successful,
+                            files_successful=resp.files_successful,
+                            failure_count=resp.failure_count,
+                        ),
+                        aggregate_counters=AccessControlChangeCounters(
+                            directories_successful=total_directories_successful,
+                            files_successful=total_files_success,
+                            failure_count=total_failure_count,
+                        ),
+                        batch_failures=[AccessControlChangeFailure(
+                            name=failure.name,
+                            is_directory=failure.type == 'DIRECTORY',
+                            error_message=failure.error_message) for failure in resp.failed_entries],
+                        continuation=last_continuation_token))
+
+                # update the continuation token, if there are more operations that cannot be completed in a single call
+                max_batches_satisfied = (max_batches is not None and batch_count == max_batches)
+                continue_operation = bool(current_continuation_token) and not max_batches_satisfied
+                options['continuation'] = current_continuation_token
+
+            # currently the service stops on any failure, so we should send back the last continuation token
+            # for the user to retry the failed updates
+            # otherwise we should just return what the service gave us
+            return AccessControlChangeResult(counters=AccessControlChangeCounters(
+                directories_successful=total_directories_successful,
+                files_successful=total_files_success,
+                failure_count=total_failure_count),
+                continuation=last_continuation_token
+                if total_failure_count > 0 and not continue_on_failure else current_continuation_token)
+        except HttpResponseError as error:
+            error.continuation_token = last_continuation_token
+            process_storage_error(error)
+        except AzureError as error:
+            error.continuation_token = last_continuation_token
+            raise
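+
+    # A resume sketch for the loop above: when the operation fails part-way,
+    # the continuation token attached to the raised AzureError (when
+    # available) can be fed back in to resume (names are illustrative):
+    #
+    #     from azure.core.exceptions import AzureError
+    #     try:
+    #         await dir_client.set_access_control_recursive(acl=acl)
+    #     except AzureError as err:
+    #         if err.continuation_token:
+    #             await dir_client.set_access_control_recursive(
+    #                 acl=acl, continuation_token=err.continuation_token)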
+
+    async def _rename_path(self, rename_source, **kwargs):
+        # type: (str, **Any) -> Dict[str, Any]
+        """
+        Rename directory or file
+
+        :param rename_source: The value must have the following format: "/{filesystem}/{path}".
+        :type rename_source: str
+        :keyword ~azure.storage.filedatalake.ContentSettings content_settings:
+            ContentSettings object used to set path properties.
+        :keyword source_lease: A lease ID for the source path. If specified,
+            the source path must have an active lease and the lease ID must
+            match.
+        :paramtype source_lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str
+        :keyword lease:
+            Required if the file/directory has an active lease. Value can be a LeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword ~datetime.datetime source_if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime source_if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str source_etag:
+            The source ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions source_match_condition:
+            The source match condition to use upon the etag.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :returns: response dict containing information about the renamed path.
+        :rtype: dict[str, Any]
+        """
+        options = self._rename_path_options(
+            rename_source,
+            **kwargs)
+        try:
+            return await self._client.path.create(**options)
+        except HttpResponseError as error:
+            process_storage_error(error)
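+
+    # A sketch of the rename_source format described above (hypothetical
+    # internal call; the leading segment names the source file system):
+    #
+    #     await path_client._rename_path('/source-filesystem/dir/old-name')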
+
+    async def _get_path_properties(self, **kwargs):
+        # type: (**Any) -> Union[FileProperties, DirectoryProperties]
+        """Returns all user-defined metadata, standard HTTP properties, and
+        system properties for the file or directory. It does not return the content of the directory or file.
+
+        :keyword lease:
+            Required if the directory or file has an active lease. Value can be a DataLakeLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword ~azure.storage.filedatalake.CustomerProvidedEncryptionKey cpk:
+            Decrypts the data on the service-side with the given key.
+            Use of customer-provided keys must be done over HTTPS.
+            Required if the file/directory was created with a customer-provided key.
+        :keyword bool upn:
+            If True, the user identity values returned in the x-ms-owner, x-ms-group,
+            and x-ms-acl response headers will be transformed from Azure Active Directory Object IDs to User 
+            Principal Names in the owner, group, and acl fields of the respective property object returned.
+            If False, the values will be returned as Azure Active Directory Object IDs.
+            The default value is False. Note that group and application Object IDs are not translated
+            because they do not have unique friendly names.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :returns:
+            Information including user-defined metadata, standard HTTP properties,
+            and system properties for the file or directory.
+        :rtype: DirectoryProperties or FileProperties
+        """
+        upn = kwargs.pop('upn', None)
+        if upn:
+            headers = kwargs.pop('headers', {})
+            headers['x-ms-upn'] = str(upn)
+            kwargs['headers'] = headers
+        path_properties = await self._blob_client.get_blob_properties(**kwargs)
+        return path_properties
+
+    async def _exists(self, **kwargs):
+        # type: (**Any) -> bool
+        """
+        Returns True if a path exists and returns False otherwise.
+
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :returns: True if a path exists, False otherwise.
+        :rtype: bool
+        """
+        return await self._blob_client.exists(**kwargs)
+
+    @distributed_trace_async
+    async def set_metadata(self, metadata,  # type: Dict[str, str]
+                           **kwargs):
+        # type: (...) -> Dict[str, Union[str, datetime]]
+        """Sets one or more user-defined name-value pairs for the specified
+        file system. Each call to this operation replaces all existing metadata
+        attached to the file system. To remove all metadata from the file system,
+        call this operation with no metadata dict.
+
+        :param metadata:
+            A dict containing name-value pairs to associate with the file system as
+            metadata. Example: {'category':'test'}
+        :type metadata: dict[str, str]
+        :keyword lease:
+            If specified, set_file_system_metadata only succeeds if the
+            file system's lease is active and matches this ID.
+        :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword ~azure.storage.filedatalake.CustomerProvidedEncryptionKey cpk:
+            Encrypts the data on the service-side with the given key.
+            Use of customer-provided keys must be done over HTTPS.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :returns: path-updated property dict (Etag and last modified).
+        :rtype: dict[str, str] or dict[str, ~datetime.datetime]
+        """
+        return await self._blob_client.set_blob_metadata(metadata=metadata, **kwargs)
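+
+    # A hypothetical usage sketch (assumes an authenticated async client named
+    # `path_client`); note that each call replaces all existing metadata:
+    #
+    #     await path_client.set_metadata({'category': 'test'})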
+
+    @distributed_trace_async
+    async def set_http_headers(self, content_settings=None,  # type: Optional[ContentSettings]
+                               **kwargs):
+        # type: (...) -> Dict[str, Any]
+        """Sets system properties on the file or directory.
+
+        If one property is set for the content_settings, all properties will be overridden.
+
+        :param ~azure.storage.filedatalake.ContentSettings content_settings:
+            ContentSettings object used to set file/directory properties.
+        :keyword lease:
+            If specified, the operation only succeeds if the
+            path's lease is active and matches this ID.
+        :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :returns: file/directory-updated property dict (Etag and last modified)
+        :rtype: dict[str, Any]
+        """
+        return await self._blob_client.set_http_headers(content_settings=content_settings, **kwargs)
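+
+    # A hypothetical usage sketch; because a partial ContentSettings overrides
+    # every property, populate all fields you want to keep:
+    #
+    #     from azure.storage.filedatalake import ContentSettings
+    #     await path_client.set_http_headers(
+    #         content_settings=ContentSettings(
+    #             content_type='application/json',
+    #             cache_control='no-cache'))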
+
+    @distributed_trace_async
+    async def acquire_lease(self, lease_duration=-1,  # type: Optional[int]
+                            lease_id=None,  # type: Optional[str]
+                            **kwargs):
+        # type: (...) -> DataLakeLeaseClient
+        """
+        Requests a new lease. If the file or directory does not have an active lease,
+        the DataLake service creates a lease on the file/directory and returns a new
+        lease ID.
+
+        :param int lease_duration:
+            Specifies the duration of the lease, in seconds, or negative one
+            (-1) for a lease that never expires. A non-infinite lease can be
+            between 15 and 60 seconds. A lease duration cannot be changed
+            using renew or change. Default is -1 (infinite lease).
+        :param str lease_id:
+            Proposed lease ID, in a GUID string format. The DataLake service returns
+            400 (Invalid request) if the proposed lease ID is not in the correct format.
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-datalake
+            #other-client--per-operation-configuration>`_.
+        :returns: A DataLakeLeaseClient object that can be used in a context manager.
+        :rtype: ~azure.storage.filedatalake.aio.DataLakeLeaseClient
+        """
+        lease = DataLakeLeaseClient(self, lease_id=lease_id)  # type: ignore
+        await lease.acquire(lease_duration=lease_duration, **kwargs)
+        return lease
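+
+    # A hedged lease sketch (assumes an authenticated async DataLakeFileClient
+    # named `file_client`): hold a 15-second lease while mutating the path;
+    # the lease is released when the context manager exits:
+    #
+    #     lease = await file_client.acquire_lease(lease_duration=15)
+    #     async with lease:
+    #         await file_client.set_metadata({'stage': 'locked'}, lease=lease)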
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/aio/_upload_helper.py b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/aio/_upload_helper.py
new file mode 100644
index 00000000..40d24a03
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/aio/_upload_helper.py
@@ -0,0 +1,104 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+from azure.core.exceptions import HttpResponseError
+from .._deserialize import (
+    process_storage_error)
+from .._shared.response_handlers import return_response_headers
+from .._shared.uploads_async import (
+    upload_data_chunks,
+    DataLakeFileChunkUploader, upload_substream_blocks)
+
+
+def _any_conditions(modified_access_conditions=None, **kwargs):  # pylint: disable=unused-argument
+    return any([
+        modified_access_conditions.if_modified_since,
+        modified_access_conditions.if_unmodified_since,
+        modified_access_conditions.if_none_match,
+        modified_access_conditions.if_match
+    ])
+
+
+async def upload_datalake_file(
+        client=None,
+        stream=None,
+        length=None,
+        overwrite=None,
+        validate_content=None,
+        max_concurrency=None,
+        file_settings=None,
+        **kwargs):
+    try:
+        if length == 0:
+            return {}
+        properties = kwargs.pop('properties', None)
+        umask = kwargs.pop('umask', None)
+        permissions = kwargs.pop('permissions', None)
+        path_http_headers = kwargs.pop('path_http_headers', None)
+        modified_access_conditions = kwargs.pop('modified_access_conditions', None)
+        chunk_size = kwargs.pop('chunk_size', 100 * 1024 * 1024)
+        encryption_context = kwargs.pop('encryption_context', None)
+
+        if not overwrite:
+            # if the customer didn't specify access conditions, they cannot flush data to an existing file
+            if not _any_conditions(modified_access_conditions):
+                modified_access_conditions.if_none_match = '*'
+            if properties or umask or permissions:
+                raise ValueError("metadata, umask and permissions can be set only when overwrite is enabled")
+
+        if overwrite:
+            response = await client.create(
+                resource='file',
+                path_http_headers=path_http_headers,
+                properties=properties,
+                modified_access_conditions=modified_access_conditions,
+                umask=umask,
+                permissions=permissions,
+                encryption_context=encryption_context,
+                cls=return_response_headers,
+                **kwargs)
+
+            # this modified_access_conditions will be applied to flush_data to make sure
+            # no other flush between create and the current flush
+            modified_access_conditions.if_match = response['etag']
+            modified_access_conditions.if_none_match = None
+            modified_access_conditions.if_modified_since = None
+            modified_access_conditions.if_unmodified_since = None
+
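+        # Stream-capability note: fall back to the buffered chunk uploader
+        # whenever the stream cannot be sliced into independent substreams
+        # (explicit byte buffering, content validation, chunks below the
+        # large-upload threshold, or a non-seekable stream).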
+        use_original_upload_path = (
+            file_settings.use_byte_buffer or
+            validate_content or
+            chunk_size < file_settings.min_large_chunk_upload_threshold or
+            (hasattr(stream, 'seekable') and not stream.seekable()) or
+            not hasattr(stream, 'seek') or
+            not hasattr(stream, 'tell'))
+
+        if use_original_upload_path:
+            await upload_data_chunks(
+                service=client,
+                uploader_class=DataLakeFileChunkUploader,
+                total_size=length,
+                chunk_size=chunk_size,
+                stream=stream,
+                max_concurrency=max_concurrency,
+                validate_content=validate_content,
+                **kwargs)
+        else:
+            await upload_substream_blocks(
+                service=client,
+                uploader_class=DataLakeFileChunkUploader,
+                total_size=length,
+                chunk_size=chunk_size,
+                max_concurrency=max_concurrency,
+                stream=stream,
+                validate_content=validate_content,
+                **kwargs
+            )
+
+        return await client.flush_data(position=length,
+                                       path_http_headers=path_http_headers,
+                                       modified_access_conditions=modified_access_conditions,
+                                       close=True,
+                                       cls=return_response_headers,
+                                       **kwargs)
+    except HttpResponseError as error:
+        process_storage_error(error)
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/py.typed b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/py.typed
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/filedatalake/py.typed
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/fileshare/__init__.py b/.venv/lib/python3.12/site-packages/azure/storage/fileshare/__init__.py
new file mode 100644
index 00000000..be8da9c2
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/fileshare/__init__.py
@@ -0,0 +1,99 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+import warnings
+
+from ._version import VERSION
+from ._file_client import ShareFileClient
+from ._directory_client import ShareDirectoryClient
+from ._share_client import ShareClient
+from ._share_service_client import ShareServiceClient
+from ._lease import ShareLeaseClient
+from ._shared_access_signature import generate_account_sas, generate_share_sas, generate_file_sas
+from ._shared.policies import ExponentialRetry, LinearRetry
+from ._shared.models import (
+    LocationMode,
+    ResourceTypes,
+    AccountSasPermissions,
+    StorageErrorCode,
+    Services,
+)
+from ._models import (
+    ShareProperties,
+    DirectoryProperties,
+    Handle,
+    FileProperties,
+    Metrics,
+    RetentionPolicy,
+    CorsRule,
+    ShareSmbSettings,
+    SmbMultichannel,
+    ShareProtocolSettings,
+    ShareProtocols,
+    AccessPolicy,
+    FileSasPermissions,
+    ShareSasPermissions,
+    ContentSettings,
+    NTFSAttributes,
+)
+from ._generated.models import (
+    ShareAccessTier,
+    ShareRootSquash
+)
+
+__version__ = VERSION
+
+
+__all__ = [
+    'ShareFileClient',
+    'ShareDirectoryClient',
+    'ShareClient',
+    'ShareServiceClient',
+    'ShareLeaseClient',
+    'ExponentialRetry',
+    'LinearRetry',
+    'LocationMode',
+    'ResourceTypes',
+    'AccountSasPermissions',
+    'StorageErrorCode',
+    'Metrics',
+    'RetentionPolicy',
+    'CorsRule',
+    'ShareSmbSettings',
+    'ShareAccessTier',
+    'SmbMultichannel',
+    'ShareProtocolSettings',
+    'AccessPolicy',
+    'FileSasPermissions',
+    'ShareSasPermissions',
+    'ShareProtocols',
+    'ShareProperties',
+    'DirectoryProperties',
+    'FileProperties',
+    'ContentSettings',
+    'Handle',
+    'NTFSAttributes',
+    'ShareRootSquash',
+    'generate_account_sas',
+    'generate_share_sas',
+    'generate_file_sas',
+    'Services'
+]
+
+
+# This function is added to deal with HandleItem, a generated model that
+# was mistakenly added to the module exports. It has been removed from the
+# imports and __all__ to prevent it from showing in intellisense/docs, but we
+# handle it here to prevent breaking any existing code which may have imported it.
+def __getattr__(name):
+    if name == 'HandleItem':
+        from ._generated.models import HandleItem
+        warnings.warn(
+            "HandleItem is deprecated and should not be used. Use Handle instead.",
+            DeprecationWarning
+        )
+        return HandleItem
+
+    raise AttributeError(f"module 'azure.storage.fileshare' has no attribute {name}")
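+
+
+# Illustration of the shim above (a sketch, not part of the module): the
+# deprecated name still resolves via __getattr__ but emits a warning:
+#
+#     >>> import warnings
+#     >>> with warnings.catch_warnings(record=True) as caught:
+#     ...     warnings.simplefilter("always")
+#     ...     from azure.storage.fileshare import HandleItem
+#     ...     assert issubclass(caught[-1].category, DeprecationWarning)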
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/fileshare/_deserialize.py b/.venv/lib/python3.12/site-packages/azure/storage/fileshare/_deserialize.py
new file mode 100644
index 00000000..5db600d5
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/fileshare/_deserialize.py
@@ -0,0 +1,85 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+from typing import (
+    Any, cast, Dict, List, Optional, Tuple,
+    TYPE_CHECKING
+)
+
+from ._generated.models import ShareFileRangeList
+from ._models import DirectoryProperties, FileProperties, ShareProperties
+from ._shared.response_handlers import deserialize_metadata
+
+if TYPE_CHECKING:
+    from azure.core.pipeline import PipelineResponse
+    from ._shared.models import LocationMode
+
+
+def deserialize_share_properties(response: "PipelineResponse", obj: Any, headers: Dict[str, Any]) -> ShareProperties:
+    metadata = deserialize_metadata(response, obj, headers)
+    share_properties = ShareProperties(
+        metadata=metadata,
+        **headers
+    )
+    return share_properties
+
+
+def deserialize_directory_properties(
+    response: "PipelineResponse",
+    obj: Any,
+    headers: Dict[str, Any]
+) -> DirectoryProperties:
+    metadata = deserialize_metadata(response, obj, headers)
+    directory_properties = DirectoryProperties(
+        metadata=metadata,
+        **headers
+    )
+    return directory_properties
+
+
+def deserialize_file_properties(response: "PipelineResponse", obj: Any, headers: Dict[str, Any]) -> FileProperties:
+    metadata = deserialize_metadata(response, obj, headers)
+    file_properties = FileProperties(
+        metadata=metadata,
+        **headers
+    )
+    if 'Content-Range' in headers:
+        if 'x-ms-content-md5' in headers:
+            file_properties.content_settings.content_md5 = headers['x-ms-content-md5']
+        else:
+            file_properties.content_settings.content_md5 = None
+    return file_properties
+
+
+def deserialize_file_stream(
+    response: "PipelineResponse",
+    obj: Any,
+    headers: Dict[str, Any]
+) -> Tuple["LocationMode", Any]:
+    file_properties = deserialize_file_properties(response, obj, headers)
+    obj.properties = file_properties
+    return response.http_response.location_mode, obj
+
+
+# Extracts the file permission
+def deserialize_permission(response: "PipelineResponse", obj: Any, headers: Dict[str, Any]) -> Optional[str]:  # pylint: disable=unused-argument
+    return cast(Optional[str], obj.permission)
+
+
+# Extracts the file permission key
+def deserialize_permission_key(response: "PipelineResponse", obj: Any, headers: Dict[str, Any]) -> Optional[str]:  # pylint: disable=unused-argument
+    if response is None or headers is None:
+        return None
+    return cast(Optional[str], headers.get('x-ms-file-permission-key', None))
+
+
+def get_file_ranges_result(ranges: ShareFileRangeList) -> Tuple[List[Dict[str, int]], List[Dict[str, int]]]:
+    file_ranges = []
+    clear_ranges = []
+    if ranges.ranges:
+        file_ranges = [{'start': file_range.start, 'end': file_range.end} for file_range in ranges.ranges]
+    if ranges.clear_ranges:
+        clear_ranges = [{'start': clear_range.start, 'end': clear_range.end} for clear_range in ranges.clear_ranges]
+    return file_ranges, clear_ranges
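+
+
+# A hedged sketch (not part of this module) of get_file_ranges_result: it
+# flattens the generated ShareFileRangeList into two plain lists of
+# {'start', 'end'} dicts. The _Range stand-in below only mimics the generated
+# models' attribute shape, and the keyword-argument constructor is an
+# assumption about the generated model.
+def _example_get_file_ranges_result() -> None:
+    class _Range:
+        def __init__(self, start: int, end: int) -> None:
+            self.start, self.end = start, end
+
+    ranges = ShareFileRangeList(ranges=[_Range(0, 511)], clear_ranges=[_Range(512, 1023)])
+    file_ranges, clear_ranges = get_file_ranges_result(ranges)
+    assert file_ranges == [{'start': 0, 'end': 511}]
+    assert clear_ranges == [{'start': 512, 'end': 1023}]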
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/fileshare/_directory_client.py b/.venv/lib/python3.12/site-packages/azure/storage/fileshare/_directory_client.py
new file mode 100644
index 00000000..0d97031f
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/fileshare/_directory_client.py
@@ -0,0 +1,983 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+# pylint: disable=docstring-keyword-should-match-keyword-only
+
+import functools
+import sys
+import time
+from datetime import datetime
+from typing import (
+    Any, AnyStr, cast, Dict, IO, Iterable, Optional, Union,
+    TYPE_CHECKING
+)
+from typing_extensions import Self
+
+from azure.core.exceptions import HttpResponseError, ResourceNotFoundError
+from azure.core.paging import ItemPaged
+from azure.core.pipeline import Pipeline
+from azure.core.tracing.decorator import distributed_trace
+from ._deserialize import deserialize_directory_properties
+from ._directory_client_helpers import (
+    _format_url,
+    _from_directory_url,
+    _parse_url
+)
+from ._file_client import ShareFileClient
+from ._generated import AzureFileStorage
+from ._models import DirectoryPropertiesPaged, Handle, HandlesPaged
+from ._parser import _datetime_to_str, _get_file_permission, _parse_snapshot
+from ._serialize import get_api_version, get_dest_access_conditions, get_rename_smb_properties
+from ._shared.base_client import parse_connection_str, parse_query, StorageAccountHostsMixin, TransportWrapper
+from ._shared.request_handlers import add_metadata_headers
+from ._shared.response_handlers import process_storage_error, return_response_headers
+
+if sys.version_info >= (3, 8):
+    from typing import Literal
+else:
+    from typing_extensions import Literal
+
+if TYPE_CHECKING:
+    from azure.core.credentials import AzureNamedKeyCredential, AzureSasCredential, TokenCredential
+    from ._models import DirectoryProperties, FileProperties, NTFSAttributes
+
+
+class ShareDirectoryClient(StorageAccountHostsMixin):
+    """A client to interact with a specific directory, although it may not yet exist.
+
+    For operations relating to a specific subdirectory or file in this share, the clients for those
+    entities can also be retrieved using the :func:`get_subdirectory_client` and :func:`get_file_client` functions.
+
+    For more optional configuration, please click
+    `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+    #optional-configuration>`__.
+
+    :param str account_url:
+        The URI to the storage account. In order to create a client given the full URI to the directory,
+        use the :func:`from_directory_url` classmethod.
+    :param share_name:
+        The name of the share for the directory.
+    :type share_name: str
+    :param str directory_path:
+        The directory path for the directory with which to interact.
+        If specified, this value will override a directory value specified in the directory URL.
+    :param str snapshot:
+        An optional share snapshot on which to operate. This can be the snapshot ID string
+        or the response returned from :func:`ShareClient.create_snapshot`.
+    :param credential:
+        The credentials with which to authenticate. This is optional if the
+        account URL already has a SAS token. The value can be a SAS token string,
+        an instance of an AzureSasCredential or AzureNamedKeyCredential from azure.core.credentials,
+        an account shared access key, or an instance of a TokenCredentials class from azure.identity.
+        If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential
+        - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError.
+        If using an instance of AzureNamedKeyCredential, "name" should be the storage account name, and "key"
+        should be the storage account key.
+    :type credential:
+        ~azure.core.credentials.AzureNamedKeyCredential or
+        ~azure.core.credentials.AzureSasCredential or
+        ~azure.core.credentials.TokenCredential or
+        str or dict[str, str] or None
+    :keyword token_intent:
+        Required when using `TokenCredential` for authentication and ignored for other forms of authentication.
+        Specifies the intent for all requests when using `TokenCredential` authentication. Possible values are:
+
+        backup - Specifies requests are intended for backup/admin type operations, meaning that all file/directory
+                 ACLs are bypassed and full permissions are granted. User must also have required RBAC permission.
+
+    :paramtype token_intent: Literal['backup']
+    :keyword bool allow_trailing_dot: If true, the trailing dot will not be trimmed from the target URI.
+    :keyword bool allow_source_trailing_dot: If true, the trailing dot will not be trimmed from the source URI.
+    :keyword str api_version:
+        The Storage API version to use for requests. Default value is the most recent service version that is
+        compatible with the current SDK. Setting to an older version may result in reduced feature compatibility.
+
+        .. versionadded:: 12.1.0
+
+    :keyword str secondary_hostname:
+        The hostname of the secondary endpoint.
+    :keyword int max_range_size: The maximum range size used for a file upload. Defaults to 4*1024*1024.
+    :keyword str audience: The audience to use when requesting tokens for Azure Active Directory
+        authentication. Only has an effect when credential is of type TokenCredential. The value could be
+        https://storage.azure.com/ (default) or https://<account>.file.core.windows.net.
+    """
+    def __init__(
+        self, account_url: str,
+        share_name: str,
+        directory_path: str,
+        snapshot: Optional[Union[str, Dict[str, Any]]] = None,
+        credential: Optional[Union[str, Dict[str, str], "AzureNamedKeyCredential", "AzureSasCredential", "TokenCredential"]] = None,  # pylint: disable=line-too-long
+        *,
+        token_intent: Optional[Literal['backup']] = None,
+        **kwargs: Any
+    ) -> None:
+        if hasattr(credential, 'get_token') and not token_intent:
+            raise ValueError("'token_intent' keyword is required when 'credential' is an TokenCredential.")
+        parsed_url = _parse_url(account_url, share_name)
+        path_snapshot, sas_token = parse_query(parsed_url.query)
+        if not sas_token and not credential:
+            raise ValueError(
+                'You need to provide either an account shared key or SAS token when creating a storage service.')
+        self.snapshot = _parse_snapshot(snapshot, path_snapshot)
+        self.share_name = share_name
+        self.directory_path = directory_path
+
+        self._query_str, credential = self._format_query_string(
+            sas_token, credential, share_snapshot=self.snapshot)
+        super(ShareDirectoryClient, self).__init__(
+            parsed_url, service='file-share', credential=credential, **kwargs)
+        self.allow_trailing_dot = kwargs.pop('allow_trailing_dot', None)
+        self.allow_source_trailing_dot = kwargs.pop('allow_source_trailing_dot', None)
+        self.file_request_intent = token_intent
+        self._client = AzureFileStorage(url=self.url, base_url=self.url, pipeline=self._pipeline,
+                                        allow_trailing_dot=self.allow_trailing_dot,
+                                        allow_source_trailing_dot=self.allow_source_trailing_dot,
+                                        file_request_intent=self.file_request_intent)
+        self._client._config.version = get_api_version(kwargs)  # type: ignore [assignment]
+
+    @classmethod
+    def from_directory_url(
+        cls, directory_url: str,
+        snapshot: Optional[Union[str, Dict[str, Any]]] = None,
+        credential: Optional[Union[str, Dict[str, str], "AzureNamedKeyCredential", "AzureSasCredential", "TokenCredential"]] = None,  # pylint: disable=line-too-long
+        **kwargs: Any
+    ) -> Self:
+        """Create a ShareDirectoryClient from a directory url.
+
+        :param str directory_url:
+            The full URI to the directory.
+        :param str snapshot:
+            An optional share snapshot on which to operate. This can be the snapshot ID string
+            or the response returned from :func:`ShareClient.create_snapshot`.
+        :param credential:
+            The credentials with which to authenticate. This is optional if the
+            account URL already has a SAS token. The value can be a SAS token string,
+            an instance of an AzureSasCredential or AzureNamedKeyCredential from azure.core.credentials,
+            an account shared access key, or an instance of a TokenCredentials class from azure.identity.
+            If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential
+            - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError.
+            If using an instance of AzureNamedKeyCredential, "name" should be the storage account name, and "key"
+            should be the storage account key.
+        :type credential:
+            ~azure.core.credentials.AzureNamedKeyCredential or
+            ~azure.core.credentials.AzureSasCredential or
+            ~azure.core.credentials.TokenCredential or
+            str or dict[str, str] or None
+        :keyword str audience: The audience to use when requesting tokens for Azure Active Directory
+            authentication. Only has an effect when credential is of type TokenCredential. The value could be
+            https://storage.azure.com/ (default) or https://<account>.file.core.windows.net.
+        :returns: A directory client.
+        :rtype: ~azure.storage.fileshare.ShareDirectoryClient
+        """
+        account_url, share_name, directory_path, snapshot = _from_directory_url(directory_url, snapshot)
+        return cls(
+            account_url=account_url, share_name=share_name, directory_path=directory_path,
+            snapshot=snapshot, credential=credential, **kwargs)
+
+    def _format_url(self, hostname: str) -> str:
+        """Format the endpoint URL according to the current location mode hostname.
+
+        :param str hostname:
+            The hostname of the current location mode.
+        :returns: A formatted endpoint URL including the current location mode hostname.
+        :rtype: str
+        """
+        return _format_url(self.scheme, hostname, self.share_name, self.directory_path, self._query_str)
+
+    @classmethod
+    def from_connection_string(
+        cls, conn_str: str,
+        share_name: str,
+        directory_path: str,
+        credential: Optional[Union[str, Dict[str, str], "AzureNamedKeyCredential", "AzureSasCredential", "TokenCredential"]] = None,  # pylint: disable=line-too-long
+        **kwargs: Any
+    ) -> Self:
+        """Create ShareDirectoryClient from a Connection String.
+
+        :param str conn_str:
+            A connection string to an Azure Storage account.
+        :param share_name: The name of the share.
+        :type share_name: str
+        :param str directory_path:
+            The directory path.
+        :param credential:
+            The credentials with which to authenticate. This is optional if the
+            account URL already has a SAS token. The value can be a SAS token string,
+            an instance of an AzureSasCredential or AzureNamedKeyCredential from azure.core.credentials,
+            an account shared access key, or an instance of a TokenCredentials class from azure.identity.
+            If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential
+            - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError.
+            If using an instance of AzureNamedKeyCredential, "name" should be the storage account name, and "key"
+            should be the storage account key.
+        :type credential:
+            Optional[Union[str, dict[str, str], AzureNamedKeyCredential, AzureSasCredential, "TokenCredential"]]
+        :keyword str audience: The audience to use when requesting tokens for Azure Active Directory
+            authentication. Only has an effect when credential is of type TokenCredential. The value could be
+            https://storage.azure.com/ (default) or https://<account>.file.core.windows.net.
+        :returns: A directory client.
+        :rtype: ~azure.storage.fileshare.ShareDirectoryClient
+        """
+        account_url, secondary, credential = parse_connection_str(conn_str, credential, 'file')
+        if 'secondary_hostname' not in kwargs:
+            kwargs['secondary_hostname'] = secondary
+        return cls(
+            account_url, share_name=share_name, directory_path=directory_path, credential=credential, **kwargs)
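+
+    # A hedged usage sketch (account, key and paths below are placeholders):
+    # the constructor, from_directory_url and from_connection_string are three
+    # equivalent ways to reach the same directory.
+    #
+    #     client = ShareDirectoryClient.from_connection_string(
+    #         "DefaultEndpointsProtocol=https;AccountName=myaccount;"
+    #         "AccountKey=<key>;EndpointSuffix=core.windows.net",
+    #         share_name="myshare", directory_path="parent/child")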
+
+    def get_file_client(self, file_name: str, **kwargs: Any) -> ShareFileClient:
+        """Get a client to interact with a specific file.
+
+        The file need not already exist.
+
+        :param str file_name: The name of the file.
+        :returns: A File Client.
+        :rtype: ~azure.storage.fileshare.ShareFileClient
+        """
+        if self.directory_path:
+            file_name = self.directory_path.rstrip('/') + "/" + file_name
+
+        _pipeline = Pipeline(
+            transport=TransportWrapper(self._pipeline._transport),  # pylint: disable=protected-access
+            policies=self._pipeline._impl_policies  # pylint: disable=protected-access
+        )
+        return ShareFileClient(
+            self.url, file_path=file_name, share_name=self.share_name, snapshot=self.snapshot,
+            credential=self.credential, token_intent=self.file_request_intent, api_version=self.api_version,
+            _hosts=self._hosts, _configuration=self._config,
+            _pipeline=_pipeline, _location_mode=self._location_mode,
+            allow_trailing_dot=self.allow_trailing_dot,
+            allow_source_trailing_dot=self.allow_source_trailing_dot, **kwargs)
+
+    def get_subdirectory_client(self, directory_name: str, **kwargs: Any) -> "ShareDirectoryClient":
+        """Get a client to interact with a specific subdirectory.
+
+        The subdirectory need not already exist.
+
+        :param str directory_name:
+            The name of the subdirectory.
+        :returns: A Directory Client.
+        :rtype: ~azure.storage.fileshare.ShareDirectoryClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/file_samples_directory.py
+                :start-after: [START get_subdirectory_client]
+                :end-before: [END get_subdirectory_client]
+                :language: python
+                :dedent: 12
+                :caption: Gets the subdirectory client.
+        """
+        directory_path = directory_name
+        if self.directory_path:
+            directory_path = self.directory_path.rstrip('/') + "/" + directory_name
+
+        _pipeline = Pipeline(
+            transport=TransportWrapper(self._pipeline._transport),  # pylint: disable=protected-access
+            policies=self._pipeline._impl_policies  # pylint: disable=protected-access
+        )
+        return ShareDirectoryClient(
+            self.url, share_name=self.share_name, directory_path=directory_path, snapshot=self.snapshot,
+            credential=self.credential, token_intent=self.file_request_intent, api_version=self.api_version,
+            _hosts=self._hosts, _configuration=self._config, _pipeline=_pipeline,
+            _location_mode=self._location_mode, allow_trailing_dot=self.allow_trailing_dot,
+            allow_source_trailing_dot=self.allow_source_trailing_dot, **kwargs)
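+
+    # Sketch of the client-side path joining above (names hypothetical): both
+    # accessors strip any trailing '/' from directory_path, append the child
+    # name, and reuse this client's pipeline, credential and location mode.
+    #
+    #     parent = ShareDirectoryClient(account_url, "myshare", "a/b", credential=cred)
+    #     parent.get_file_client("notes.txt")       # operates on a/b/notes.txt
+    #     parent.get_subdirectory_client("c")       # operates on a/b/c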
+
+    @distributed_trace
+    def create_directory(self, **kwargs: Any) -> Dict[str, Any]:
+        """Creates a new directory under the directory referenced by the client.
+
+        :keyword file_attributes:
+            The file system attributes for files and directories.
+            If not set, the default value is "None" and the attributes will be set to "Archive".
+            Here is an example for when the var type is str: 'Temporary|Archive'.
+            The file_attributes value is not case-sensitive.
+        :paramtype file_attributes: str or ~azure.storage.fileshare.NTFSAttributes or None
+        :keyword file_creation_time: Creation time for the directory.
+        :paramtype file_creation_time: str or ~datetime.datetime or None
+        :keyword file_last_write_time: Last write time for the directory.
+        :paramtype file_last_write_time: str or ~datetime.datetime or None
+        :keyword str file_permission:
+            If specified, the permission (security descriptor) shall be set
+            for the directory/file. This header can be used if Permission size is
+            <= 8KB, else file-permission-key header shall be used.
+            Default value: Inherit. If SDDL is specified as input, it must have owner, group and dacl.
+            Note: Only one of the file-permission or file-permission-key should be specified.
+        :keyword str file_permission_key:
+            Key of the permission to be set for the directory/file.
+            Note: Only one of the file-permission or file-permission-key should be specified.
+        :keyword file_permission_format:
+            Specifies the format in which the permission is returned. If not specified, SDDL will be the default.
+        :paramtype file_permission_format: Literal['sddl', 'binary']
+        :keyword file_change_time:
+            Change time for the directory. If not specified, change time will be set to the current date/time.
+
+            .. versionadded:: 12.8.0
+
+                This parameter was introduced in API version '2021-06-08'.
+
+        :paramtype file_change_time: str or ~datetime.datetime
+        :keyword metadata:
+            Name-value pairs associated with the directory as metadata.
+        :paramtype metadata: Optional[dict[str, str]]
+        :keyword str owner:
+            NFS only. The owner of the directory.
+        :keyword str group:
+            NFS only. The owning group of the directory.
+        :keyword str file_mode:
+            NFS only. The file mode of the directory.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :returns: Directory-updated property dict (Etag and last modified).
+        :rtype: dict[str, Any]
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/file_samples_directory.py
+                :start-after: [START create_directory]
+                :end-before: [END create_directory]
+                :language: python
+                :dedent: 12
+                :caption: Creates a directory.
+        """
+        timeout = kwargs.pop('timeout', None)
+        metadata = kwargs.pop('metadata', None)
+        headers = kwargs.pop('headers', {})
+        headers.update(add_metadata_headers(metadata))
+
+        file_attributes = kwargs.pop('file_attributes', None)
+        file_creation_time = kwargs.pop('file_creation_time', None)
+        file_last_write_time = kwargs.pop('file_last_write_time', None)
+        file_change_time = kwargs.pop('file_change_time', None)
+        file_permission = kwargs.pop('file_permission', None)
+        file_permission_key = kwargs.pop('file_permission_key', None)
+        file_permission = _get_file_permission(file_permission, file_permission_key, None)
+
+        try:
+            return cast(Dict[str, Any], self._client.directory.create(
+                file_attributes=str(file_attributes) if file_attributes is not None else file_attributes,
+                file_creation_time=_datetime_to_str(file_creation_time),
+                file_last_write_time=_datetime_to_str(file_last_write_time),
+                file_change_time=_datetime_to_str(file_change_time),
+                file_permission=file_permission,
+                file_permission_key=file_permission_key,
+                timeout=timeout,
+                cls=return_response_headers,
+                headers=headers,
+                **kwargs))
+        except HttpResponseError as error:
+            process_storage_error(error)
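+
+    # A hedged example (values illustrative, not from the sample referenced
+    # above): SMB properties accept strings or their model/datetime forms, and
+    # the returned dict carries the new etag and last-modified time.
+    #
+    #     from datetime import datetime, timezone
+    #     props = dir_client.create_directory(
+    #         metadata={"project": "alpha"},
+    #         file_attributes="Temporary|Archive",
+    #         file_creation_time=datetime.now(timezone.utc))
+    #     etag, modified = props["etag"], props["last_modified"]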
+
+    @distributed_trace
+    def delete_directory(self, **kwargs: Any) -> None:
+        """Marks the directory for deletion. The directory is
+        later deleted during garbage collection.
+
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :rtype: None
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/file_samples_directory.py
+                :start-after: [START delete_directory]
+                :end-before: [END delete_directory]
+                :language: python
+                :dedent: 12
+                :caption: Deletes a directory.
+        """
+        timeout = kwargs.pop('timeout', None)
+        try:
+            self._client.directory.delete(timeout=timeout, **kwargs)
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    @distributed_trace
+    def rename_directory(self, new_name: str, **kwargs: Any) -> "ShareDirectoryClient":
+        """
+        Rename the source directory.
+
+        :param str new_name:
+            The new directory name.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :keyword bool overwrite:
+            A boolean value specifying whether, if the destination file already exists, this request
+            will overwrite it. If true, the rename will succeed and will overwrite the destination
+            file. If not provided or false and the destination file does exist, the request will not
+            overwrite the destination file. If the destination file doesn't exist, the rename will
+            succeed whether or not this value is provided.
+        :keyword bool ignore_read_only:
+            A boolean value that specifies whether the ReadOnly attribute on a preexisting destination
+            file should be respected. If true, the rename will succeed, otherwise, a previous file at the
+            destination with the ReadOnly attribute set will cause the rename to fail.
+        :keyword str file_permission:
+            If specified, the permission (security descriptor) shall be set for the directory. This header
+            can be used if Permission size is <= 8KB, else file_permission_key shall be used.
+            If SDDL is specified as input, it must have owner, group and dacl.
+            A value of 'preserve' can be passed to preserve source permissions.
+            Note: Only one of the file_permission or file_permission_key should be specified.
+        :keyword str file_permission_key:
+            Key of the permission to be set for the directory.
+            Note: Only one of the file-permission or file-permission-key should be specified.
+        :keyword file_permission_format:
+            Specifies the format in which the permission is returned. If not specified, SDDL will be the default.
+        :paramtype file_permission_format: Literal['sddl', 'binary']
+        :keyword file_attributes:
+            The file system attributes for the directory.
+        :paramtype file_attributes: ~azure.storage.fileshare.NTFSAttributes or str
+        :keyword file_creation_time:
+            Creation time for the directory.
+        :paramtype file_creation_time: ~datetime.datetime or str
+        :keyword file_last_write_time:
+            Last write time for the file.
+        :paramtype file_last_write_time: ~datetime.datetime or str
+        :keyword file_change_time:
+            Change time for the directory. If not specified, change time will be set to the current date/time.
+
+            .. versionadded:: 12.8.0
+
+                This parameter was introduced in API version '2021-06-08'.
+
+        :paramtype file_change_time: str or ~datetime.datetime
+        :keyword Dict[str,str] metadata:
+            A name-value pair to associate with a file storage object.
+        :keyword destination_lease:
+            Required if the destination file has an active lease. Value can be a ShareLeaseClient object
+            or the lease ID as a string.
+        :paramtype destination_lease: ~azure.storage.fileshare.ShareLeaseClient or str
+        :returns: The new Directory Client.
+        :rtype: ~azure.storage.fileshare.ShareDirectoryClient
+        """
+        if not new_name:
+            raise ValueError("Please specify a new directory name.")
+
+        new_name = new_name.strip('/')
+        new_path_and_query = new_name.split('?')
+        new_dir_path = new_path_and_query[0]
+        if len(new_path_and_query) == 2:
+            new_dir_sas = new_path_and_query[1] or self._query_str.strip('?')
+        else:
+            new_dir_sas = self._query_str.strip('?')
+
+        new_directory_client = ShareDirectoryClient(
+            f'{self.scheme}://{self.primary_hostname}', self.share_name, new_dir_path,
+            credential=new_dir_sas or self.credential, api_version=self.api_version,
+            _hosts=self._hosts, _configuration=self._config, _pipeline=self._pipeline,
+            _location_mode=self._location_mode, allow_trailing_dot=self.allow_trailing_dot,
+            allow_source_trailing_dot=self.allow_source_trailing_dot, token_intent=self.file_request_intent
+        )
+
+        kwargs.update(get_rename_smb_properties(kwargs))
+
+        timeout = kwargs.pop('timeout', None)
+        overwrite = kwargs.pop('overwrite', None)
+        metadata = kwargs.pop('metadata', None)
+        headers = kwargs.pop('headers', {})
+        headers.update(add_metadata_headers(metadata))
+
+        destination_access_conditions = get_dest_access_conditions(kwargs.pop('destination_lease', None))
+
+        try:
+            new_directory_client._client.directory.rename(  # pylint: disable=protected-access
+                self.url,
+                timeout=timeout,
+                replace_if_exists=overwrite,
+                destination_lease_access_conditions=destination_access_conditions,
+                headers=headers,
+                **kwargs)
+
+            return new_directory_client
+        except HttpResponseError as error:
+            process_storage_error(error)
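+
+    # Sketch of the destination parsing above (SAS value hypothetical): a
+    # new_name of "archive/2024?<dest-sas>" renames to 'archive/2024' using
+    # <dest-sas> for the destination; without a '?', this client's own SAS
+    # (if any) is reused.
+    #
+    #     renamed = dir_client.rename_directory("archive/2024?" + dest_sas)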
+
+    @distributed_trace
+    def list_directories_and_files(
+        self,
+        name_starts_with: Optional[str] = None,
+        **kwargs: Any
+    ) -> ItemPaged[Union["DirectoryProperties", "FileProperties"]]:
+        """Lists all the directories and files under the directory.
+
+        :param str name_starts_with:
+            Filters the results to return only entities whose names
+            begin with the specified prefix.
+        :keyword List[str] include:
+            Include this parameter to specify one or more datasets to include in the response.
+            Possible str values are "timestamps", "Etag", "Attributes", "PermissionKey".
+
+            .. versionadded:: 12.6.0
+
+            This keyword argument was introduced in API version '2020-10-02'.
+
+        :keyword bool include_extended_info:
+            If this is set to true, the file id will be returned in listed results.
+
+            .. versionadded:: 12.6.0
+
+            This keyword argument was introduced in API version '2020-10-02'.
+
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :returns: An auto-paging iterable of dict-like DirectoryProperties and FileProperties
+        :rtype: ~azure.core.paging.ItemPaged[Union[DirectoryProperties, FileProperties]]
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/file_samples_directory.py
+                :start-after: [START lists_directory]
+                :end-before: [END lists_directory]
+                :language: python
+                :dedent: 12
+                :caption: List directories and files.
+        """
+        timeout = kwargs.pop('timeout', None)
+        results_per_page = kwargs.pop('results_per_page', None)
+        command = functools.partial(
+            self._client.directory.list_files_and_directories_segment,
+            sharesnapshot=self.snapshot,
+            timeout=timeout,
+            **kwargs)
+        return ItemPaged(
+            command, prefix=name_starts_with, results_per_page=results_per_page,
+            page_iterator_class=DirectoryPropertiesPaged)
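+
+    # Hedged iteration sketch: the pager mixes DirectoryProperties and
+    # FileProperties items; checking for a 'size' attribute is one assumed way
+    # to tell them apart, since only files carry a size.
+    #
+    #     for item in dir_client.list_directories_and_files(name_starts_with="log"):
+    #         kind = "file" if getattr(item, "size", None) is not None else "dir"
+    #         print(kind, item.name)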
+
+    @distributed_trace
+    def list_handles(self, recursive: bool = False, **kwargs: Any) -> ItemPaged[Handle]:
+        """Lists opened handles on a directory or a file under the directory.
+
+        :param bool recursive:
+            A boolean specifying whether the operation should apply to the directory specified by
+            the client, its files, its subdirectories, and their files. Default value is False.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :returns: An auto-paging iterable of Handle
+        :rtype: ~azure.core.paging.ItemPaged[~azure.storage.fileshare.Handle]
+        """
+        timeout = kwargs.pop('timeout', None)
+        results_per_page = kwargs.pop('results_per_page', None)
+        command = functools.partial(
+            self._client.directory.list_handles,
+            sharesnapshot=self.snapshot,
+            timeout=timeout,
+            recursive=recursive,
+            **kwargs)
+        return ItemPaged(
+            command, results_per_page=results_per_page,
+            page_iterator_class=HandlesPaged)
+
+    @distributed_trace
+    def close_handle(self, handle: Union[str, Handle], **kwargs: Any) -> Dict[str, int]:
+        """Close an open file handle.
+
+        :param handle:
+            A specific handle to close.
+        :type handle: str or ~azure.storage.fileshare.Handle
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :returns:
+            The number of handles closed (this may be 0 if the specified handle was not found)
+            and the number of handles that failed to close, in a dict.
+        :rtype: dict[str, int]
+        """
+        if isinstance(handle, Handle):
+            handle_id = handle.id
+        else:
+            handle_id = handle
+        if handle_id == '*':
+            raise ValueError("Handle ID '*' is not supported. Use 'close_all_handles' instead.")
+        try:
+            response = self._client.directory.force_close_handles(
+                handle_id,
+                marker=None,
+                recursive=None,
+                sharesnapshot=self.snapshot,
+                cls=return_response_headers,
+                **kwargs
+            )
+            return {
+                'closed_handles_count': response.get('number_of_handles_closed', 0),
+                'failed_handles_count': response.get('number_of_handles_failed', 0)
+            }
+        except HttpResponseError as error:
+            process_storage_error(error)
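+
+    # Usage sketch (assumes an existing dir_client): handles usually come from
+    # list_handles(); either the Handle model or its id string is accepted.
+    #
+    #     for h in dir_client.list_handles(recursive=True):
+    #         counts = dir_client.close_handle(h)
+    #         print(counts["closed_handles_count"], counts["failed_handles_count"])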
+
+    @distributed_trace
+    def close_all_handles(self, recursive: bool = False, **kwargs: Any) -> Dict[str, int]:
+        """Close any open file handles.
+
+        This operation will block until the service has closed all open handles.
+
+        :param bool recursive:
+            A boolean specifying whether the operation should apply to the directory specified by
+            the client, its files, its subdirectories, and their files. Default value is False.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :returns: The number of handles closed (this may be 0 if no open handles were found)
+            and the number of handles that failed to close, in a dict.
+        :rtype: dict[str, int]
+        """
+        timeout = kwargs.pop('timeout', None)
+        start_time = time.time()
+
+        try_close = True
+        continuation_token = None
+        total_closed = 0
+        total_failed = 0
+        while try_close:
+            try:
+                response = self._client.directory.force_close_handles(
+                    handle_id='*',
+                    timeout=timeout,
+                    marker=continuation_token,
+                    recursive=recursive,
+                    sharesnapshot=self.snapshot,
+                    cls=return_response_headers,
+                    **kwargs
+                )
+            except HttpResponseError as error:
+                process_storage_error(error)
+            continuation_token = response.get('marker')
+            try_close = bool(continuation_token)
+            total_closed += response.get('number_of_handles_closed', 0)
+            total_failed += response.get('number_of_handles_failed', 0)
+            if timeout:
+                timeout = max(0, timeout - (time.time() - start_time))
+        return {
+            'closed_handles_count': total_closed,
+            'failed_handles_count': total_failed
+        }
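+
+    # The loop above drains handles page by page: '*' closes a batch, the
+    # 'marker' continuation token drives the next call, and any server-side
+    # timeout budget is reduced as wall-clock time elapses. A hedged one-liner:
+    #
+    #     counts = dir_client.close_all_handles(recursive=True, timeout=30)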
+
+    @distributed_trace
+    def get_directory_properties(self, **kwargs: Any) -> "DirectoryProperties":
+        """Returns all user-defined metadata and system properties for the
+        specified directory. The data returned does not include the directory's
+        list of files.
+
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :returns: DirectoryProperties
+        :rtype: ~azure.storage.fileshare.DirectoryProperties
+        """
+        timeout = kwargs.pop('timeout', None)
+        try:
+            response = cast("DirectoryProperties", self._client.directory.get_properties(
+                timeout=timeout,
+                cls=deserialize_directory_properties,
+                **kwargs))
+        except HttpResponseError as error:
+            process_storage_error(error)
+        return response
+
+    @distributed_trace
+    def set_directory_metadata(self, metadata: Dict[str, Any], **kwargs: Any) -> Dict[str, Any]:
+        """Sets the metadata for the directory.
+
+        Each call to this operation replaces all existing metadata
+        attached to the directory. To remove all metadata from the directory,
+        call this operation with an empty metadata dict.
+
+        :param metadata:
+            Name-value pairs associated with the directory as metadata.
+        :type metadata: dict[str, str]
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :returns: Directory-updated property dict (Etag and last modified).
+        :rtype: dict[str, Any]
+        """
+        timeout = kwargs.pop('timeout', None)
+        headers = kwargs.pop('headers', {})
+        headers.update(add_metadata_headers(metadata))
+        try:
+            return cast(Dict[str, Any], self._client.directory.set_metadata(
+                timeout=timeout,
+                cls=return_response_headers,
+                headers=headers,
+                **kwargs))
+        except HttpResponseError as error:
+            process_storage_error(error)
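+
+    # Sketch: metadata is replaced, not merged, so clearing it is simply:
+    #
+    #     dir_client.set_directory_metadata({})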
+
+    @distributed_trace
+    def exists(self, **kwargs: Any) -> bool:
+        """
+        Returns True if a directory exists and returns False otherwise.
+
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :returns: True if the directory exists, False otherwise.
+        :rtype: bool
+        """
+        try:
+            self._client.directory.get_properties(**kwargs)
+            return True
+        except HttpResponseError as error:
+            try:
+                process_storage_error(error)
+            except ResourceNotFoundError:
+                return False
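+
+    # Sketch of the pattern above: only "not found" maps to False; other
+    # storage errors (e.g. authorization failures) still propagate.
+    #
+    #     if not dir_client.exists():
+    #         dir_client.create_directory()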
+
+    @distributed_trace
+    def set_http_headers(
+        self, file_attributes: Optional[Union[str, "NTFSAttributes"]] = None,
+        file_creation_time: Optional[Union[str, datetime]] = None,
+        file_last_write_time: Optional[Union[str, datetime]] = None,
+        file_permission: Optional[str] = None,
+        permission_key: Optional[str] = None,
+        **kwargs: Any
+    ) -> Dict[str, Any]:
+        """Sets HTTP headers on the directory.
+
+        :param file_attributes:
+            The file system attributes for files and directories.
+            If not set, indicates preservation of existing values.
+            Here is an example for when the var type is str: 'Temporary|Archive'
+        :type file_attributes: str or ~azure.storage.fileshare.NTFSAttributes or None
+        :param file_creation_time: Creation time for the file
+        :type file_creation_time: str or ~datetime.datetime or None
+        :param file_last_write_time: Last write time for the file
+        :type file_last_write_time: str or ~datetime.datetime or None
+        :param file_permission: If specified, the permission (security
+            descriptor) shall be set for the directory/file. This header can be
+            used if Permission size is <= 8KB, else x-ms-file-permission-key
+            header shall be used. Default value: Inherit. If SDDL is specified as
+            input, it must have owner, group and dacl. Note: Only one of the
+            x-ms-file-permission or x-ms-file-permission-key should be specified.
+        :type file_permission: str
+        :param permission_key: Key of the permission to be set for the
+            directory/file. Note: Only one of the x-ms-file-permission or
+            x-ms-file-permission-key should be specified.
+        :type permission_key: str
+        :keyword file_permission_format:
+            Specifies the format in which the permission is returned. If not specified, SDDL will be the default.
+        :paramtype file_permission_format: Literal['sddl', 'binary']
+        :keyword file_change_time:
+            Change time for the directory. If not specified, change time will be set to the current date/time.
+
+            .. versionadded:: 12.8.0
+
+                This parameter was introduced in API version '2021-06-08'.
+
+        :paramtype file_change_time: str or ~datetime.datetime
+        :keyword str owner:
+            NFS only. The owner of the directory.
+        :keyword str group:
+            NFS only. The owning group of the directory.
+        :keyword str file_mode:
+            NFS only. The file mode of the directory.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :returns: File-updated property dict (Etag and last modified).
+        :rtype: dict[str, Any]
+        """
+        timeout = kwargs.pop('timeout', None)
+        file_permission = _get_file_permission(file_permission, permission_key, None)
+        file_change_time = kwargs.pop('file_change_time', None)
+        try:
+            return cast(Dict[str, Any], self._client.directory.set_properties(
+                file_attributes=str(file_attributes) if file_attributes is not None else file_attributes,
+                file_creation_time=_datetime_to_str(file_creation_time),
+                file_last_write_time=_datetime_to_str(file_last_write_time),
+                file_change_time=_datetime_to_str(file_change_time),
+                file_permission=file_permission,
+                file_permission_key=permission_key,
+                timeout=timeout,
+                cls=return_response_headers,
+                **kwargs))
+        except HttpResponseError as error:
+            process_storage_error(error)
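+
+    # A hedged example (attribute value illustrative): set only the attributes,
+    # leaving unspecified SMB properties to their preserve/default behavior.
+    #
+    #     dir_client.set_http_headers(file_attributes="ReadOnly")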
+
+    @distributed_trace
+    def create_subdirectory(self, directory_name: str, **kwargs: Any) -> "ShareDirectoryClient":
+        """Creates a new subdirectory and returns a client to interact
+        with the subdirectory.
+
+        :param str directory_name:
+            The name of the subdirectory.
+        :keyword metadata:
+            Name-value pairs associated with the subdirectory as metadata.
+        :paramtype metadata: Optional[dict[str, str]]
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :returns: ShareDirectoryClient
+        :rtype: ~azure.storage.fileshare.ShareDirectoryClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/file_samples_directory.py
+                :start-after: [START create_subdirectory]
+                :end-before: [END create_subdirectory]
+                :language: python
+                :dedent: 12
+                :caption: Create a subdirectory.
+        """
+        metadata = kwargs.pop('metadata', None)
+        timeout = kwargs.pop('timeout', None)
+        subdir = self.get_subdirectory_client(directory_name)
+        subdir.create_directory(metadata=metadata, timeout=timeout, **kwargs)
+        return subdir
+
+    @distributed_trace
+    def delete_subdirectory(self, directory_name: str, **kwargs: Any) -> None:
+        """Deletes a subdirectory.
+
+        :param str directory_name:
+            The name of the subdirectory.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :rtype: None
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/file_samples_directory.py
+                :start-after: [START delete_subdirectory]
+                :end-before: [END delete_subdirectory]
+                :language: python
+                :dedent: 12
+                :caption: Delete a subdirectory.
+        """
+        timeout = kwargs.pop('timeout', None)
+        subdir = self.get_subdirectory_client(directory_name)
+        subdir.delete_directory(timeout=timeout, **kwargs)
+
+    @distributed_trace
+    def upload_file(
+        self, file_name: str,
+        data: Union[bytes, str, Iterable[AnyStr], IO[AnyStr]],
+        length: Optional[int] = None,
+        **kwargs: Any
+    ) -> ShareFileClient:
+        """Creates a new file in the directory and returns a ShareFileClient
+        to interact with the file.
+
+        :param str file_name:
+            The name of the file.
+        :param data:
+            Content of the file.
+        :type data: Union[bytes, str, Iterable[AnyStr], IO[AnyStr]]
+        :param int length:
+            Length of the file in bytes. Specify its maximum size, up to 1 TiB.
+        :keyword metadata:
+            Name-value pairs associated with the file as metadata.
+        :paramtype metadata: Optional[dict[str, str]]
+        :keyword ~azure.storage.fileshare.ContentSettings content_settings:
+            ContentSettings object used to set file properties. Used to set content type, encoding,
+            language, disposition, md5, and cache control.
+        :keyword bool validate_content:
+            If true, calculates an MD5 hash for each range of the file. The storage
+            service checks the hash of the content that has arrived with the hash
+            that was sent. This is primarily valuable for detecting bitflips on
+            the wire if using http instead of https as https (the default) will
+            already validate. Note that this MD5 hash is not stored with the
+            file.
+        :keyword int max_concurrency:
+            Maximum number of parallel connections to use.
+        :keyword progress_hook:
+            A callback to track the progress of a long running upload. The signature is
+            function(current: int, total: Optional[int]) where current is the number of bytes transferred
+            so far, and total is the size of the file or None if the size is unknown.
+        :paramtype progress_hook: Callable[[int, Optional[int]], None]
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :keyword str encoding:
+            Defaults to UTF-8.
+        :returns: ShareFileClient
+        :rtype: ~azure.storage.fileshare.ShareFileClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/file_samples_directory.py
+                :start-after: [START upload_file_to_directory]
+                :end-before: [END upload_file_to_directory]
+                :language: python
+                :dedent: 12
+                :caption: Upload a file to a directory.
+        """
+        file_client = self.get_file_client(file_name)
+        file_client.upload_file(
+            data,
+            length=length,
+            **kwargs)
+        return file_client
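+
+    # Usage sketch (file name and payload are placeholders): bytes, text and
+    # streams are all accepted, and the returned client targets the new file.
+    #
+    #     file_client = dir_client.upload_file("hello.txt", b"hello world")
+    #     props = file_client.get_file_properties()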
+
+    @distributed_trace
+    def delete_file(self, file_name: str, **kwargs: Any) -> None:
+        """Marks the specified file for deletion. The file is later
+        deleted during garbage collection.
+
+        :param str file_name:
+            The name of the file to delete.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :rtype: None
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/file_samples_directory.py
+                :start-after: [START delete_file_in_directory]
+                :end-before: [END delete_file_in_directory]
+                :language: python
+                :dedent: 12
+                :caption: Delete a file in a directory.
+        """
+        file_client = self.get_file_client(file_name)
+        file_client.delete_file(**kwargs)
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/fileshare/_directory_client_helpers.py b/.venv/lib/python3.12/site-packages/azure/storage/fileshare/_directory_client_helpers.py
new file mode 100644
index 00000000..cf37c24f
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/fileshare/_directory_client_helpers.py
@@ -0,0 +1,61 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+
+from typing import (
+    Any, Dict, Optional, Tuple, Union,
+    TYPE_CHECKING
+)
+from urllib.parse import quote, unquote, urlparse
+
+from ._shared.base_client import parse_query
+
+if TYPE_CHECKING:
+    from urllib.parse import ParseResult
+
+
+def _parse_url(account_url: str, share_name: str) -> "ParseResult":
+    try:
+        if not account_url.lower().startswith('http'):
+            account_url = "https://" + account_url
+    except AttributeError as exc:
+        raise ValueError("Account URL must be a string.") from exc
+    parsed_url = urlparse(account_url.rstrip('/'))
+    if not share_name:
+        raise ValueError("Please specify a share name.")
+    if not parsed_url.netloc:
+        raise ValueError(f"Invalid URL: {account_url}")
+    return parsed_url
+
+
+def _format_url(scheme: str, hostname: str, share_name: Union[str, bytes], dir_path: str, query_str: str) -> str:
+    if isinstance(share_name, str):
+        share_name = share_name.encode('UTF-8')
+    directory_path = ""
+    if dir_path:
+        directory_path = "/" + quote(dir_path, safe='~')
+    return f"{scheme}://{hostname}/{quote(share_name)}{directory_path}{query_str}"
+
+
+def _from_directory_url(
+    directory_url: str,
+    snapshot: Optional[Union[str, Dict[str, Any]]] = None
+) -> Tuple[str, str, str, Optional[Union[str, Dict[str, Any]]]]:
+    try:
+        if not directory_url.lower().startswith('http'):
+            directory_url = "https://" + directory_url
+    except AttributeError as exc:
+        raise ValueError("Directory URL must be a string.") from exc
+    parsed_url = urlparse(directory_url.rstrip('/'))
+    if not parsed_url.path and not parsed_url.netloc:
+        raise ValueError(f"Invalid URL: {directory_url}")
+    account_url = parsed_url.netloc.rstrip('/') + "?" + parsed_url.query
+    path_snapshot, _ = parse_query(parsed_url.query)
+
+    share_name, _, path_dir = parsed_url.path.lstrip('/').partition('/')
+    share_name = unquote(share_name)
+    snapshot = snapshot or path_snapshot
+
+    return account_url, share_name, path_dir, snapshot
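+
+
+# Illustrative check of the helpers above (not part of the vendored module;
+# the relative imports mean it only runs in package context). The account URL
+# is a made-up example; real hosts follow the same shape.
+if __name__ == "__main__":
+    account_url, share, dir_path, snap = _from_directory_url(
+        "https://myaccount.file.core.windows.net/myshare/parent/child")
+    # -> ("myaccount.file.core.windows.net?", "myshare", "parent/child", None)
+    print(_format_url("https", "myaccount.file.core.windows.net", share, dir_path, ""))
+    # note: quote(..., safe='~') also escapes '/', so this prints
+    # https://myaccount.file.core.windows.net/myshare/parent%2Fchild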
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/fileshare/_download.py b/.venv/lib/python3.12/site-packages/azure/storage/fileshare/_download.py
new file mode 100644
index 00000000..a37bca9a
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/fileshare/_download.py
@@ -0,0 +1,524 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+
+import threading
+import warnings
+from io import BytesIO
+from typing import (
+    Any, Callable, Generator, IO, Iterator, Optional, Tuple,
+    TYPE_CHECKING
+)
+
+from azure.core.exceptions import HttpResponseError, ResourceModifiedError
+from azure.core.tracing.common import with_current_context
+from ._shared.request_handlers import validate_and_format_range_headers
+from ._shared.response_handlers import parse_length_from_content_range, process_storage_error
+
+if TYPE_CHECKING:
+    from ._generated.operations import FileOperations
+    from ._models import FileProperties
+    from ._shared.models import StorageConfiguration
+
+
+def process_content(data: Any) -> bytes:
+    if data is None:
+        raise ValueError("Response cannot be None.")
+
+    try:
+        return b"".join(list(data))
+    except Exception as error:
+        raise HttpResponseError(message="Download stream interrupted.", response=data.response, error=error) from error
+
+
+class _ChunkDownloader(object):  # pylint: disable=too-many-instance-attributes
+    def __init__(
+        self, client: "FileOperations",
+        total_size: int,
+        chunk_size: int,
+        current_progress: int,
+        start_range: int,
+        end_range: int,
+        validate_content: bool,
+        etag: str,
+        stream: Any = None,
+        parallel: Optional[int] = None,
+        progress_hook: Optional[Callable[[int, Optional[int]], None]] = None,
+        **kwargs: Any
+    ) -> None:
+        self.client = client
+        self.etag = etag
+        # Information on the download range/chunk size
+        self.chunk_size = chunk_size
+        self.total_size = total_size
+        self.start_index = start_range
+        self.end_index = end_range
+
+        # The destination that we will write to
+        self.stream = stream
+        self.stream_lock = threading.Lock() if parallel else None
+        self.progress_lock = threading.Lock() if parallel else None
+        self.progress_hook = progress_hook
+
+        # For a parallel download, the stream is always seekable, so we note down the current position
+        # in order to seek to the right place when out-of-order chunks come in
+        self.stream_start = stream.tell() if parallel else 0
+
+        # Download progress so far
+        self.progress_total = current_progress
+
+        # Parameters for each get operation
+        self.validate_content = validate_content
+        self.request_options = kwargs
+
+    def _calculate_range(self, chunk_start: int) -> Tuple[int, int]:
+        if chunk_start + self.chunk_size > self.end_index:
+            chunk_end = self.end_index
+        else:
+            chunk_end = chunk_start + self.chunk_size
+        return chunk_start, chunk_end
+
+    def get_chunk_offsets(self) -> Generator[int, None, None]:
+        index = self.start_index
+        while index < self.end_index:
+            yield index
+            index += self.chunk_size
+
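+    # Worked example of the chunk math above (assumed numbers): with
+    # chunk_size=4, start_index=0 and end_index=10, get_chunk_offsets yields
+    # 0, 4, 8, and _calculate_range clamps the final chunk to (8, 10).
+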
+    def process_chunk(self, chunk_start: int) -> None:
+        chunk_start, chunk_end = self._calculate_range(chunk_start)
+        chunk_data = self._download_chunk(chunk_start, chunk_end - 1)
+        length = chunk_end - chunk_start
+        if length > 0:
+            self._write_to_stream(chunk_data, chunk_start)
+            self._update_progress(length)
+
+    def yield_chunk(self, chunk_start: int) -> bytes:
+        chunk_start, chunk_end = self._calculate_range(chunk_start)
+        return self._download_chunk(chunk_start, chunk_end - 1)
+
+    def _update_progress(self, length: int) -> None:
+        if self.progress_lock:
+            with self.progress_lock:  # pylint: disable=not-context-manager
+                self.progress_total += length
+        else:
+            self.progress_total += length
+
+        if self.progress_hook:
+            self.progress_hook(self.progress_total, self.total_size)
+
+    def _write_to_stream(self, chunk_data: bytes, chunk_start: int) -> None:
+        if self.stream_lock:
+            with self.stream_lock:  # pylint: disable=not-context-manager
+                self.stream.seek(self.stream_start + (chunk_start - self.start_index))
+                self.stream.write(chunk_data)
+        else:
+            self.stream.write(chunk_data)
+
+    def _download_chunk(self, chunk_start: int, chunk_end: int) -> bytes:
+        range_header, range_validation = validate_and_format_range_headers(
+            chunk_start, chunk_end, check_content_md5=self.validate_content
+        )
+
+        try:
+            response: Any = None
+            _, response = self.client.download(
+                range=range_header,
+                range_get_content_md5=range_validation,
+                validate_content=self.validate_content,
+                data_stream_total=self.total_size,
+                download_stream_current=self.progress_total,
+                **self.request_options
+            )
+            if response.properties.etag != self.etag:
+                raise ResourceModifiedError(message="The file has been modified while downloading.")
+
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+        chunk_data = process_content(response)
+        return chunk_data
+
+
+class _ChunkIterator(object):
+    """Iterator for chunks in file download stream."""
+
+    def __init__(self, size: int, content: bytes, downloader: Optional[_ChunkDownloader], chunk_size: int) -> None:
+        self.size = size
+        self._chunk_size = chunk_size
+        self._current_content = content
+        self._iter_downloader = downloader
+        self._iter_chunks: Optional[Generator[int, None, None]] = None
+        self._complete = size == 0
+
+    def __len__(self) -> int:
+        return self.size
+
+    def __iter__(self) -> Iterator[bytes]:
+        return self
+
+    def __next__(self) -> bytes:
+        if self._complete:
+            raise StopIteration("Download complete")
+        if not self._iter_downloader:
+            # cut the data obtained from initial GET into chunks
+            if len(self._current_content) > self._chunk_size:
+                return self._get_chunk_data()
+            self._complete = True
+            return self._current_content
+
+        if not self._iter_chunks:
+            self._iter_chunks = self._iter_downloader.get_chunk_offsets()
+
+        # initial GET result still has more than _chunk_size bytes of data
+        if len(self._current_content) >= self._chunk_size:
+            return self._get_chunk_data()
+
+        try:
+            chunk = next(self._iter_chunks)
+            self._current_content += self._iter_downloader.yield_chunk(chunk)
+        except StopIteration as e:
+            self._complete = True
+            if self._current_content:
+                return self._current_content
+            raise e
+
+        return self._get_chunk_data()
+
+    next = __next__  # Python 2 compatibility.
+
+    def _get_chunk_data(self) -> bytes:
+        chunk_data = self._current_content[: self._chunk_size]
+        self._current_content = self._current_content[self._chunk_size:]
+        return chunk_data
+
+
+class StorageStreamDownloader(object):  # pylint: disable=too-many-instance-attributes
+    """A streaming object to download from Azure Storage."""
+
+    name: str
+    """The name of the file being downloaded."""
+    path: str
+    """The full path of the file."""
+    share: str
+    """The name of the share where the file is."""
+    properties: "FileProperties"
+    """The properties of the file being downloaded. If only a range of the data is being
+        downloaded, this will be reflected in the properties."""
+    size: int
+    """The size of the total data in the stream. This will be the byte range if specified,
+        otherwise the total size of the file."""
+
+    def __init__(
+        self, client: "FileOperations" = None,  # type: ignore [assignment]
+        config: "StorageConfiguration" = None,  # type: ignore [assignment]
+        start_range: Optional[int] = None,
+        end_range: Optional[int] = None,
+        validate_content: bool = None,  # type: ignore [assignment]
+        max_concurrency: int = 1,
+        name: str = None,  # type: ignore [assignment]
+        path: str = None,  # type: ignore [assignment]
+        share: str = None,  # type: ignore [assignment]
+        encoding: Optional[str] = None,
+        **kwargs: Any
+    ) -> None:
+        self.name = name
+        self.path = path
+        self.share = share
+        self.size = 0
+
+        self._client = client
+        self._config = config
+        self._start_range = start_range
+        self._end_range = end_range
+        self._max_concurrency = max_concurrency
+        self._encoding = encoding
+        self._validate_content = validate_content
+        self._progress_hook = kwargs.pop('progress_hook', None)
+        self._request_options = kwargs
+        self._location_mode = None
+        self._download_complete = False
+        self._current_content = b""
+        self._file_size = 0
+        self._response = None
+        self._etag = ""
+
+        # The service only provides transactional MD5s for chunks under 4MB.
+        # If validate_content is on, get only self.MAX_CHUNK_GET_SIZE for the first
+        # chunk so a transactional MD5 can be retrieved.
+        self._first_get_size = (
+            self._config.max_single_get_size if not self._validate_content else self._config.max_chunk_get_size
+        )
+        initial_request_start = self._start_range or 0
+        if self._end_range is not None and self._end_range - initial_request_start < self._first_get_size:
+            initial_request_end = self._end_range
+        else:
+            initial_request_end = initial_request_start + self._first_get_size - 1
+
+        self._initial_range = (initial_request_start, initial_request_end)
+
+        self._response = self._initial_request()
+        self.properties = self._response.properties
+        self.properties.name = self.name
+        self.properties.path = self.path
+        self.properties.share = self.share
+
+        # Set the content length to the download size instead of the size of
+        # the last range
+        self.properties.size = self.size
+
+        # Overwrite the content range to the user requested range
+        self.properties.content_range = f"bytes {self._start_range}-{self._end_range}/{self._file_size}"
+
+        # Overwrite the content MD5 as it is the MD5 for the last range instead
+        # of the stored MD5
+        # TODO: Set to the stored MD5 when the service returns this
+        self.properties.content_md5 = None  # type: ignore [attr-defined]
+
+        if self.size == 0:
+            self._current_content = b""
+        else:
+            self._current_content = process_content(self._response)
+
+    def __len__(self) -> int:
+        return self.size
+
+    def _initial_request(self):
+        range_header, range_validation = validate_and_format_range_headers(
+            self._initial_range[0],
+            self._initial_range[1],
+            start_range_required=False,
+            end_range_required=False,
+            check_content_md5=self._validate_content
+        )
+
+        try:
+            location_mode, response = self._client.download(
+                range=range_header,
+                range_get_content_md5=range_validation,
+                validate_content=self._validate_content,
+                data_stream_total=None,
+                download_stream_current=0,
+                **self._request_options
+            )
+
+            # Check the location we read from to ensure we use the same one
+            # for subsequent requests.
+            self._location_mode = location_mode
+
+            # Parse the total file size and adjust the download size if ranges
+            # were specified
+            self._file_size = parse_length_from_content_range(response.properties.content_range)
+            if self._file_size is None:
+                raise ValueError("Required Content-Range response header is missing or malformed.")
+
+            if self._end_range is not None:
+                # Use the end range index unless it is over the end of the file
+                self.size = min(self._file_size, self._end_range - self._start_range + 1)
+            elif self._start_range is not None:
+                self.size = self._file_size - self._start_range
+            else:
+                self.size = self._file_size
+
+        except HttpResponseError as error:
+            if self._start_range is None and error.response and error.response.status_code == 416:
+                # Get range will fail on an empty file. If the user did not
+                # request a range, do a regular get request in order to get
+                # any properties.
+                try:
+                    _, response = self._client.download(
+                        validate_content=self._validate_content,
+                        data_stream_total=0,
+                        download_stream_current=0,
+                        **self._request_options
+                    )
+                except HttpResponseError as e:
+                    process_storage_error(e)
+
+                # Set the download size to empty
+                self.size = 0
+                self._file_size = 0
+            else:
+                process_storage_error(error)
+
+        # If the file is small, the download is complete at this point.
+        # If file size is large, download the rest of the file in chunks.
+        if response.properties.size == self.size:
+            self._download_complete = True
+        self._etag = response.properties.etag
+        return response
+
+    def chunks(self) -> Iterator[bytes]:
+        """
+        Iterate over chunks in the download stream.
+
+        :return: An iterator of the chunks in the download stream.
+        :rtype: Iterator[bytes]
+        """
+        if self.size == 0 or self._download_complete:
+            iter_downloader = None
+        else:
+            data_end = self._file_size
+            if self._end_range is not None:
+                # Use the end range index unless it is over the end of the file
+                data_end = min(self._file_size, self._end_range + 1)
+            iter_downloader = _ChunkDownloader(
+                client=self._client,
+                total_size=self.size,
+                chunk_size=self._config.max_chunk_get_size,
+                current_progress=self._first_get_size,
+                start_range=self._initial_range[1] + 1,  # start where the first download ended
+                end_range=data_end,
+                stream=None,
+                parallel=False,
+                validate_content=self._validate_content,
+                use_location=self._location_mode,
+                etag=self._etag,
+                **self._request_options
+            )
+        return _ChunkIterator(
+            size=self.size,
+            content=self._current_content,
+            downloader=iter_downloader,
+            chunk_size=self._config.max_chunk_get_size)
+
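+    # A hedged usage sketch: stream a download chunk-by-chunk instead of
+    # buffering it whole. "file_client" is an assumed ShareFileClient whose
+    # download_file() returns this StorageStreamDownloader.
+    #
+    #     downloader = file_client.download_file()
+    #     with open("local.bin", "wb") as handle:
+    #         for chunk in downloader.chunks():
+    #             handle.write(chunk)
+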
+    def readall(self) -> bytes:
+        """Download the contents of this file.
+
+        This operation is blocking until all data is downloaded.
+
+        :return: The entire file content as bytes.
+        :rtype: bytes
+        """
+        stream = BytesIO()
+        self.readinto(stream)
+        data = stream.getvalue()
+        if self._encoding:
+            return data.decode(self._encoding)  # type: ignore [return-value]
+        return data
+
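+    # Sketch (assumed client): readall buffers the entire download in memory,
+    # so prefer chunks() or readinto() for very large files.
+    #
+    #     data = file_client.download_file().readall()
+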
+    def content_as_bytes(self, max_concurrency=1):
+        """DEPRECATED: Download the contents of this file.
+
+        This operation is blocking until all data is downloaded.
+
+        This method is deprecated, use :func:`readall` instead.
+
+        :param int max_concurrency:
+            The number of parallel connections with which to download.
+        :return: The contents of the file as bytes.
+        :rtype: bytes
+        """
+        warnings.warn(
+            "content_as_bytes is deprecated, use readall instead",
+            DeprecationWarning
+        )
+        self._max_concurrency = max_concurrency
+        return self.readall()
+
+    def content_as_text(self, max_concurrency=1, encoding="UTF-8"):
+        """DEPRECATED: Download the contents of this file, and decode as text.
+
+        This operation is blocking until all data is downloaded.
+
+        This method is deprecated, use :func:`readall` instead.
+
+        :param int max_concurrency:
+            The number of parallel connections with which to download.
+        :param str encoding:
+            The text encoding to use to decode the downloaded bytes. Default is UTF-8.
+        :return: The contents of the file as a str.
+        :rtype: str
+        """
+        warnings.warn(
+            "content_as_text is deprecated, use readall instead",
+            DeprecationWarning
+        )
+        self._max_concurrency = max_concurrency
+        self._encoding = encoding
+        return self.readall()
+
+    def readinto(self, stream: IO[bytes]) -> int:
+        """Download the contents of this file to a stream.
+
+        :param IO[bytes] stream:
+            The stream to download to. This can be an open file-handle,
+            or any writable stream. The stream must be seekable if the download
+            uses more than one parallel connection.
+        :returns: The number of bytes read.
+        :rtype: int
+        """
+        # The stream must be seekable if parallel download is required
+        parallel = self._max_concurrency > 1
+        if parallel:
+            error_message = "Target stream handle must be seekable."
+            if not stream.seekable():
+                raise ValueError(error_message)
+
+            try:
+                stream.seek(stream.tell())
+            except (NotImplementedError, AttributeError) as exc:
+                raise ValueError(error_message) from exc
+
+        # Write the content to the user stream
+        stream.write(self._current_content)
+        if self._progress_hook:
+            self._progress_hook(len(self._current_content), self.size)
+
+        if self._download_complete:
+            return self.size
+
+        data_end = self._file_size
+        if self._end_range is not None:
+            # Use the length unless it is over the end of the file
+            data_end = min(self._file_size, self._end_range + 1)
+
+        downloader = _ChunkDownloader(
+            client=self._client,
+            total_size=self.size,
+            chunk_size=self._config.max_chunk_get_size,
+            current_progress=self._first_get_size,
+            start_range=self._initial_range[1] + 1,  # Start where the first download ended
+            end_range=data_end,
+            stream=stream,
+            parallel=parallel,
+            validate_content=self._validate_content,
+            use_location=self._location_mode,
+            progress_hook=self._progress_hook,
+            etag=self._etag,
+            **self._request_options
+        )
+        if parallel:
+            import concurrent.futures
+            with concurrent.futures.ThreadPoolExecutor(self._max_concurrency) as executor:
+                list(executor.map(
+                        with_current_context(downloader.process_chunk),
+                        downloader.get_chunk_offsets()
+                    ))
+        else:
+            for chunk in downloader.get_chunk_offsets():
+                downloader.process_chunk(chunk)
+        return self.size
+
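+    # Sketch: parallel download into a seekable handle. max_concurrency is
+    # fixed when the downloader is created (assumed ShareFileClient API).
+    #
+    #     downloader = file_client.download_file(max_concurrency=4)
+    #     with open("local.bin", "wb") as handle:
+    #         bytes_read = downloader.readinto(handle)
+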
+    def download_to_stream(self, stream, max_concurrency=1):
+        """DEPRECATED: Download the contents of this file to a stream.
+
+        This method is deprecated, use :func:`readinto` instead.
+
+        :param IO stream:
+            The stream to download to. This can be an open file-handle,
+            or any writable stream. The stream must be seekable if the download
+            uses more than one parallel connection.
+        :param int max_concurrency:
+            The number of parallel connections with which to download.
+        :returns: The properties of the downloaded file.
+        :rtype: Any
+        """
+        warnings.warn(
+            "download_to_stream is deprecated, use readinto instead",
+            DeprecationWarning
+        )
+        self._max_concurrency = max_concurrency
+        self.readinto(stream)
+        return self.properties
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/fileshare/_file_client.py b/.venv/lib/python3.12/site-packages/azure/storage/fileshare/_file_client.py
new file mode 100644
index 00000000..e2fc4f11
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/fileshare/_file_client.py
@@ -0,0 +1,1739 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+# pylint: disable=docstring-keyword-should-match-keyword-only, too-many-lines, too-many-public-methods
+
+import functools
+import sys
+import time
+from datetime import datetime
+from io import BytesIO
+from typing import (
+    Any, AnyStr, Callable, cast, Dict, IO, Iterable, List, Optional, Tuple, Union,
+    TYPE_CHECKING
+)
+from typing_extensions import Self
+
+from azure.core.exceptions import HttpResponseError, ResourceNotFoundError
+from azure.core.paging import ItemPaged
+from azure.core.tracing.decorator import distributed_trace
+from ._deserialize import deserialize_file_properties, deserialize_file_stream, get_file_ranges_result
+from ._download import StorageStreamDownloader
+from ._file_client_helpers import (
+    _format_url,
+    _from_file_url,
+    _get_ranges_options,
+    _parse_url,
+    _upload_range_from_url_options
+)
+from ._generated import AzureFileStorage
+from ._generated.models import FileHTTPHeaders
+from ._lease import ShareLeaseClient
+from ._models import FileProperties, Handle, HandlesPaged
+from ._parser import _datetime_to_str, _get_file_permission, _parse_snapshot
+from ._serialize import (
+    get_access_conditions,
+    get_api_version,
+    get_dest_access_conditions,
+    get_rename_smb_properties,
+    get_smb_properties,
+    get_source_access_conditions
+)
+from ._shared.base_client import StorageAccountHostsMixin, parse_connection_str, parse_query
+from ._shared.request_handlers import add_metadata_headers, get_length
+from ._shared.response_handlers import return_response_headers, process_storage_error
+from ._shared.uploads import IterStreamer, FileChunkUploader, upload_data_chunks
+
+if sys.version_info >= (3, 8):
+    from typing import Literal
+else:
+    from typing_extensions import Literal
+
+if TYPE_CHECKING:
+    from azure.core.credentials import AzureNamedKeyCredential, AzureSasCredential, TokenCredential
+    from ._models import ContentSettings, NTFSAttributes
+    from ._shared.base_client import StorageConfiguration
+
+
+def _upload_file_helper(
+    client: "ShareFileClient",
+    stream: Any,
+    size: Optional[int],
+    metadata: Optional[Dict[str, str]],
+    content_settings: Optional["ContentSettings"],
+    validate_content: bool,
+    timeout: Optional[int],
+    max_concurrency: int,
+    file_settings: "StorageConfiguration",
+    file_attributes: Optional[Union[str, "NTFSAttributes"]] = None,
+    file_creation_time: Optional[Union[str, datetime]] = None,
+    file_last_write_time: Optional[Union[str, datetime]] = None,
+    file_permission: Optional[str] = None,
+    file_permission_key: Optional[str] = None,
+    progress_hook: Optional[Callable[[int, Optional[int]], None]] = None,
+    **kwargs: Any
+) -> Dict[str, Any]:
+    try:
+        if size is None or size < 0:
+            raise ValueError("A content size must be specified for a File.")
+        response = client.create_file(
+            size,
+            content_settings=content_settings,
+            metadata=metadata,
+            timeout=timeout,
+            file_attributes=file_attributes,
+            file_creation_time=file_creation_time,
+            file_last_write_time=file_last_write_time,
+            file_permission=file_permission,
+            permission_key=file_permission_key,
+            **kwargs
+        )
+        if size == 0:
+            return response
+
+        responses = upload_data_chunks(
+            service=client,
+            uploader_class=FileChunkUploader,
+            total_size=size,
+            chunk_size=file_settings.max_range_size,
+            stream=stream,
+            max_concurrency=max_concurrency,
+            validate_content=validate_content,
+            progress_hook=progress_hook,
+            timeout=timeout,
+            **kwargs
+        )
+        return cast(Dict[str, Any], sorted(responses, key=lambda r: r.get('last_modified'))[-1])
+    except HttpResponseError as error:
+        process_storage_error(error)
+
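+# The helper above first calls create_file to reserve the full size, then
+# streams the content in max_range_size chunks via upload_data_chunks and
+# returns the headers of the most recently modified chunk response.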
+
+class ShareFileClient(StorageAccountHostsMixin):
+    """A client to interact with a specific file, although that file may not yet exist.
+
+    For more optional configuration, please click
+    `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+    #optional-configuration>`__.
+
+    :param str account_url:
+        The URI to the storage account. In order to create a client given the full URI to the
+        file, use the :func:`from_file_url` classmethod.
+    :param share_name:
+        The name of the share for the file.
+    :type share_name: str
+    :param str file_path:
+        The file path to the file with which to interact. If specified, this value will override
+        a file value specified in the file URL.
+    :param str snapshot:
+        An optional file snapshot on which to operate. This can be the snapshot ID string
+        or the response returned from :func:`ShareClient.create_snapshot`.
+    :param credential:
+        The credentials with which to authenticate. This is optional if the
+        account URL already has a SAS token. The value can be a SAS token string,
+        an instance of an AzureSasCredential or AzureNamedKeyCredential from azure.core.credentials,
+        an account shared access key, or an instance of a TokenCredential class from azure.identity.
+        If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential
+        - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError.
+        If using an instance of AzureNamedKeyCredential, "name" should be the storage account name, and "key"
+        should be the storage account key.
+    :type credential:
+        ~azure.core.credentials.AzureNamedKeyCredential or
+        ~azure.core.credentials.AzureSasCredential or
+        ~azure.core.credentials.TokenCredential or
+        str or dict[str, str] or None
+    :keyword token_intent:
+        Required when using `TokenCredential` for authentication and ignored for other forms of authentication.
+        Specifies the intent for all requests when using `TokenCredential` authentication. Possible values are:
+
+        backup - Specifies requests are intended for backup/admin type operations, meaning that all file/directory
+                 ACLs are bypassed and full permissions are granted. User must also have required RBAC permission.
+
+    :paramtype token_intent: Literal['backup']
+    :keyword bool allow_trailing_dot: If true, the trailing dot will not be trimmed from the target URI.
+    :keyword bool allow_source_trailing_dot: If true, the trailing dot will not be trimmed from the source URI.
+    :keyword str api_version:
+        The Storage API version to use for requests. Default value is the most recent service version that is
+        compatible with the current SDK. Setting to an older version may result in reduced feature compatibility.
+
+        .. versionadded:: 12.1.0
+
+    :keyword str secondary_hostname:
+        The hostname of the secondary endpoint.
+    :keyword int max_range_size: The maximum range size used for a file upload. Defaults to 4*1024*1024.
+    :keyword str audience: The audience to use when requesting tokens for Azure Active Directory
+        authentication. Only has an effect when credential is of type TokenCredential. The value could be
+        https://storage.azure.com/ (default) or https://<account>.file.core.windows.net.
+    """
+    def __init__(
+        self, account_url: str,
+        share_name: str,
+        file_path: str,
+        snapshot: Optional[Union[str, Dict[str, Any]]] = None,
+        credential: Optional[Union[str, Dict[str, str], "AzureNamedKeyCredential", "AzureSasCredential", "TokenCredential"]] = None,  # pylint: disable=line-too-long
+        *,
+        token_intent: Optional[Literal['backup']] = None,
+        **kwargs: Any
+    ) -> None:
+        if hasattr(credential, 'get_token') and not token_intent:
+            raise ValueError("'token_intent' keyword is required when 'credential' is a TokenCredential.")
+        parsed_url = _parse_url(account_url, share_name, file_path)
+        path_snapshot, sas_token = parse_query(parsed_url.query)
+        if not sas_token and not credential:
+            raise ValueError(
+                'You need to provide either an account shared key or SAS token when creating a storage service.')
+        self.snapshot = _parse_snapshot(snapshot, path_snapshot)
+        self.share_name = share_name
+        self.file_path = file_path.split('/')
+        self.file_name = self.file_path[-1]
+        self.directory_path = "/".join(self.file_path[:-1])
+
+        self._query_str, credential = self._format_query_string(
+            sas_token, credential, share_snapshot=self.snapshot)
+        super(ShareFileClient, self).__init__(
+            parsed_url, service='file-share', credential=credential, **kwargs)
+        self.allow_trailing_dot = kwargs.pop('allow_trailing_dot', None)
+        self.allow_source_trailing_dot = kwargs.pop('allow_source_trailing_dot', None)
+        self.file_request_intent = token_intent
+        self._client = AzureFileStorage(url=self.url, base_url=self.url, pipeline=self._pipeline,
+                                        allow_trailing_dot=self.allow_trailing_dot,
+                                        allow_source_trailing_dot=self.allow_source_trailing_dot,
+                                        file_request_intent=self.file_request_intent)
+        self._client._config.version = get_api_version(kwargs)  # type: ignore [assignment]
+
+    @classmethod
+    def from_file_url(
+        cls, file_url: str,
+        snapshot: Optional[Union[str, Dict[str, Any]]] = None,
+        credential: Optional[Union[str, Dict[str, str], "AzureNamedKeyCredential", "AzureSasCredential", "TokenCredential"]] = None,  # pylint: disable=line-too-long
+        **kwargs: Any
+    ) -> Self:
+        """A client to interact with a specific file, although that file may not yet exist.
+
+        :param str file_url: The full URI to the file.
+        :param str snapshot:
+            An optional file snapshot on which to operate. This can be the snapshot ID string
+            or the response returned from :func:`ShareClient.create_snapshot`.
+        :param credential:
+            The credentials with which to authenticate. This is optional if the
+            account URL already has a SAS token. The value can be a SAS token string,
+            an instance of an AzureSasCredential or AzureNamedKeyCredential from azure.core.credentials,
+            an account shared access key, or an instance of a TokenCredential class from azure.identity.
+            If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential
+            - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError.
+            If using an instance of AzureNamedKeyCredential, "name" should be the storage account name, and "key"
+            should be the storage account key.
+        :type credential:
+            ~azure.core.credentials.AzureNamedKeyCredential or
+            ~azure.core.credentials.AzureSasCredential or
+            ~azure.core.credentials.TokenCredential or
+            str or dict[str, str] or None
+        :keyword str audience: The audience to use when requesting tokens for Azure Active Directory
+            authentication. Only has an effect when credential is of type TokenCredential. The value could be
+            https://storage.azure.com/ (default) or https://<account>.file.core.windows.net.
+        :returns: A File client.
+        :rtype: ~azure.storage.fileshare.ShareFileClient
+        """
+        account_url, share_name, file_path, snapshot = _from_file_url(file_url, snapshot)
+        return cls(account_url, share_name, file_path, snapshot, credential, **kwargs)
+
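+    # Sketch: building a client straight from a file URL. The URL and SAS
+    # token below are placeholders; some credential is still required.
+    #
+    #     client = ShareFileClient.from_file_url(
+    #         "https://myaccount.file.core.windows.net/myshare/dir/file.txt",
+    #         credential="<sas-token>")
+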
+    def _format_url(self, hostname: str):
+        return _format_url(self.scheme, hostname, self.share_name, self.file_path, self._query_str)
+
+    @classmethod
+    def from_connection_string(
+        cls, conn_str: str,
+        share_name: str,
+        file_path: str,
+        snapshot: Optional[Union[str, Dict[str, Any]]] = None,
+        credential: Optional[Union[str, Dict[str, str], "AzureNamedKeyCredential", "AzureSasCredential", "TokenCredential"]] = None,  # pylint: disable=line-too-long
+        **kwargs: Any
+    ) -> Self:
+        """Create ShareFileClient from a Connection String.
+
+        :param str conn_str:
+            A connection string to an Azure Storage account.
+        :param share_name: The name of the share.
+        :type share_name: str
+        :param str file_path:
+            The file path.
+        :param str snapshot:
+            An optional file snapshot on which to operate. This can be the snapshot ID string
+            or the response returned from :func:`ShareClient.create_snapshot`.
+        :param credential:
+            The credentials with which to authenticate. This is optional if the
+            account URL already has a SAS token. The value can be a SAS token string,
+            an instance of an AzureSasCredential or AzureNamedKeyCredential from azure.core.credentials,
+            an account shared access key, or an instance of a TokenCredential class from azure.identity.
+            If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential
+            - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError.
+            If using an instance of AzureNamedKeyCredential, "name" should be the storage account name, and "key"
+            should be the storage account key.
+        :type credential:
+            ~azure.core.credentials.AzureNamedKeyCredential or
+            ~azure.core.credentials.AzureSasCredential or
+            ~azure.core.credentials.TokenCredential or
+            str or dict[str, str] or None
+        :keyword str audience: The audience to use when requesting tokens for Azure Active Directory
+            authentication. Only has an effect when credential is of type TokenCredential. The value could be
+            https://storage.azure.com/ (default) or https://<account>.file.core.windows.net.
+        :returns: A File client.
+        :rtype: ~azure.storage.fileshare.ShareFileClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/file_samples_hello_world.py
+                :start-after: [START create_file_client]
+                :end-before: [END create_file_client]
+                :language: python
+                :dedent: 12
+                :caption: Creates the file client with connection string.
+        """
+        account_url, secondary, credential = parse_connection_str(conn_str, credential, 'file')
+        if 'secondary_hostname' not in kwargs:
+            kwargs['secondary_hostname'] = secondary
+        return cls(
+            account_url, share_name=share_name, file_path=file_path, snapshot=snapshot, credential=credential, **kwargs)
+
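+    # Sketch equivalent of the referenced sample (placeholder values):
+    #
+    #     client = ShareFileClient.from_connection_string(
+    #         conn_str="<connection-string>", share_name="myshare",
+    #         file_path="dir/file.txt")
+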
+    @distributed_trace
+    def acquire_lease(self, lease_id: Optional[str] = None, **kwargs: Any) -> ShareLeaseClient:
+        """Requests a new lease.
+
+        If the file does not have an active lease, the File
+        Service creates a lease on the file and returns a new lease.
+
+        :param str lease_id:
+            Proposed lease ID, in a GUID string format. The File Service
+            returns 400 (Invalid request) if the proposed lease ID is not
+            in the correct format.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :returns: A ShareLeaseClient object.
+        :rtype: ~azure.storage.fileshare.ShareLeaseClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/file_samples_client.py
+                :start-after: [START acquire_and_release_lease_on_file]
+                :end-before: [END acquire_and_release_lease_on_file]
+                :language: python
+                :dedent: 12
+                :caption: Acquiring a lease on a file.
+        """
+        kwargs['lease_duration'] = -1
+        lease = ShareLeaseClient(self, lease_id=lease_id)
+        lease.acquire(**kwargs)
+        return lease
+
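+    # Sketch: acquire and release an infinite lease (lease_duration is pinned
+    # to -1 above); "file_client" is an assumed existing ShareFileClient.
+    #
+    #     lease = file_client.acquire_lease()
+    #     try:
+    #         ...  # calls that pass lease=lease
+    #     finally:
+    #         lease.release()
+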
+    @distributed_trace
+    def exists(self, **kwargs: Any) -> bool:
+        """
+        Returns True if the file exists and returns False otherwise.
+
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :returns: True if the file exists, False otherwise.
+        :rtype: bool
+        """
+        try:
+            self._client.file.get_properties(**kwargs)
+            return True
+        except HttpResponseError as error:
+            try:
+                process_storage_error(error)
+            except ResourceNotFoundError:
+                return False
+
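+    # Sketch: gate a call on existence. Note the check is inherently racy;
+    # the file can be deleted between exists() and the follow-up request.
+    #
+    #     if file_client.exists():
+    #         props = file_client.get_properties()
+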
+    @distributed_trace
+    def create_file(
+        self, size: int,
+        file_attributes: Optional[Union[str, "NTFSAttributes"]] = None,
+        file_creation_time: Optional[Union[str, datetime]] = None,
+        file_last_write_time: Optional[Union[str, datetime]] = None,
+        file_permission: Optional[str] = None,
+        permission_key: Optional[str] = None,
+        **kwargs: Any
+    ) -> Dict[str, Any]:
+        """Creates a new file.
+
+        Note that it only initializes the file with no content.
+
+        :param int size: Specifies the maximum size for the file,
+            up to 1 TB.
+        :param file_attributes:
+            The file system attributes for files and directories.
+            If not set, the default value is "None" and the attributes will be set to "Archive".
+            Here is an example for when the var type is str: 'Temporary|Archive'.
+            file_attributes value is not case sensitive.
+        :type file_attributes: str or ~azure.storage.fileshare.NTFSAttributes or None
+        :param file_creation_time: Creation time for the file
+            Default value: Now.
+        :type file_creation_time: str or ~datetime.datetime or None
+        :param file_last_write_time: Last write time for the file
+            Default value: Now.
+        :type file_last_write_time: str or ~datetime.datetime or None
+        :param file_permission: If specified the permission (security
+            descriptor) shall be set for the directory/file. This header can be
+            used if Permission size is <= 8KB, else x-ms-file-permission-key
+            header shall be used. Default value: Inherit. If SDDL is specified as
+            input, it must have owner, group and dacl. Note: Only one of the
+            x-ms-file-permission or x-ms-file-permission-key should be specified.
+        :type file_permission: str
+        :param permission_key: Key of the permission to be set for the
+            directory/file. Note: Only one of the x-ms-file-permission or
+            x-ms-file-permission-key should be specified.
+        :type permission_key: str
+        :keyword file_permission_format:
+            Specifies the format in which the permission is returned. If not specified, SDDL will be the default.
+        :paramtype file_permission_format: Literal['sddl', 'binary']
+        :keyword file_change_time:
+            Change time for the file. If not specified, change time will be set to the current date/time.
+
+            .. versionadded:: 12.8.0
+
+                This parameter was introduced in API version '2021-06-08'.
+
+        :paramtype file_change_time: str or ~datetime.datetime
+        :keyword ~azure.storage.fileshare.ContentSettings content_settings:
+            ContentSettings object used to set file properties. Used to set content type, encoding,
+            language, disposition, md5, and cache control.
+        :keyword metadata:
+            Name-value pairs associated with the file as metadata.
+        :paramtype metadata: Optional[dict[str, str]]
+        :keyword lease:
+            Required if the file has an active lease. Value can be a ShareLeaseClient object
+            or the lease ID as a string.
+
+            .. versionadded:: 12.1.0
+
+        :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str
+        :keyword str owner:
+            NFS only. The owner of the file.
+        :keyword str group:
+            NFS only. The owning group of the file.
+        :keyword str file_mode:
+            NFS only. The file mode of the file.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :returns: File-updated property dict (Etag and last modified).
+        :rtype: dict[str, Any]
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/file_samples_client.py
+                :start-after: [START create_file]
+                :end-before: [END create_file]
+                :language: python
+                :dedent: 12
+                :caption: Create a file.
+        """
+        access_conditions = get_access_conditions(kwargs.pop('lease', None))
+        content_settings = kwargs.pop('content_settings', None)
+        metadata = kwargs.pop('metadata', None)
+        timeout = kwargs.pop('timeout', None)
+        headers = kwargs.pop('headers', {})
+        headers.update(add_metadata_headers(metadata))
+        file_http_headers = None
+        if content_settings:
+            file_http_headers = FileHTTPHeaders(
+                file_cache_control=content_settings.cache_control,
+                file_content_type=content_settings.content_type,
+                file_content_md5=bytearray(content_settings.content_md5) if content_settings.content_md5 else None,
+                file_content_encoding=content_settings.content_encoding,
+                file_content_language=content_settings.content_language,
+                file_content_disposition=content_settings.content_disposition
+            )
+        file_permission = _get_file_permission(file_permission, permission_key, None)
+        file_change_time = kwargs.pop('file_change_time', None)
+        try:
+            return cast(Dict[str, Any], self._client.file.create(
+                file_content_length=size,
+                metadata=metadata,
+                file_attributes=str(file_attributes) if file_attributes is not None else file_attributes,
+                file_creation_time=_datetime_to_str(file_creation_time),
+                file_last_write_time=_datetime_to_str(file_last_write_time),
+                file_change_time=_datetime_to_str(file_change_time),
+                file_permission=file_permission,
+                file_permission_key=permission_key,
+                file_http_headers=file_http_headers,
+                lease_access_conditions=access_conditions,
+                headers=headers,
+                timeout=timeout,
+                cls=return_response_headers,
+                **kwargs))
+        except HttpResponseError as error:
+            process_storage_error(error)
+
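+    # Sketch: create_file only reserves the size; content is written
+    # afterwards, e.g. with upload_range (placeholder values):
+    #
+    #     file_client.create_file(size=1024)
+    #     file_client.upload_range(b"abc", offset=0, length=3)
+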
+    @distributed_trace
+    def upload_file(
+        self, data: Union[bytes, str, Iterable[AnyStr], IO[AnyStr]],
+        length: Optional[int] = None,
+        file_attributes: Optional[Union[str, "NTFSAttributes"]] = None,
+        file_creation_time: Optional[Union[str, datetime]] = None,
+        file_last_write_time: Optional[Union[str, datetime]] = None,
+        file_permission: Optional[str] = None,
+        permission_key: Optional[str] = None,
+        **kwargs
+    ) -> Dict[str, Any]:
+        """Uploads a new file.
+
+        :param data:
+            Content of the file.
+        :type data: Union[bytes, str, Iterable[AnyStr], IO[AnyStr]]
+        :param int length:
+            Length of the file in bytes. Specify its maximum size, up to 1 TiB.
+        :param file_attributes:
+            The file system attributes for files and directories.
+            If not set, the default value is "None" and the attributes will be set to "Archive".
+            Here is an example for when the var type is str: 'Temporary|Archive'.
+            file_attributes value is not case sensitive.
+        :type file_attributes: str or ~azure.storage.fileshare.NTFSAttributes
+        :param file_creation_time: Creation time for the file
+            Default value: Now.
+        :type file_creation_time: str or ~datetime.datetime
+        :param file_last_write_time: Last write time for the file
+            Default value: Now.
+        :type file_last_write_time: str or ~datetime.datetime
+        :param file_permission: If specified the permission (security
+            descriptor) shall be set for the directory/file. This header can be
+            used if Permission size is <= 8KB, else x-ms-file-permission-key
+            header shall be used. Default value: Inherit. If SDDL is specified as
+            input, it must have owner, group and dacl. Note: Only one of the
+            x-ms-file-permission or x-ms-file-permission-key should be specified.
+        :type file_permission: str
+        :param permission_key: Key of the permission to be set for the
+            directory/file. Note: Only one of the x-ms-file-permission or
+            x-ms-file-permission-key should be specified.
+        :type permission_key: str
+        :keyword file_change_time:
+            Change time for the file. If not specified, change time will be set to the current date/time.
+
+            .. versionadded:: 12.8.0
+
+                This parameter was introduced in API version '2021-06-08'.
+
+        :paramtype file_change_time: str or ~datetime.datetime
+        :keyword metadata:
+            Name-value pairs associated with the file as metadata.
+        :paramtype metadata: Optional[dict[str, str]]
+        :keyword ~azure.storage.fileshare.ContentSettings content_settings:
+            ContentSettings object used to set file properties. Used to set content type, encoding,
+            language, disposition, md5, and cache control.
+        :keyword bool validate_content:
+            If true, calculates an MD5 hash for each range of the file. The storage
+            service checks the hash of the content that has arrived with the hash
+            that was sent. This is primarily valuable for detecting bitflips on
+            the wire if using http instead of https as https (the default) will
+            already validate. Note that this MD5 hash is not stored with the
+            file.
+        :keyword int max_concurrency:
+            Maximum number of parallel connections to use when transferring the file in chunks.
+            This option does not affect the underlying connection pool, and may
+            require a separate configuration of the connection pool.
+        :keyword lease:
+            Required if the file has an active lease. Value can be a ShareLeaseClient object
+            or the lease ID as a string.
+
+            .. versionadded:: 12.1.0
+
+        :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str
+        :keyword progress_hook:
+            A callback to track the progress of a long running upload. The signature is
+            function(current: int, total: Optional[int]) where current is the number of bytes transferred
+            so far, and total is the size of the file or None if the size is unknown.
+        :paramtype progress_hook: Callable[[int, Optional[int]], None]
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :keyword str encoding:
+            The text encoding to use if data is provided as a str. Defaults to UTF-8.
+        :returns: File-updated property dict (Etag and last modified).
+        :rtype: dict[str, Any]
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/file_samples_client.py
+                :start-after: [START upload_file]
+                :end-before: [END upload_file]
+                :language: python
+                :dedent: 12
+                :caption: Upload a file.
+        """
+        metadata = kwargs.pop('metadata', None)
+        content_settings = kwargs.pop('content_settings', None)
+        max_concurrency = kwargs.pop('max_concurrency', 1)
+        validate_content = kwargs.pop('validate_content', False)
+        progress_hook = kwargs.pop('progress_hook', None)
+        timeout = kwargs.pop('timeout', None)
+        encoding = kwargs.pop('encoding', 'UTF-8')
+
+        if isinstance(data, str):
+            data = data.encode(encoding)
+        if length is None:
+            length = get_length(data)
+        if isinstance(data, bytes):
+            data = data[:length]
+
+        stream: Optional[Any] = None
+        if isinstance(data, bytes):
+            stream = BytesIO(data)
+        elif hasattr(data, 'read'):
+            stream = data
+        elif hasattr(data, '__iter__'):
+            stream = IterStreamer(data, encoding=encoding)
+        else:
+            raise TypeError(f"Unsupported data type: {type(data)}")
+        return _upload_file_helper(
+            self,
+            stream,
+            length,
+            metadata,
+            content_settings,
+            validate_content,
+            timeout,
+            max_concurrency,
+            self._config,
+            file_attributes=file_attributes,
+            file_creation_time=file_creation_time,
+            file_last_write_time=file_last_write_time,
+            file_permission=file_permission,
+            file_permission_key=permission_key,
+            progress_hook=progress_hook,
+            **kwargs)
+
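+    # Sketch: upload from bytes or from an open binary stream; for bytes the
+    # length is inferred via get_length above (placeholder values):
+    #
+    #     file_client.upload_file(b"Hello, world!")
+    #     with open("local.bin", "rb") as handle:
+    #         file_client.upload_file(handle, max_concurrency=4)
+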
+    @distributed_trace
+    def start_copy_from_url(self, source_url: str, **kwargs: Any) -> Dict[str, Any]:
+        """Initiates the copying of data from a source URL into the file
+        referenced by the client.
+
+        The status of this copy operation can be found using the `get_properties`
+        method.
+
+        :param str source_url:
+            Specifies the URL of the source file.
+        :keyword str file_permission:
+            If specified the permission (security descriptor) shall be set for the directory/file.
+            This value can be set to "source" to copy the security descriptor from the source file.
+            Otherwise if set, this value will be used to override the source value. If not set, permission value
+            is inherited from the parent directory of the target file. This setting can be
+            used if Permission size is <= 8KB, otherwise permission_key shall be used.
+            If SDDL is specified as input, it must have owner, group and dacl.
+            Note: Only one of the file_permission or permission_key should be specified.
+
+            .. versionadded:: 12.1.0
+
+                This parameter was introduced in API version '2019-07-07'.
+
+        :keyword str permission_key:
+            Key of the permission to be set for the directory/file.
+            This value can be set to "source" to copy the security descriptor from the source file.
+            Otherwise if set, this value will be used to override the source value. If not set, permission value
+            is inherited from the parent directory of the target file.
+            Note: Only one of the file_permission or permission_key should be specified.
+
+            .. versionadded:: 12.1.0
+
+                This parameter was introduced in API version '2019-07-07'.
+
+        :keyword file_permission_format:
+            Specifies the format in which the permission is returned. If not specified, SDDL will be the default.
+        :paramtype file_permission_format: Literal['sddl', 'binary']
+        :keyword file_attributes:
+            This value can be set to "source" to copy file attributes from the source file to the target file,
+            or to clear all attributes, it can be set to "None". Otherwise it can be set to a list of attributes
+            to set on the target file. If this is not set, the default value is "Archive".
+
+            .. versionadded:: 12.1.0
+
+                This parameter was introduced in API version '2019-07-07'.
+
+        :paramtype file_attributes: str or ~azure.storage.fileshare.NTFSAttributes
+        :keyword file_creation_time:
+            This value can be set to "source" to copy the creation time from the source file to the target file,
+            or a datetime to set as creation time on the target file. This could also be a string in ISO 8601 format.
+            If this is not set, the creation time will be set by the copy engine to the
+            date/time at which the target file was created (or overwritten).
+
+            .. versionadded:: 12.1.0
+
+                This parameter was introduced in API version '2019-07-07'.
+
+        :paramtype file_creation_time: str or ~datetime.datetime
+        :keyword file_last_write_time:
+            This value can be set to "source" to copy the last write time from the source file to the target file, or
+            a datetime to set as the last write time on the target file. This could also be a string in ISO 8601 format.
+            If this is not set, the last write time will be set by the copy engine.
+
+            .. versionadded:: 12.1.0
+
+                This parameter was introduced in API version '2019-07-07'.
+
+        :paramtype file_last_write_time: str or ~datetime.datetime
+        :keyword file_change_time:
+            Change time for the file. If not specified, change time will be set to the current date/time.
+
+            .. versionadded:: 12.9.0
+
+                This parameter was introduced in API version '2021-06-08'.
+
+        :paramtype file_change_time: str or ~datetime.datetime
+        :keyword bool ignore_read_only:
+            Specifies the option to overwrite the target file if it already exists and has the read-only attribute set.
+
+            .. versionadded:: 12.1.0
+
+                This parameter was introduced in API version '2019-07-07'.
+
+        :keyword bool set_archive_attribute:
+            Specifies the option to set the archive attribute on the target file.
+            True means the archive attribute will be set on the target file despite attribute
+            overrides or the source file state.
+
+            .. versionadded:: 12.1.0
+
+                This parameter was introduced in API version '2019-07-07'.
+
+        :keyword metadata:
+            Name-value pairs associated with the file as metadata.
+        :paramtype metadata: Optional[dict[str, str]]
+        :keyword lease:
+            Required if the file has an active lease. Value can be a ShareLeaseClient object
+            or the lease ID as a string.
+
+            .. versionadded:: 12.1.0
+
+        :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str
+        :keyword str owner:
+            NFS only. The owner of the file.
+        :keyword str group:
+            NFS only. The owning group of the file.
+        :keyword str file_mode:
+            NFS only. The file mode of the file.
+        :keyword file_mode_copy_mode:
+            NFS only. Applicable only when the copy source is a File. Determines the copy behavior
+            of the mode bits of the file. Possible values are:
+
+            source - The mode on the destination file is copied from the source file.
+            override - The mode on the destination file is determined via the file_mode keyword.
+        :paramtype file_mode_copy_mode: Literal['source', 'override']
+        :keyword owner_copy_mode:
+            NFS only. Applicable only when the copy source is a File. Determines the copy behavior
+            of the owner and group of the file. Possible values are:
+
+            source - The owner and group on the destination file is copied from the source file.
+            override - The owner and group on the destination file is determined via the owner and group keywords.
+        :paramtype owner_copy_mode: Literal['source', 'override']
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :returns: Response headers after the copy operation has been initiated.
+        :rtype: dict[str, Any]
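+
+        A minimal illustrative sketch (not from the package samples), assuming
+        ``file_client`` is an authenticated ShareFileClient and ``src_url`` is a
+        readable source file URL:
+
+        .. code-block:: python
+
+            # Kick off the server-side copy, then poll its status via properties.
+            copy = file_client.start_copy_from_url(src_url)
+            copy_id = copy['copy_id']
+            status = file_client.get_file_properties().copy.status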
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/file_samples_client.py
+                :start-after: [START copy_file_from_url]
+                :end-before: [END copy_file_from_url]
+                :language: python
+                :dedent: 12
+                :caption: Copy a file from a URL
+        """
+        metadata = kwargs.pop('metadata', None)
+        access_conditions = get_access_conditions(kwargs.pop('lease', None))
+        timeout = kwargs.pop('timeout', None)
+        owner = kwargs.pop('owner', None)
+        group = kwargs.pop('group', None)
+        file_mode = kwargs.pop('file_mode', None)
+        file_mode_copy_mode = kwargs.pop('file_mode_copy_mode', None)
+        file_owner_copy_mode = kwargs.pop('owner_copy_mode', None)
+        headers = kwargs.pop('headers', {})
+        headers.update(add_metadata_headers(metadata))
+        kwargs.update(get_smb_properties(kwargs))
+        try:
+            return cast(Dict[str, Any], self._client.file.start_copy(
+                source_url,
+                metadata=metadata,
+                lease_access_conditions=access_conditions,
+                owner=owner,
+                group=group,
+                file_mode=file_mode,
+                file_mode_copy_mode=file_mode_copy_mode,
+                file_owner_copy_mode=file_owner_copy_mode,
+                headers=headers,
+                cls=return_response_headers,
+                timeout=timeout,
+                **kwargs))
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    @distributed_trace
+    def abort_copy(self, copy_id: Union[str, FileProperties], **kwargs: Any) -> None:
+        """Abort an ongoing copy operation.
+
+        This will leave a destination file with zero length and full metadata.
+        This will raise an error if the copy operation has already ended.
+
+        :param copy_id:
+            The copy operation to abort. This can be either an ID, or an
+            instance of FileProperties.
+        :type copy_id: str or ~azure.storage.fileshare.FileProperties
+        :keyword lease:
+            Required if the file has an active lease. Value can be a ShareLeaseClient object
+            or the lease ID as a string.
+
+            .. versionadded:: 12.1.0
+
+        :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :rtype: None
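+
+        A minimal sketch, assuming ``file_client`` is an authenticated
+        ShareFileClient and a copy started from a hypothetical ``src_url`` is
+        still pending:
+
+        .. code-block:: python
+
+            copy = file_client.start_copy_from_url(src_url)
+            # Abort using the copy ID returned when the copy was initiated.
+            file_client.abort_copy(copy['copy_id'])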
+        """
+        access_conditions = get_access_conditions(kwargs.pop('lease', None))
+        timeout = kwargs.pop('timeout', None)
+
+        if isinstance(copy_id, FileProperties):
+            copy_id = copy_id.copy.id
+        elif isinstance(copy_id, dict):
+            copy_id = copy_id['copy_id']
+        try:
+            self._client.file.abort_copy(copy_id=copy_id,
+                                         lease_access_conditions=access_conditions,
+                                         timeout=timeout, **kwargs)
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    @distributed_trace
+    def download_file(
+        self, offset: Optional[int] = None,
+        length: Optional[int] = None,
+        **kwargs: Any
+    ) -> StorageStreamDownloader:
+        """Downloads a file to the StorageStreamDownloader. The readall() method must
+        be used to read all the content or readinto() must be used to download the file into
+        a stream. Using chunks() returns an iterator which allows the user to iterate over the content in chunks.
+
+        :param int offset:
+            Start of byte range to use for downloading a section of the file.
+            Must be set if length is provided.
+        :param int length:
+            Number of bytes to read from the stream. This is optional, but
+            should be supplied for optimal performance.
+        :keyword int max_concurrency:
+            Maximum number of parallel connections to use when transferring the file in chunks.
+            This option does not affect the underlying connection pool, and may
+            require a separate configuration of the connection pool.
+        :keyword bool validate_content:
+            If true, calculates an MD5 hash for each chunk of the file. The storage
+            service checks the hash of the content that has arrived with the hash
+            that was sent. This is primarily valuable for detecting bitflips on
+            the wire if using http instead of https as https (the default) will
+            already validate. Note that this MD5 hash is not stored with the
+            file. Also note that if enabled, the memory-efficient upload algorithm
+            will not be used, because computing the MD5 hash requires buffering
+            entire blocks, and doing so defeats the purpose of the memory-efficient algorithm.
+        :keyword lease:
+            Required if the file has an active lease. Value can be a ShareLeaseClient object
+            or the lease ID as a string.
+
+            .. versionadded:: 12.1.0
+
+        :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str
+        :keyword progress_hook:
+            A callback to track the progress of a long running download. The signature is
+            function(current: int, total: int) where current is the number of bytes transferred
+            so far, and total is the total size of the download.
+        :paramtype progress_hook: Callable[[int, int], None]
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :returns: A streaming object (StorageStreamDownloader)
+        :rtype: ~azure.storage.fileshare.StorageStreamDownloader
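+
+        An illustrative sketch (assuming an authenticated ``file_client``) that
+        streams the file to local disk:
+
+        .. code-block:: python
+
+            with open('output.dat', 'wb') as stream:
+                downloader = file_client.download_file()
+                # readinto() writes the downloaded bytes into the open stream.
+                downloader.readinto(stream)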
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/file_samples_client.py
+                :start-after: [START download_file]
+                :end-before: [END download_file]
+                :language: python
+                :dedent: 12
+                :caption: Download a file.
+        """
+        range_end = None
+        if length is not None:
+            if offset is None:
+                raise ValueError("Offset value must not be None if length is set.")
+            range_end = offset + length - 1  # Service actually uses an end-range inclusive index
+
+        access_conditions = get_access_conditions(kwargs.pop('lease', None))
+
+        return StorageStreamDownloader(
+            client=self._client.file,
+            config=self._config,
+            start_range=offset,
+            end_range=range_end,
+            name=self.file_name,
+            path='/'.join(self.file_path),
+            share=self.share_name,
+            lease_access_conditions=access_conditions,
+            cls=deserialize_file_stream,
+            **kwargs)
+
+    @distributed_trace
+    def delete_file(self, **kwargs: Any) -> None:
+        """Marks the specified file for deletion. The file is
+        later deleted during garbage collection.
+
+        :keyword lease:
+            Required if the file has an active lease. Value can be a ShareLeaseClient object
+            or the lease ID as a string.
+
+            .. versionadded:: 12.1.0
+
+        :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :rtype: None
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/file_samples_client.py
+                :start-after: [START delete_file]
+                :end-before: [END delete_file]
+                :language: python
+                :dedent: 12
+                :caption: Delete a file.
+        """
+        access_conditions = get_access_conditions(kwargs.pop('lease', None))
+        timeout = kwargs.pop('timeout', None)
+        try:
+            self._client.file.delete(lease_access_conditions=access_conditions, timeout=timeout, **kwargs)
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    @distributed_trace
+    def rename_file(self, new_name: str, **kwargs: Any) -> "ShareFileClient":
+        """
+        Rename the source file.
+
+        :param str new_name:
+            The new file name.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :keyword bool overwrite:
+            A boolean value specifying whether, if the destination file already exists, this request
+            will overwrite it. If true, the rename will succeed and will overwrite the
+            destination file. If not provided, or if false and the destination file does exist, the
+            request will not overwrite the destination file. If provided and the destination file
+            doesn't exist, the rename will succeed.
+        :keyword bool ignore_read_only:
+            A boolean value that specifies whether the ReadOnly attribute on a preexisting destination
+            file should be respected. If true, the rename will succeed, otherwise, a previous file at the
+            destination with the ReadOnly attribute set will cause the rename to fail.
+        :keyword str file_permission:
+            If specified, the permission (security descriptor) shall be set for the file. This header
+            can be used if the permission size is <= 8KB; otherwise file_permission_key shall be used.
+            If SDDL is specified as input, it must have owner, group and dacl.
+            A value of 'preserve' can be passed to preserve source permissions.
+            Note: Only one of file_permission or file_permission_key should be specified.
+        :keyword str file_permission_key:
+            Key of the permission to be set for the file.
+            Note: Only one of file_permission or file_permission_key should be specified.
+        :keyword file_permission_format:
+            Specifies the format in which the permission is returned. If not specified, SDDL will be the default.
+        :paramtype file_permission_format: Literal['sddl', 'binary']
+        :keyword file_attributes:
+            The file system attributes for the file.
+        :paramtype file_attributes: ~azure.storage.fileshare.NTFSAttributes or str
+        :keyword file_creation_time:
+            Creation time for the file.
+        :paramtype file_creation_time: ~datetime.datetime or str
+        :keyword file_last_write_time:
+            Last write time for the file.
+        :paramtype file_last_write_time: ~datetime.datetime or str
+        :keyword file_change_time:
+            Change time for the file. If not specified, change time will be set to the current date/time.
+
+            .. versionadded:: 12.8.0
+
+                This parameter was introduced in API version '2021-06-08'.
+
+        :paramtype file_change_time: str or ~datetime.datetime
+        :keyword str content_type:
+            The Content Type of the new file.
+
+            .. versionadded:: 12.8.0
+
+                This parameter was introduced in API version '2021-06-08'.
+
+        :keyword Dict[str,str] metadata:
+            A name-value pair to associate with a file storage object.
+        :keyword source_lease:
+            Required if the source file has an active lease. Value can be a ShareLeaseClient object
+            or the lease ID as a string.
+        :paramtype source_lease: ~azure.storage.fileshare.ShareLeaseClient or str
+        :keyword destination_lease:
+            Required if the destination file has an active lease. Value can be a ShareLeaseClient object
+            or the lease ID as a string.
+        :paramtype destination_lease: ~azure.storage.fileshare.ShareLeaseClient or str
+        :returns: The new File Client.
+        :rtype: ~azure.storage.fileshare.ShareFileClient
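+
+        A minimal sketch, assuming an authenticated ``file_client`` and a
+        hypothetical destination path:
+
+        .. code-block:: python
+
+            # Rename within the same share, overwriting any existing destination.
+            new_client = file_client.rename_file('dir/renamed-file.txt', overwrite=True)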
+        """
+        if not new_name:
+            raise ValueError("Please specify a new file name.")
+
+        new_name = new_name.strip('/')
+        new_path_and_query = new_name.split('?')
+        new_file_path = new_path_and_query[0]
+        if len(new_path_and_query) == 2:
+            new_file_sas = new_path_and_query[1] or self._query_str.strip('?')
+        else:
+            new_file_sas = self._query_str.strip('?')
+
+        new_file_client = ShareFileClient(
+            f'{self.scheme}://{self.primary_hostname}', self.share_name, new_file_path,
+            credential=new_file_sas or self.credential, api_version=self.api_version,
+            _hosts=self._hosts, _configuration=self._config, _pipeline=self._pipeline,
+            _location_mode=self._location_mode, allow_trailing_dot=self.allow_trailing_dot,
+            allow_source_trailing_dot=self.allow_source_trailing_dot, token_intent=self.file_request_intent
+        )
+
+        kwargs.update(get_rename_smb_properties(kwargs))
+
+        file_http_headers = None
+        content_type = kwargs.pop('content_type', None)
+        if content_type:
+            file_http_headers = FileHTTPHeaders(
+                file_content_type=content_type
+            )
+
+        timeout = kwargs.pop('timeout', None)
+        overwrite = kwargs.pop('overwrite', None)
+        metadata = kwargs.pop('metadata', None)
+        headers = kwargs.pop('headers', {})
+        headers.update(add_metadata_headers(metadata))
+
+        source_access_conditions = get_source_access_conditions(kwargs.pop('source_lease', None))
+        dest_access_conditions = get_dest_access_conditions(kwargs.pop('destination_lease', None))
+
+        try:
+            new_file_client._client.file.rename(  # pylint: disable=protected-access
+                self.url,
+                timeout=timeout,
+                replace_if_exists=overwrite,
+                file_http_headers=file_http_headers,
+                source_lease_access_conditions=source_access_conditions,
+                destination_lease_access_conditions=dest_access_conditions,
+                headers=headers,
+                **kwargs)
+
+            return new_file_client
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    @distributed_trace
+    def get_file_properties(self, **kwargs: Any) -> FileProperties:
+        """Returns all user-defined metadata, standard HTTP properties, and
+        system properties for the file.
+
+        :keyword lease:
+            Required if the file has an active lease. Value can be a ShareLeaseClient object
+            or the lease ID as a string.
+
+            .. versionadded:: 12.1.0
+
+        :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :returns: FileProperties
+        :rtype: ~azure.storage.fileshare.FileProperties
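+
+        A minimal sketch, assuming an authenticated ``file_client``:
+
+        .. code-block:: python
+
+            props = file_client.get_file_properties()
+            # FileProperties exposes standard attributes such as size and last_modified.
+            print(props.name, props.size, props.last_modified)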
+        """
+        access_conditions = get_access_conditions(kwargs.pop('lease', None))
+        timeout = kwargs.pop('timeout', None)
+        try:
+            file_props = cast(FileProperties, self._client.file.get_properties(
+                sharesnapshot=self.snapshot,
+                lease_access_conditions=access_conditions,
+                timeout=timeout,
+                cls=deserialize_file_properties,
+                **kwargs))
+        except HttpResponseError as error:
+            process_storage_error(error)
+        file_props.name = self.file_name
+        file_props.share = self.share_name
+        file_props.snapshot = self.snapshot
+        file_props.path = '/'.join(self.file_path)
+        return file_props
+
+    @distributed_trace
+    def set_http_headers(
+        self, content_settings: "ContentSettings",
+        file_attributes: Optional[Union[str, "NTFSAttributes"]] = None,
+        file_creation_time: Optional[Union[str, datetime]] = None,
+        file_last_write_time: Optional[Union[str, datetime]] = None,
+        file_permission: Optional[str] = None,
+        permission_key: Optional[str] = None,
+        **kwargs: Any
+    ) -> Dict[str, Any]:
+        """Sets HTTP headers on the file.
+
+        :param ~azure.storage.fileshare.ContentSettings content_settings:
+            ContentSettings object used to set file properties. Used to set content type, encoding,
+            language, disposition, md5, and cache control.
+        :param file_attributes:
+            The file system attributes for files and directories.
+            If not set, indicates preservation of existing values.
+            For example, when the value is provided as a str: 'Temporary|Archive'.
+        :type file_attributes: str or ~azure.storage.fileshare.NTFSAttributes or None
+        :param file_creation_time: Creation time for the file.
+            Default value: Preserve.
+        :type file_creation_time: str or ~datetime.datetime or None
+        :param file_last_write_time: Last write time for the file.
+            Default value: Preserve.
+        :type file_last_write_time: str or ~datetime.datetime or None
+        :param file_permission: If specified, the permission (security
+            descriptor) shall be set for the directory/file. This header can be
+            used if the permission size is <= 8KB; otherwise the x-ms-file-permission-key
+            header shall be used. Default value: Inherit. If SDDL is specified as
+            input, it must have owner, group and dacl. Note: Only one of the
+            x-ms-file-permission or x-ms-file-permission-key headers should be specified.
+        :type file_permission: str
+        :param permission_key: Key of the permission to be set for the
+            directory/file. Note: Only one of the x-ms-file-permission or
+            x-ms-file-permission-key should be specified.
+        :type permission_key: str
+        :keyword file_permission_format:
+            Specifies the format in which the permission is returned. If not specified, SDDL will be the default.
+        :paramtype file_permission_format: Literal['sddl', 'binary']
+        :keyword file_change_time:
+            Change time for the file. If not specified, change time will be set to the current date/time.
+
+            .. versionadded:: 12.8.0
+
+                This parameter was introduced in API version '2021-06-08'.
+
+        :paramtype file_change_time: str or ~datetime.datetime
+        :keyword lease:
+            Required if the file has an active lease. Value can be a ShareLeaseClient object
+            or the lease ID as a string.
+
+            .. versionadded:: 12.1.0
+
+        :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str
+        :keyword str owner:
+            NFS only. The owner of the file.
+        :keyword str group:
+            NFS only. The owning group of the file.
+        :keyword str file_mode:
+            NFS only. The file mode of the file.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :returns: File-updated property dict (Etag and last modified).
+        :rtype: dict[str, Any]
+        """
+        access_conditions = get_access_conditions(kwargs.pop('lease', None))
+        timeout = kwargs.pop('timeout', None)
+        file_content_length = kwargs.pop('size', None)
+        file_http_headers = FileHTTPHeaders(
+            file_cache_control=content_settings.cache_control,
+            file_content_type=content_settings.content_type,
+            file_content_md5=bytearray(content_settings.content_md5) if content_settings.content_md5 else None,
+            file_content_encoding=content_settings.content_encoding,
+            file_content_language=content_settings.content_language,
+            file_content_disposition=content_settings.content_disposition
+        )
+        file_permission = _get_file_permission(file_permission, permission_key, None)
+        file_change_time = kwargs.pop('file_change_time', None)
+        try:
+            return cast(Dict[str, Any], self._client.file.set_http_headers(
+                file_content_length=file_content_length,
+                file_http_headers=file_http_headers,
+                file_attributes=str(file_attributes) if file_attributes is not None else file_attributes,
+                file_creation_time=_datetime_to_str(file_creation_time),
+                file_last_write_time=_datetime_to_str(file_last_write_time),
+                file_change_time=_datetime_to_str(file_change_time),
+                file_permission=file_permission,
+                file_permission_key=permission_key,
+                lease_access_conditions=access_conditions,
+                timeout=timeout,
+                cls=return_response_headers,
+                **kwargs))
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    @distributed_trace
+    def set_file_metadata(self, metadata: Optional[Dict[str, Any]] = None, **kwargs: Any) -> Dict[str, Any]:
+        """Sets user-defined metadata for the specified file as one or more
+        name-value pairs.
+
+        Each call to this operation replaces all existing metadata
+        attached to the file. To remove all metadata from the file,
+        call this operation with no metadata dict.
+
+        :param metadata:
+            Name-value pairs associated with the file as metadata.
+        :type metadata: dict[str, str]
+        :keyword lease:
+            Required if the file has an active lease. Value can be a ShareLeaseClient object
+            or the lease ID as a string.
+
+            .. versionadded:: 12.1.0
+
+        :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :returns: File-updated property dict (Etag and last modified).
+        :rtype: dict[str, Any]
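+
+        A minimal sketch, assuming an authenticated ``file_client``:
+
+        .. code-block:: python
+
+            # Replaces any metadata previously set on the file.
+            file_client.set_file_metadata({'category': 'reports'})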
+        """
+        access_conditions = get_access_conditions(kwargs.pop('lease', None))
+        timeout = kwargs.pop('timeout', None)
+        headers = kwargs.pop('headers', {})
+        headers.update(add_metadata_headers(metadata))
+        try:
+            return cast(Dict[str, Any], self._client.file.set_metadata(
+                timeout=timeout,
+                cls=return_response_headers,
+                headers=headers,
+                metadata=metadata,
+                lease_access_conditions=access_conditions,
+                **kwargs))
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    @distributed_trace
+    def upload_range(
+        self, data: Union[bytes, str],
+        offset: int,
+        length: int,
+        **kwargs: Any
+    ) -> Dict[str, Any]:
+        """Upload a range of bytes to a file.
+
+        :param data:
+            The data to upload.
+        :type data: bytes or str
+        :param int offset:
+            Start of byte range to use for uploading a section of the file.
+            The range can be up to 4 MB in size.
+        :param int length:
+            Number of bytes to use for uploading a section of the file.
+            The range can be up to 4 MB in size.
+        :keyword bool validate_content:
+            If true, calculates an MD5 hash of the page content. The storage
+            service checks the hash of the content that has arrived
+            with the hash that was sent. This is primarily valuable for detecting
+            bitflips on the wire if using http instead of https as https (the default)
+            will already validate. Note that this MD5 hash is not stored with the
+            file.
+        :keyword file_last_write_mode:
+            Whether the file last write time should be preserved or overwritten. Possible values
+            are "preserve" or "now". If not specified, file last write time will be changed to
+            the current date/time.
+
+            .. versionadded:: 12.8.0
+
+                This parameter was introduced in API version '2021-06-08'.
+
+        :paramtype file_last_write_mode: Literal["preserve", "now"]
+        :keyword lease:
+            Required if the file has an active lease. Value can be a ShareLeaseClient object
+            or the lease ID as a string.
+
+            .. versionadded:: 12.1.0
+
+        :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :keyword str encoding:
+            Defaults to UTF-8.
+        :returns: File-updated property dict (Etag and last modified).
+        :rtype: Dict[str, Any]
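+
+        A minimal sketch, assuming an authenticated ``file_client`` for a file
+        that already exists with sufficient size:
+
+        .. code-block:: python
+
+            data = b'x' * 512
+            # Writes bytes 0-511 of the file (ranges are at most 4 MB each).
+            file_client.upload_range(data, offset=0, length=len(data))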
+        """
+        validate_content = kwargs.pop('validate_content', False)
+        timeout = kwargs.pop('timeout', None)
+        encoding = kwargs.pop('encoding', 'UTF-8')
+        file_last_write_mode = kwargs.pop('file_last_write_mode', None)
+        if isinstance(data, str):
+            data = data.encode(encoding)
+
+        end_range = offset + length - 1  # Reformat to an inclusive range index
+        content_range = f'bytes={offset}-{end_range}'
+        access_conditions = get_access_conditions(kwargs.pop('lease', None))
+        try:
+            return cast(Dict[str, Any], self._client.file.upload_range(
+                range=content_range,
+                content_length=length,
+                optionalbody=data,
+                timeout=timeout,
+                validate_content=validate_content,
+                file_last_written_mode=file_last_write_mode,
+                lease_access_conditions=access_conditions,
+                cls=return_response_headers,
+                **kwargs))
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    @distributed_trace
+    def upload_range_from_url(
+        self, source_url: str,
+        offset: int,
+        length: int,
+        source_offset: int,
+        **kwargs: Any
+    ) -> Dict[str, Any]:
+        """
+        Writes the bytes from one Azure File endpoint into the specified range of another Azure File endpoint.
+
+        :param int offset:
+            Start of byte range to use for updating a section of the file.
+            The range can be up to 4 MB in size.
+        :param int length:
+            Number of bytes to use for updating a section of the file.
+            The range can be up to 4 MB in size.
+        :param str source_url:
+            A URL of up to 2 KB in length that specifies an Azure file or blob.
+            The value should be URL-encoded as it would appear in a request URI.
+            If the source is in another account, the source must either be public
+            or must be authenticated via a shared access signature. If the source
+            is public, no authentication is required.
+            Examples:
+            https://myaccount.file.core.windows.net/myshare/mydir/myfile
+            https://otheraccount.file.core.windows.net/myshare/mydir/myfile?sastoken
+        :param int source_offset:
+            This indicates the start of the range of bytes (inclusive) that has to be taken from the copy source.
+            The service will read the same number of bytes as the destination range (``length``).
+        :keyword ~datetime.datetime source_if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this conditional header to copy the blob only if the source
+            blob has been modified since the specified date/time.
+        :keyword ~datetime.datetime source_if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this conditional header to copy the blob only if the source blob
+            has not been modified since the specified date/time.
+        :keyword str source_etag:
+            The source ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions source_match_condition:
+            The source match condition to use upon the etag.
+        :keyword file_last_write_mode:
+            Whether the file last write time should be preserved or overwritten. Possible values
+            are "preserve" or "now". If not specified, file last write time will be changed to
+            the current date/time.
+
+            .. versionadded:: 12.8.0
+
+                This parameter was introduced in API version '2021-06-08'.
+
+        :paramtype file_last_write_mode: Literal["preserve", "now"]
+        :keyword lease:
+            Required if the file has an active lease. Value can be a ShareLeaseClient object
+            or the lease ID as a string.
+
+            .. versionadded:: 12.1.0
+
+        :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :keyword str source_authorization:
+            Authenticate as a service principal using a client secret to access a source blob. Ensure "bearer " is
+            the prefix of the source_authorization string.
+        :returns: Result after writing to the specified range of the destination Azure File endpoint.
+        :rtype: dict[str, Any]
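+
+        A minimal sketch, assuming an authenticated ``file_client`` and a
+        hypothetical ``src_url`` that is readable (public or SAS-authenticated):
+
+        .. code-block:: python
+
+            # Copy the first 512 bytes of the source into bytes 0-511 here.
+            file_client.upload_range_from_url(
+                src_url, offset=0, length=512, source_offset=0)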
+        """
+        options = _upload_range_from_url_options(
+            source_url=source_url,
+            offset=offset,
+            length=length,
+            source_offset=source_offset,
+            **kwargs
+        )
+        try:
+            return cast(Dict[str, Any], self._client.file.upload_range_from_url(**options))
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    @distributed_trace
+    def get_ranges(
+        self, offset: Optional[int] = None,
+        length: Optional[int] = None,
+        **kwargs: Any
+    ) -> List[Dict[str, int]]:
+        """Returns the list of valid page ranges for a file or snapshot
+        of a file.
+
+        :param int offset:
+            Specifies the start offset of bytes over which to get ranges.
+        :param int length:
+            Number of bytes to use over which to get ranges.
+        :keyword lease:
+            Required if the file has an active lease. Value can be a ShareLeaseClient object
+            or the lease ID as a string.
+
+            .. versionadded:: 12.1.0
+
+        :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :returns:
+            A list of valid ranges.
+        :rtype: List[dict[str, int]]
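+
+        A minimal sketch, assuming an authenticated ``file_client``:
+
+        .. code-block:: python
+
+            for file_range in file_client.get_ranges():
+                print(file_range['start'], file_range['end'])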
+        """
+        options = _get_ranges_options(
+            snapshot=self.snapshot,
+            offset=offset,
+            length=length,
+            **kwargs)
+        try:
+            ranges = self._client.file.get_range_list(**options)
+        except HttpResponseError as error:
+            process_storage_error(error)
+        return [{'start': file_range.start, 'end': file_range.end} for file_range in ranges.ranges]
+
+    @distributed_trace
+    def get_ranges_diff(
+        self, previous_sharesnapshot: Union[str, Dict[str, Any]],
+        offset: Optional[int] = None,
+        length: Optional[int] = None,
+        *,
+        include_renames: Optional[bool] = None,
+        **kwargs: Any
+    ) -> Tuple[List[Dict[str, int]], List[Dict[str, int]]]:
+        """Returns the list of valid page ranges for a file or snapshot
+        of a file.
+
+        .. versionadded:: 12.6.0
+
+        :param str previous_sharesnapshot:
+            The snapshot diff parameter that contains an opaque DateTime value that
+            specifies a previous file snapshot to be compared
+            against a more recent snapshot or the current file.
+        :param int offset:
+            Specifies the start offset of bytes over which to get ranges.
+        :param int length:
+            Number of bytes to use over which to get ranges.
+        :keyword Optional[bool] include_renames:
+            Only valid if previous_sharesnapshot parameter is provided. Specifies whether the changed ranges for
+            a file that has been renamed or moved between the target snapshot (or live file) and the previous
+            snapshot should be listed. If set to True, the valid changed ranges for the file will be returned.
+            If set to False, the operation will result in a 409 (Conflict) response.
+        :keyword lease:
+            Required if the file has an active lease. Value can be a ShareLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :returns:
+            A tuple of two lists of file ranges as dictionaries with 'start' and 'end' keys.
+            The first element contains the filled file ranges, the second the cleared file ranges.
+        :rtype: tuple[list[dict[str, int]], list[dict[str, int]]]
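+
+        A minimal sketch, assuming an authenticated ``file_client`` and a
+        ``snapshot_token`` captured earlier from ShareClient.create_snapshot:
+
+        .. code-block:: python
+
+            filled, cleared = file_client.get_ranges_diff(snapshot_token)
+            # Each entry is a dict with inclusive 'start' and 'end' offsets.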
+        """
+        options = _get_ranges_options(
+            snapshot=self.snapshot,
+            offset=offset,
+            length=length,
+            previous_sharesnapshot=previous_sharesnapshot,
+            support_rename=include_renames,
+            **kwargs)
+        try:
+            ranges = self._client.file.get_range_list(**options)
+        except HttpResponseError as error:
+            process_storage_error(error)
+        return get_file_ranges_result(ranges)
+
+    @distributed_trace
+    def clear_range(self, offset: int, length: int, **kwargs: Any) -> Dict[str, Any]:
+        """Clears the specified range and releases the space used in storage for
+        that range.
+
+        :param int offset:
+            Start of byte range to use for clearing a section of the file.
+            The range can be up to 4 MB in size.
+        :param int length:
+            Number of bytes to use for clearing a section of the file.
+            The range can be up to 4 MB in size.
+        :keyword lease:
+            Required if the file has an active lease. Value can be a ShareLeaseClient object
+            or the lease ID as a string.
+
+            .. versionadded:: 12.1.0
+
+        :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :returns: File-updated property dict (Etag and last modified).
+        :rtype: Dict[str, Any]
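+
+        A minimal sketch, assuming an authenticated ``file_client``; note both
+        offset and length must be 512-byte aligned:
+
+        .. code-block:: python
+
+            file_client.clear_range(offset=0, length=512)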
+        """
+        access_conditions = get_access_conditions(kwargs.pop('lease', None))
+        timeout = kwargs.pop('timeout', None)
+
+        if offset is None or offset % 512 != 0:
+            raise ValueError("offset must be an integer aligned to a 512-byte boundary.")
+        if length is None or length % 512 != 0:
+            raise ValueError("length must be an integer aligned to a 512-byte boundary.")
+        end_range = length + offset - 1  # Reformat to an inclusive range index
+        content_range = f'bytes={offset}-{end_range}'
+        try:
+            return cast(Dict[str, Any], self._client.file.upload_range(
+                timeout=timeout,
+                cls=return_response_headers,
+                content_length=0,
+                optionalbody=None,
+                file_range_write="clear",
+                range=content_range,
+                lease_access_conditions=access_conditions,
+                **kwargs))
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    @distributed_trace
+    def resize_file(self, size: int, **kwargs: Any) -> Dict[str, Any]:
+        """Resizes a file to the specified size.
+
+        :param int size:
+            Size to resize file to (in bytes)
+        :keyword lease:
+            Required if the file has an active lease. Value can be a ShareLeaseClient object
+            or the lease ID as a string.
+
+            .. versionadded:: 12.1.0
+
+        :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :returns: File-updated property dict (Etag and last modified).
+        :rtype: Dict[str, Any]
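+
+        A minimal sketch, assuming an authenticated ``file_client``:
+
+        .. code-block:: python
+
+            # Truncates or extends the file to exactly 2048 bytes.
+            file_client.resize_file(2048)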
+        """
+        access_conditions = get_access_conditions(kwargs.pop('lease', None))
+        timeout = kwargs.pop('timeout', None)
+        try:
+            return cast(Dict[str, Any], self._client.file.set_http_headers(
+                file_content_length=size,
+                file_attributes=None,
+                file_creation_time=None,
+                file_last_write_time=None,
+                file_permission="preserve",
+                lease_access_conditions=access_conditions,
+                cls=return_response_headers,
+                timeout=timeout,
+                **kwargs))
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    @distributed_trace
+    def list_handles(self, **kwargs: Any) -> ItemPaged[Handle]:
+        """Lists handles for file.
+
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :returns: An auto-paging iterable of Handle
+        :rtype: ~azure.core.paging.ItemPaged[~azure.storage.fileshare.Handle]
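+
+        A minimal sketch, assuming an authenticated ``file_client``:
+
+        .. code-block:: python
+
+            for handle in file_client.list_handles():
+                print(handle.id)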
+        """
+        timeout = kwargs.pop('timeout', None)
+        results_per_page = kwargs.pop('results_per_page', None)
+        command = functools.partial(
+            self._client.file.list_handles,
+            sharesnapshot=self.snapshot,
+            timeout=timeout,
+            **kwargs)
+        return ItemPaged(
+            command, results_per_page=results_per_page,
+            page_iterator_class=HandlesPaged)
+
+    @distributed_trace
+    def close_handle(self, handle: Union[str, Handle], **kwargs: Any) -> Dict[str, int]:
+        """Close an open file handle.
+
+        :param handle:
+            A specific handle to close.
+        :type handle: str or ~azure.storage.fileshare.Handle
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :returns:
+            The number of handles closed (this may be 0 if the specified handle was not found)
+            and the number of handles that failed to close, in a dict.
+        :rtype: dict[str, int]
+        """
+        if isinstance(handle, Handle):
+            handle_id = handle.id
+        else:
+            handle_id = handle
+        if handle_id == '*':
+            raise ValueError("Handle ID '*' is not supported. Use 'close_all_handles' instead.")
+        try:
+            response = self._client.file.force_close_handles(
+                handle_id,
+                marker=None,
+                sharesnapshot=self.snapshot,
+                cls=return_response_headers,
+                **kwargs
+            )
+            return {
+                'closed_handles_count': response.get('number_of_handles_closed', 0),
+                'failed_handles_count': response.get('number_of_handles_failed', 0)
+            }
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    @distributed_trace
+    def close_all_handles(self, **kwargs: Any) -> Dict[str, int]:
+        """Close any open file handles.
+
+        This operation will block until the service has closed all open handles.
+
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :returns: The number of handles closed (this may be 0 if no open handles were found)
+            and the number of handles that failed to close, in a dict.
+        :rtype: dict[str, int]
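+
+        A minimal sketch, assuming an authenticated ``file_client``:
+
+        .. code-block:: python
+
+            result = file_client.close_all_handles()
+            print(result['closed_handles_count'], result['failed_handles_count'])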
+        """
+        timeout = kwargs.pop('timeout', None)
+        start_time = time.time()
+
+        try_close = True
+        continuation_token = None
+        total_closed = 0
+        total_failed = 0
+        while try_close:
+            try:
+                response = self._client.file.force_close_handles(
+                    handle_id='*',
+                    timeout=timeout,
+                    marker=continuation_token,
+                    sharesnapshot=self.snapshot,
+                    cls=return_response_headers,
+                    **kwargs
+                )
+            except HttpResponseError as error:
+                process_storage_error(error)
+            continuation_token = response.get('marker')
+            try_close = bool(continuation_token)
+            total_closed += response.get('number_of_handles_closed', 0)
+            total_failed += response.get('number_of_handles_failed', 0)
+            if timeout:
+                timeout = max(0, timeout - (time.time() - start_time))
+        return {
+            'closed_handles_count': total_closed,
+            'failed_handles_count': total_failed
+        }
+
+    @distributed_trace
+    def create_hardlink(
+        self, target: str,
+        *,
+        lease: Optional[Union[ShareLeaseClient, str]] = None,
+        timeout: Optional[int] = None,
+        **kwargs: Any
+    ) -> Dict[str, Any]:
+        """NFS only. Creates a hard link to the file specified by path.
+
+        :param str target:
+            Specifies the path of the target file to which the link will be created, up to 2 KiB in length.
+            It should be the full path of the target starting from the root. The target file must be in the
+            same share and the same storage account.
+        :keyword lease:
+            Required if the file has an active lease. Value can be a ShareLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :returns: File-updated property dict (ETag and last modified).
+        :rtype: dict[str, Any]
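+
+        A minimal sketch, assuming an authenticated ``file_client`` on an NFS
+        share and a hypothetical existing target path:
+
+        .. code-block:: python
+
+            file_client.create_hardlink('dir/existing-file.txt')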
+        """
+        access_conditions = get_access_conditions(lease)
+        try:
+            return cast(Dict[str, Any], self._client.file.create_hard_link(
+                target_file=target,
+                lease_access_conditions=access_conditions,
+                timeout=timeout,
+                cls=return_response_headers,
+                **kwargs
+            ))
+        except HttpResponseError as error:
+            process_storage_error(error)
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/fileshare/_file_client_helpers.py b/.venv/lib/python3.12/site-packages/azure/storage/fileshare/_file_client_helpers.py
new file mode 100644
index 00000000..ea225f3c
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/fileshare/_file_client_helpers.py
@@ -0,0 +1,145 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+
+from typing import (
+    Any, Dict, List, Optional, Tuple, Union,
+    TYPE_CHECKING
+)
+from urllib.parse import quote, unquote, urlparse
+
+from ._serialize import get_access_conditions, get_source_conditions
+from ._shared.base_client import parse_query
+from ._shared.response_handlers import return_response_headers
+
+if TYPE_CHECKING:
+    from urllib.parse import ParseResult
+
+
+def _parse_url(account_url: str, share_name: str, file_path: str) -> "ParseResult":
+    try:
+        if not account_url.lower().startswith('http'):
+            account_url = "https://" + account_url
+    except AttributeError as exc:
+        raise ValueError("Account URL must be a string.") from exc
+    parsed_url = urlparse(account_url.rstrip('/'))
+    if not (share_name and file_path):
+        raise ValueError("Please specify a share name and file name.")
+    if not parsed_url.netloc:
+        raise ValueError(f"Invalid URL: {account_url}")
+    return parsed_url
+
+
+def _from_file_url(
+    file_url: str,
+    snapshot: Optional[Union[str, Dict[str, Any]]] = None
+) -> Tuple[str, str, str, Optional[Union[str, Dict[str, Any]]]]:
+    try:
+        if not file_url.lower().startswith('http'):
+            file_url = "https://" + file_url
+    except AttributeError as exc:
+        raise ValueError("File URL must be a string.") from exc
+    parsed_url = urlparse(file_url.rstrip('/'))
+
+    if not (parsed_url.netloc and parsed_url.path):
+        raise ValueError(f"Invalid URL: {file_url}")
+    account_url = parsed_url.netloc.rstrip('/') + "?" + parsed_url.query
+
+    path_share, _, path_file = parsed_url.path.lstrip('/').partition('/')
+    path_snapshot, _ = parse_query(parsed_url.query)
+    snapshot = snapshot or path_snapshot
+    share_name = unquote(path_share)
+    file_path = '/'.join([unquote(p) for p in path_file.split('/')])
+
+    return account_url, share_name, file_path, snapshot
+
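# Illustrative split performed by _from_file_url (hypothetical URL):
account_url, share, path, snapshot = _from_file_url(
    "https://acct.file.core.windows.net/myshare/dir%20a/file.txt"
)
# account_url -> 'acct.file.core.windows.net?'  (netloc plus query string)
# share       -> 'myshare'                      (percent-decoded)
# path        -> 'dir a/file.txt'               (segments unquoted, '/' kept)
# snapshot    -> None                           (no sharesnapshot= in the query)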
+
+def _format_url(
+    scheme: str,
+    hostname: str,
+    share_name: Union[str, bytes],
+    file_path: List[str],
+    query_str: str
+) -> str:
+    if isinstance(share_name, str):
+        share_name = share_name.encode('UTF-8')
+    return (f"{scheme}://{hostname}/{quote(share_name)}"
+            f"/{'/'.join([quote(p, safe='~') for p in file_path])}{query_str}")
+
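# Illustrative round trip through _format_url (hypothetical values); each
# path segment is re-quoted with '~' kept safe and the query appended verbatim:
_format_url("https", "acct.file.core.windows.net", "myshare",
            ["dir a", "f~1.txt"], "?sv=xyz")
# -> 'https://acct.file.core.windows.net/myshare/dir%20a/f~1.txt?sv=xyz'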
+
+def _upload_range_from_url_options(
+    source_url: str,
+    offset: int,
+    length: int,
+    source_offset: int,
+    **kwargs: Any
+) -> Dict[str, Any]:
+    if offset is None:
+        raise ValueError("offset must be provided.")
+    if length is None:
+        raise ValueError("length must be provided.")
+    if source_offset is None:
+        raise ValueError("source_offset must be provided.")
+
+    # Format range
+    end_range = offset + length - 1
+    destination_range = f'bytes={offset}-{end_range}'
+    source_range = f'bytes={source_offset}-{source_offset + length - 1}'
+    source_authorization = kwargs.pop('source_authorization', None)
+    source_mod_conditions = get_source_conditions(kwargs)
+    access_conditions = get_access_conditions(kwargs.pop('lease', None))
+    file_last_write_mode = kwargs.pop('file_last_write_mode', None)
+
+    options = {
+        'copy_source_authorization': source_authorization,
+        'copy_source': source_url,
+        'content_length': 0,
+        'source_range': source_range,
+        'range': destination_range,
+        'file_last_written_mode': file_last_write_mode,
+        'source_modified_access_conditions': source_mod_conditions,
+        'lease_access_conditions': access_conditions,
+        'timeout': kwargs.pop('timeout', None),
+        'cls': return_response_headers
+    }
+
+    options.update(kwargs)
+    return options
+
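# Illustrative option assembly (hypothetical SAS URL); both HTTP ranges are
# inclusive, so an (offset, length) pair maps to 'bytes=<start>-<start+len-1>':
opts = _upload_range_from_url_options(
    source_url="https://src.file.core.windows.net/share/src.txt?<sas>",
    offset=0, length=512, source_offset=1024,
)
# opts['range']          == 'bytes=0-511'
# opts['source_range']   == 'bytes=1024-1535'
# opts['content_length'] == 0   (the bytes are copied server-side)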
+
+def _get_ranges_options(
+    snapshot: Optional[str],
+    offset: Optional[int] = None,
+    length: Optional[int] = None,
+    previous_sharesnapshot: Optional[Union[str, Dict[str, Any]]] = None,
+    **kwargs: Any
+) -> Dict[str, Any]:
+    access_conditions = get_access_conditions(kwargs.pop('lease', None))
+
+    content_range = None
+    if offset:
+        if length:
+            end_range = offset + length - 1  # Reformat to an inclusive range index
+            content_range = f'bytes={offset}-{end_range}'
+        else:
+            content_range = f'bytes={offset}-'
+
+    options = {
+        'sharesnapshot': snapshot,
+        'lease_access_conditions': access_conditions,
+        'timeout': kwargs.pop('timeout', None),
+        'range': content_range
+    }
+
+    if previous_sharesnapshot:
+        if hasattr(previous_sharesnapshot, 'snapshot'):
+            options['prevsharesnapshot'] = previous_sharesnapshot.snapshot
+        elif isinstance(previous_sharesnapshot, dict):
+            options['prevsharesnapshot'] = previous_sharesnapshot['snapshot']
+        else:
+            options['prevsharesnapshot'] = previous_sharesnapshot
+
+    options.update(kwargs)
+    return options
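# Illustrative range formatting; note that only a truthy offset emits a
# range header, so offset=0 (or None) requests ranges for the whole file:
_get_ranges_options(snapshot=None, offset=1024)["range"]              # 'bytes=1024-'
_get_ranges_options(snapshot=None, offset=1024, length=512)["range"]  # 'bytes=1024-1535'
_get_ranges_options(snapshot=None)["range"]                           # None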
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/fileshare/_generated/__init__.py b/.venv/lib/python3.12/site-packages/azure/storage/fileshare/_generated/__init__.py
new file mode 100644
index 00000000..b4f1dd31
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/fileshare/_generated/__init__.py
@@ -0,0 +1,29 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+# pylint: disable=wrong-import-position
+
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from ._patch import *  # pylint: disable=unused-wildcard-import
+
+from ._azure_file_storage import AzureFileStorage  # type: ignore
+
+try:
+    from ._patch import __all__ as _patch_all
+    from ._patch import *
+except ImportError:
+    _patch_all = []
+from ._patch import patch_sdk as _patch_sdk
+
+__all__ = [
+    "AzureFileStorage",
+]
+__all__.extend([p for p in _patch_all if p not in __all__])  # pyright: ignore
+
+_patch_sdk()
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/fileshare/_generated/_azure_file_storage.py b/.venv/lib/python3.12/site-packages/azure/storage/fileshare/_generated/_azure_file_storage.py
new file mode 100644
index 00000000..8a332771
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/fileshare/_generated/_azure_file_storage.py
@@ -0,0 +1,130 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+from copy import deepcopy
+from typing import Any, Optional, Union
+from typing_extensions import Self
+
+from azure.core import PipelineClient
+from azure.core.pipeline import policies
+from azure.core.rest import HttpRequest, HttpResponse
+
+from . import models as _models
+from ._configuration import AzureFileStorageConfiguration
+from ._serialization import Deserializer, Serializer
+from .operations import DirectoryOperations, FileOperations, ServiceOperations, ShareOperations
+
+
+class AzureFileStorage:  # pylint: disable=client-accepts-api-version-keyword
+    """AzureFileStorage.
+
+    :ivar service: ServiceOperations operations
+    :vartype service: azure.storage.fileshare.operations.ServiceOperations
+    :ivar share: ShareOperations operations
+    :vartype share: azure.storage.fileshare.operations.ShareOperations
+    :ivar directory: DirectoryOperations operations
+    :vartype directory: azure.storage.fileshare.operations.DirectoryOperations
+    :ivar file: FileOperations operations
+    :vartype file: azure.storage.fileshare.operations.FileOperations
+    :param url: The URL of the service account, share, directory or file that is the target of the
+     desired operation. Required.
+    :type url: str
+    :param base_url: Service URL. Required. Default value is "".
+    :type base_url: str
+    :param file_request_intent: Valid value is "backup". Default value is None.
+    :type file_request_intent: str or ~azure.storage.fileshare.models.ShareTokenIntent
+    :param allow_trailing_dot: If true, the trailing dot will not be trimmed from the target URI.
+     Default value is None.
+    :type allow_trailing_dot: bool
+    :param allow_source_trailing_dot: If true, the trailing dot will not be trimmed from the source
+     URI. Default value is None.
+    :type allow_source_trailing_dot: bool
+    :keyword version: Specifies the version of the operation to use for this request. Default value
+     is "2025-05-05". Note that overriding this default value may result in unsupported behavior.
+    :paramtype version: str
+    :keyword file_range_write_from_url: Only update is supported: - Update: Writes the bytes
+     downloaded from the source url into the specified range. Default value is "update". Note that
+     overriding this default value may result in unsupported behavior.
+    :paramtype file_range_write_from_url: str
+    """
+
+    def __init__(  # pylint: disable=missing-client-constructor-parameter-credential
+        self,
+        url: str,
+        base_url: str = "",
+        file_request_intent: Optional[Union[str, _models.ShareTokenIntent]] = None,
+        allow_trailing_dot: Optional[bool] = None,
+        allow_source_trailing_dot: Optional[bool] = None,
+        **kwargs: Any
+    ) -> None:
+        self._config = AzureFileStorageConfiguration(
+            url=url,
+            file_request_intent=file_request_intent,
+            allow_trailing_dot=allow_trailing_dot,
+            allow_source_trailing_dot=allow_source_trailing_dot,
+            **kwargs
+        )
+        _policies = kwargs.pop("policies", None)
+        if _policies is None:
+            _policies = [
+                policies.RequestIdPolicy(**kwargs),
+                self._config.headers_policy,
+                self._config.user_agent_policy,
+                self._config.proxy_policy,
+                policies.ContentDecodePolicy(**kwargs),
+                self._config.redirect_policy,
+                self._config.retry_policy,
+                self._config.authentication_policy,
+                self._config.custom_hook_policy,
+                self._config.logging_policy,
+                policies.DistributedTracingPolicy(**kwargs),
+                policies.SensitiveHeaderCleanupPolicy(**kwargs) if self._config.redirect_policy else None,
+                self._config.http_logging_policy,
+            ]
+        self._client: PipelineClient = PipelineClient(base_url=base_url, policies=_policies, **kwargs)
+
+        client_models = {k: v for k, v in _models.__dict__.items() if isinstance(v, type)}
+        self._serialize = Serializer(client_models)
+        self._deserialize = Deserializer(client_models)
+        self._serialize.client_side_validation = False
+        self.service = ServiceOperations(self._client, self._config, self._serialize, self._deserialize)
+        self.share = ShareOperations(self._client, self._config, self._serialize, self._deserialize)
+        self.directory = DirectoryOperations(self._client, self._config, self._serialize, self._deserialize)
+        self.file = FileOperations(self._client, self._config, self._serialize, self._deserialize)
+
+    def _send_request(self, request: HttpRequest, *, stream: bool = False, **kwargs: Any) -> HttpResponse:
+        """Runs the network request through the client's chained policies.
+
+        >>> from azure.core.rest import HttpRequest
+        >>> request = HttpRequest("GET", "https://www.example.org/")
+        >>> request
+        <HttpRequest [GET], url: 'https://www.example.org/'>
+        >>> response = client._send_request(request)
+        >>> response
+        <HttpResponse: 200 OK>
+
+        For more information on this code flow, see https://aka.ms/azsdk/dpcodegen/python/send_request
+
+        :param request: The network request you want to make. Required.
+        :type request: ~azure.core.rest.HttpRequest
+        :keyword bool stream: Whether the response payload will be streamed. Defaults to False.
+        :return: The response of your network call. Does not do error handling on your response.
+        :rtype: ~azure.core.rest.HttpResponse
+        """
+
+        request_copy = deepcopy(request)
+        request_copy.url = self._client.format_url(request_copy.url)
+        return self._client.send_request(request_copy, stream=stream, **kwargs)  # type: ignore
+
+    def close(self) -> None:
+        self._client.close()
+
+    def __enter__(self) -> Self:
+        self._client.__enter__()
+        return self
+
+    def __exit__(self, *exc_details: Any) -> None:
+        self._client.__exit__(*exc_details)
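# Hypothetical raw call through the pipeline, mirroring the _send_request
# docstring above (endpoint is illustrative; no credential is configured here):
from azure.core.rest import HttpRequest

client = AzureFileStorage(
    url="https://acct.file.core.windows.net",
    base_url="https://acct.file.core.windows.net",
)
request = HttpRequest("GET", "https://acct.file.core.windows.net/myshare/dir/file.txt")
response = client._send_request(request)  # HttpResponse; no error handling applied
print(response.status_code)
client.close()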
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/fileshare/_generated/_configuration.py b/.venv/lib/python3.12/site-packages/azure/storage/fileshare/_generated/_configuration.py
new file mode 100644
index 00000000..6b42bcdf
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/fileshare/_generated/_configuration.py
@@ -0,0 +1,77 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+from typing import Any, Literal, Optional, Union
+
+from azure.core.pipeline import policies
+
+from . import models as _models
+
+VERSION = "unknown"
+
+
+class AzureFileStorageConfiguration:  # pylint: disable=too-many-instance-attributes
+    """Configuration for AzureFileStorage.
+
+    Note that all parameters used to create this instance are saved as instance
+    attributes.
+
+    :param url: The URL of the service account, share, directory or file that is the target of the
+     desired operation. Required.
+    :type url: str
+    :param file_request_intent: Valid value is "backup". Default value is None.
+    :type file_request_intent: str or ~azure.storage.fileshare.models.ShareTokenIntent
+    :param allow_trailing_dot: If true, the trailing dot will not be trimmed from the target URI.
+     Default value is None.
+    :type allow_trailing_dot: bool
+    :param allow_source_trailing_dot: If true, the trailing dot will not be trimmed from the source
+     URI. Default value is None.
+    :type allow_source_trailing_dot: bool
+    :keyword version: Specifies the version of the operation to use for this request. Default value
+     is "2025-05-05". Note that overriding this default value may result in unsupported behavior.
+    :paramtype version: str
+    :keyword file_range_write_from_url: Only update is supported: - Update: Writes the bytes
+     downloaded from the source url into the specified range. Default value is "update". Note that
+     overriding this default value may result in unsupported behavior.
+    :paramtype file_range_write_from_url: str
+    """
+
+    def __init__(
+        self,
+        url: str,
+        file_request_intent: Optional[Union[str, _models.ShareTokenIntent]] = None,
+        allow_trailing_dot: Optional[bool] = None,
+        allow_source_trailing_dot: Optional[bool] = None,
+        **kwargs: Any
+    ) -> None:
+        version: Literal["2025-05-05"] = kwargs.pop("version", "2025-05-05")
+        file_range_write_from_url: Literal["update"] = kwargs.pop("file_range_write_from_url", "update")
+
+        if url is None:
+            raise ValueError("Parameter 'url' must not be None.")
+
+        self.url = url
+        self.file_request_intent = file_request_intent
+        self.allow_trailing_dot = allow_trailing_dot
+        self.allow_source_trailing_dot = allow_source_trailing_dot
+        self.version = version
+        self.file_range_write_from_url = file_range_write_from_url
+        kwargs.setdefault("sdk_moniker", "azurefilestorage/{}".format(VERSION))
+        self.polling_interval = kwargs.get("polling_interval", 30)
+        self._configure(**kwargs)
+
+    def _configure(self, **kwargs: Any) -> None:
+        self.user_agent_policy = kwargs.get("user_agent_policy") or policies.UserAgentPolicy(**kwargs)
+        self.headers_policy = kwargs.get("headers_policy") or policies.HeadersPolicy(**kwargs)
+        self.proxy_policy = kwargs.get("proxy_policy") or policies.ProxyPolicy(**kwargs)
+        self.logging_policy = kwargs.get("logging_policy") or policies.NetworkTraceLoggingPolicy(**kwargs)
+        self.http_logging_policy = kwargs.get("http_logging_policy") or policies.HttpLoggingPolicy(**kwargs)
+        self.custom_hook_policy = kwargs.get("custom_hook_policy") or policies.CustomHookPolicy(**kwargs)
+        self.redirect_policy = kwargs.get("redirect_policy") or policies.RedirectPolicy(**kwargs)
+        self.retry_policy = kwargs.get("retry_policy") or policies.RetryPolicy(**kwargs)
+        self.authentication_policy = kwargs.get("authentication_policy")
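# Sketch: any policy built in _configure can be supplied up front via kwargs;
# unset ones fall back to the azure-core defaults (endpoint is hypothetical):
from azure.core.pipeline import policies

config = AzureFileStorageConfiguration(
    url="https://acct.file.core.windows.net",
    retry_policy=policies.RetryPolicy(retry_total=3),
)
assert config.version == "2025-05-05"
assert config.file_range_write_from_url == "update"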
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/fileshare/_generated/_patch.py b/.venv/lib/python3.12/site-packages/azure/storage/fileshare/_generated/_patch.py
new file mode 100644
index 00000000..f7dd3251
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/fileshare/_generated/_patch.py
@@ -0,0 +1,20 @@
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------
+"""Customize generated code here.
+
+Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize
+"""
+from typing import List
+
+__all__: List[str] = []  # Add all objects you want publicly available to users at this package level
+
+
+def patch_sdk():
+    """Do not remove from this file.
+
+    `patch_sdk` is a last resort escape hatch that allows you to do customizations
+    you can't accomplish using the techniques described in
+    https://aka.ms/azsdk/python/dpcodegen/python/customize
+    """
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/fileshare/_generated/_serialization.py b/.venv/lib/python3.12/site-packages/azure/storage/fileshare/_generated/_serialization.py
new file mode 100644
index 00000000..a066e16a
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/fileshare/_generated/_serialization.py
@@ -0,0 +1,2050 @@
+# pylint: disable=too-many-lines
+# --------------------------------------------------------------------------
+#
+# Copyright (c) Microsoft Corporation. All rights reserved.
+#
+# The MIT License (MIT)
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to
+# deal in the Software without restriction, including without limitation the
+# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+# sell copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+# --------------------------------------------------------------------------
+
+# pyright: reportUnnecessaryTypeIgnoreComment=false
+
+from base64 import b64decode, b64encode
+import calendar
+import datetime
+import decimal
+import email
+from enum import Enum
+import json
+import logging
+import re
+import sys
+import codecs
+from typing import (
+    Dict,
+    Any,
+    cast,
+    Optional,
+    Union,
+    AnyStr,
+    IO,
+    Mapping,
+    Callable,
+    MutableMapping,
+    List,
+)
+
+try:
+    from urllib import quote  # type: ignore
+except ImportError:
+    from urllib.parse import quote
+import xml.etree.ElementTree as ET
+
+import isodate  # type: ignore
+from typing_extensions import Self
+
+from azure.core.exceptions import DeserializationError, SerializationError
+from azure.core.serialization import NULL as CoreNull
+
+_BOM = codecs.BOM_UTF8.decode(encoding="utf-8")
+
+JSON = MutableMapping[str, Any]
+
+
+class RawDeserializer:
+
+    # Accept "text" because we're open minded people...
+    JSON_REGEXP = re.compile(r"^(application|text)/([a-z+.]+\+)?json$")
+
+    # Name used in context
+    CONTEXT_NAME = "deserialized_data"
+
+    @classmethod
+    def deserialize_from_text(cls, data: Optional[Union[AnyStr, IO]], content_type: Optional[str] = None) -> Any:
+        """Decode data according to content-type.
+
+        Accepts a stream of data as well, but it will be loaded into memory at once for now.
+
+        If no content-type is given, returns the string version (not bytes, not a stream).
+
+        :param data: Input, could be bytes or stream (will be decoded with UTF8) or text
+        :type data: str or bytes or IO
+        :param str content_type: The content type.
+        :return: The deserialized data.
+        :rtype: object
+        """
+        if hasattr(data, "read"):
+            # Assume a stream
+            data = cast(IO, data).read()
+
+        if isinstance(data, bytes):
+            data_as_str = data.decode(encoding="utf-8-sig")
+        else:
+            # Explain to mypy the correct type.
+            data_as_str = cast(str, data)
+
+            # Remove Byte Order Mark if present in string
+            data_as_str = data_as_str.lstrip(_BOM)
+
+        if content_type is None:
+            return data
+
+        if cls.JSON_REGEXP.match(content_type):
+            try:
+                return json.loads(data_as_str)
+            except ValueError as err:
+                raise DeserializationError("JSON is invalid: {}".format(err), err) from err
+        elif "xml" in (content_type or []):
+            try:
+
+                try:
+                    if isinstance(data, unicode):  # type: ignore
+                        # On Python 2.7, ET.fromstring rejects unicode strings, so encode to UTF-8 first
+                        data_as_str = data_as_str.encode(encoding="utf-8")  # type: ignore
+                except NameError:
+                    pass
+
+                return ET.fromstring(data_as_str)  # nosec
+            except ET.ParseError as err:
+                # It might be because the server has an issue, and returned JSON with
+                # content-type XML....
+                # So let's try a JSON load, and if it's still broken
+                # let's flow the initial exception
+                def _json_attempt(data):
+                    try:
+                        return True, json.loads(data)
+                    except ValueError:
+                        return False, None  # Don't care about this one
+
+                success, json_result = _json_attempt(data)
+                if success:
+                    return json_result
+                # If I'm here, it's not JSON, it's not XML, let's scream
+                # and raise the last context in this block (the XML exception)
+                # The function hack is because Py2.7 messes up with exception
+                # context otherwise.
+                _LOGGER.critical("Wasn't XML nor JSON, failing")
+                raise DeserializationError("XML is invalid") from err
+        elif content_type.startswith("text/"):
+            return data_as_str
+        raise DeserializationError("Cannot deserialize content-type: {}".format(content_type))
+
+    @classmethod
+    def deserialize_from_http_generics(cls, body_bytes: Optional[Union[AnyStr, IO]], headers: Mapping) -> Any:
+        """Deserialize from HTTP response.
+
+        Use bytes and headers so as NOT to depend on requests/aiohttp or any other
+        specific implementation.
+        Headers will be tested for "content-type".
+
+        :param bytes body_bytes: The body of the response.
+        :param dict headers: The headers of the response.
+        :returns: The deserialized data.
+        :rtype: object
+        """
+        # Try to use content-type from headers if available
+        content_type = None
+        if "content-type" in headers:
+            content_type = headers["content-type"].split(";")[0].strip().lower()
+        # Ouch, this server did not declare what it sent...
+        # Let's guess it's JSON...
+        # Also, since Autorest was considering that an empty body was a valid JSON,
+        # need that test as well....
+        else:
+            content_type = "application/json"
+
+        if body_bytes:
+            return cls.deserialize_from_text(body_bytes, content_type)
+        return None
+
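# Illustrative dispatch on content type (values are hypothetical):
RawDeserializer.deserialize_from_text(b'{"name": "f.txt"}', "application/json")
# -> {'name': 'f.txt'}
RawDeserializer.deserialize_from_text(b"<a><b>1</b></a>", "application/xml")
# -> xml.etree.ElementTree.Element with tag 'a'
RawDeserializer.deserialize_from_http_generics(b"hello", {"content-type": "text/plain"})
# -> 'hello'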
+
+_LOGGER = logging.getLogger(__name__)
+
+try:
+    _long_type = long  # type: ignore
+except NameError:
+    _long_type = int
+
+TZ_UTC = datetime.timezone.utc
+
+_FLATTEN = re.compile(r"(?<!\\)\.")
+
+
+def attribute_transformer(key, attr_desc, value):  # pylint: disable=unused-argument
+    """A key transformer that returns the Python attribute.
+
+    :param str key: The attribute name
+    :param dict attr_desc: The attribute metadata
+    :param object value: The value
+    :returns: A tuple of (attribute name, value)
+    :rtype: tuple
+    """
+    return (key, value)
+
+
+def full_restapi_key_transformer(key, attr_desc, value):  # pylint: disable=unused-argument
+    """A key transformer that returns the full RestAPI key path.
+
+    :param str key: The attribute name
+    :param dict attr_desc: The attribute metadata
+    :param object value: The value
+    :returns: A tuple of (list of keys using RestAPI syntax, value)
+    :rtype: tuple
+    """
+    keys = _FLATTEN.split(attr_desc["key"])
+    return ([_decode_attribute_map_key(k) for k in keys], value)
+
+
+def last_restapi_key_transformer(key, attr_desc, value):
+    """A key transformer that returns the last RestAPI key.
+
+    :param str key: The attribute name
+    :param dict attr_desc: The attribute metadata
+    :param object value: The value
+    :returns: A tuple of (last RestAPI key, value)
+    :rtype: tuple
+    """
+    key, value = full_restapi_key_transformer(key, attr_desc, value)
+    return (key[-1], value)
+
+
+def _create_xml_node(tag, prefix=None, ns=None):
+    """Create a XML node.
+
+    :param str tag: The tag name
+    :param str prefix: The prefix
+    :param str ns: The namespace
+    :return: The XML node
+    :rtype: xml.etree.ElementTree.Element
+    """
+    if prefix and ns:
+        ET.register_namespace(prefix, ns)
+    if ns:
+        return ET.Element("{" + ns + "}" + tag)
+    return ET.Element(tag)
+
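# Illustrative namespaced node (hypothetical namespace); the tag uses
# Clark notation and the prefix is registered for serialization:
import xml.etree.ElementTree as ET

node = _create_xml_node("EnumerationResults", prefix="s", ns="http://example.com/ns")
node.tag           # '{http://example.com/ns}EnumerationResults'
ET.tostring(node)  # roughly b'<s:EnumerationResults xmlns:s="http://example.com/ns" />'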
+
+class Model:
+    """Mixin for all client request body/response body models to support
+    serialization and deserialization.
+    """
+
+    _subtype_map: Dict[str, Dict[str, Any]] = {}
+    _attribute_map: Dict[str, Dict[str, Any]] = {}
+    _validation: Dict[str, Dict[str, Any]] = {}
+
+    def __init__(self, **kwargs: Any) -> None:
+        self.additional_properties: Optional[Dict[str, Any]] = {}
+        for k in kwargs:  # pylint: disable=consider-using-dict-items
+            if k not in self._attribute_map:
+                _LOGGER.warning("%s is not a known attribute of class %s and will be ignored", k, self.__class__)
+            elif k in self._validation and self._validation[k].get("readonly", False):
+                _LOGGER.warning("Readonly attribute %s will be ignored in class %s", k, self.__class__)
+            else:
+                setattr(self, k, kwargs[k])
+
+    def __eq__(self, other: Any) -> bool:
+        """Compare objects by comparing all attributes.
+
+        :param object other: The object to compare
+        :returns: True if objects are equal
+        :rtype: bool
+        """
+        if isinstance(other, self.__class__):
+            return self.__dict__ == other.__dict__
+        return False
+
+    def __ne__(self, other: Any) -> bool:
+        """Compare objects by comparing all attributes.
+
+        :param object other: The object to compare
+        :returns: True if objects are not equal
+        :rtype: bool
+        """
+        return not self.__eq__(other)
+
+    def __str__(self) -> str:
+        return str(self.__dict__)
+
+    @classmethod
+    def enable_additional_properties_sending(cls) -> None:
+        cls._attribute_map["additional_properties"] = {"key": "", "type": "{object}"}
+
+    @classmethod
+    def is_xml_model(cls) -> bool:
+        try:
+            cls._xml_map  # type: ignore
+        except AttributeError:
+            return False
+        return True
+
+    @classmethod
+    def _create_xml_node(cls):
+        """Create XML node.
+
+        :returns: The XML node
+        :rtype: xml.etree.ElementTree.Element
+        """
+        try:
+            xml_map = cls._xml_map  # type: ignore
+        except AttributeError:
+            xml_map = {}
+
+        return _create_xml_node(xml_map.get("name", cls.__name__), xml_map.get("prefix", None), xml_map.get("ns", None))
+
+    def serialize(self, keep_readonly: bool = False, **kwargs: Any) -> JSON:
+        """Return the JSON that would be sent to server from this model.
+
+        This is an alias to `as_dict(full_restapi_key_transformer, keep_readonly=False)`.
+
+        If you want XML serialization, you can pass the kwarg is_xml=True.
+
+        :param bool keep_readonly: If you want to serialize the readonly attributes
+        :returns: A dict JSON compatible object
+        :rtype: dict
+        """
+        serializer = Serializer(self._infer_class_models())
+        return serializer._serialize(  # type: ignore # pylint: disable=protected-access
+            self, keep_readonly=keep_readonly, **kwargs
+        )
+
+    def as_dict(
+        self,
+        keep_readonly: bool = True,
+        key_transformer: Callable[[str, Dict[str, Any], Any], Any] = attribute_transformer,
+        **kwargs: Any
+    ) -> JSON:
+        """Return a dict that can be serialized using json.dump.
+
+        Advanced usage might optionally use a callback as parameter:
+
+        .. code::python
+
+            def my_key_transformer(key, attr_desc, value):
+                return key
+
+        Key is the attribute name used in Python. Attr_desc
+        is a dict of metadata. Currently contains 'type' with the
+        msrest type and 'key' with the RestAPI encoded key.
+        Value is the current value in this object.
+
+        The string returned will be used to serialize the key.
+        If the return type is a list, this is considered hierarchical
+        result dict.
+
+        See the three examples in this file:
+
+        - attribute_transformer
+        - full_restapi_key_transformer
+        - last_restapi_key_transformer
+
+        If you want XML serialization, you can pass the kwarg is_xml=True.
+
+        :param bool keep_readonly: If you want to serialize the readonly attributes
+        :param function key_transformer: A key transformer function.
+        :returns: A dict JSON compatible object
+        :rtype: dict
+        """
+        serializer = Serializer(self._infer_class_models())
+        return serializer._serialize(  # type: ignore # pylint: disable=protected-access
+            self, key_transformer=key_transformer, keep_readonly=keep_readonly, **kwargs
+        )
+
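# Minimal sketch with a hypothetical model, showing the two built-in key styles:
class FileProps(Model):
    _attribute_map = {"etag": {"key": "properties.Etag", "type": "str"}}

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.etag = kwargs.get("etag")

fp = FileProps(etag="0x1")
fp.as_dict()                                              # {'etag': '0x1'}
fp.as_dict(key_transformer=full_restapi_key_transformer)  # {'properties': {'Etag': '0x1'}}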
+    @classmethod
+    def _infer_class_models(cls):
+        try:
+            str_models = cls.__module__.rsplit(".", 1)[0]
+            models = sys.modules[str_models]
+            client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
+            if cls.__name__ not in client_models:
+                raise ValueError("Not Autorest generated code")
+        except Exception:  # pylint: disable=broad-exception-caught
+            # Assume it's not Autorest generated (tests?). Add ourselves as dependencies.
+            client_models = {cls.__name__: cls}
+        return client_models
+
+    @classmethod
+    def deserialize(cls, data: Any, content_type: Optional[str] = None) -> Self:
+        """Parse a str using the RestAPI syntax and return a model.
+
+        :param str data: A str using RestAPI structure. JSON by default.
+        :param str content_type: JSON by default, set application/xml if XML.
+        :returns: An instance of this model
+        :raises DeserializationError: if something went wrong
+        :rtype: Self
+        """
+        deserializer = Deserializer(cls._infer_class_models())
+        return deserializer(cls.__name__, data, content_type=content_type)  # type: ignore
+
+    @classmethod
+    def from_dict(
+        cls,
+        data: Any,
+        key_extractors: Optional[Callable[[str, Dict[str, Any], Any], Any]] = None,
+        content_type: Optional[str] = None,
+    ) -> Self:
+        """Parse a dict using given key extractor return a model.
+
+        By default, considers the key
+        extractors (rest_key_case_insensitive_extractor, attribute_key_case_insensitive_extractor
+        and last_rest_key_case_insensitive_extractor).
+
+        :param dict data: A dict using RestAPI structure
+        :param function key_extractors: A key extractor function.
+        :param str content_type: JSON by default, set application/xml if XML.
+        :returns: An instance of this model
+        :raises DeserializationError: if something went wrong
+        :rtype: Self
+        """
+        deserializer = Deserializer(cls._infer_class_models())
+        deserializer.key_extractors = (  # type: ignore
+            [  # type: ignore
+                attribute_key_case_insensitive_extractor,
+                rest_key_case_insensitive_extractor,
+                last_rest_key_case_insensitive_extractor,
+            ]
+            if key_extractors is None
+            else key_extractors
+        )
+        return deserializer(cls.__name__, data, content_type=content_type)  # type: ignore
+
+    @classmethod
+    def _flatten_subtype(cls, key, objects):
+        if "_subtype_map" not in cls.__dict__:
+            return {}
+        result = dict(cls._subtype_map[key])
+        for valuetype in cls._subtype_map[key].values():
+            result.update(objects[valuetype]._flatten_subtype(key, objects))  # pylint: disable=protected-access
+        return result
+
+    @classmethod
+    def _classify(cls, response, objects):
+        """Check the class _subtype_map for any child classes.
+        We want to ignore any inherited _subtype_maps.
+
+        :param dict response: The initial data
+        :param dict objects: The class objects
+        :returns: The class to be used
+        :rtype: class
+        """
+        for subtype_key in cls.__dict__.get("_subtype_map", {}).keys():
+            subtype_value = None
+
+            if not isinstance(response, ET.Element):
+                rest_api_response_key = cls._get_rest_key_parts(subtype_key)[-1]
+                subtype_value = response.get(rest_api_response_key, None) or response.get(subtype_key, None)
+            else:
+                subtype_value = xml_key_extractor(subtype_key, cls._attribute_map[subtype_key], response)
+            if subtype_value:
+                # Try to match base class. Can be class name only
+                # (bug to fix in Autorest to support x-ms-discriminator-name)
+                if cls.__name__ == subtype_value:
+                    return cls
+                flatten_mapping_type = cls._flatten_subtype(subtype_key, objects)
+                try:
+                    return objects[flatten_mapping_type[subtype_value]]  # type: ignore
+                except KeyError:
+                    _LOGGER.warning(
+                        "Subtype value %s has no mapping, use base class %s.",
+                        subtype_value,
+                        cls.__name__,
+                    )
+                    break
+            else:
+                _LOGGER.warning("Discriminator %s is absent or null, use base class %s.", subtype_key, cls.__name__)
+                break
+        return cls
+
+    @classmethod
+    def _get_rest_key_parts(cls, attr_key):
+        """Get the RestAPI key of this attr, split it and decode part
+        :param str attr_key: Attribute key must be in attribute_map.
+        :returns: A list of RestAPI part
+        :rtype: list
+        """
+        rest_split_key = _FLATTEN.split(cls._attribute_map[attr_key]["key"])
+        return [_decode_attribute_map_key(key_part) for key_part in rest_split_key]
+
+
+def _decode_attribute_map_key(key):
+    """This decode a key in an _attribute_map to the actual key we want to look at
+    inside the received data.
+
+    :param str key: A key string from the generated code
+    :returns: The decoded key
+    :rtype: str
+    """
+    return key.replace("\\.", ".")
+
+
+class Serializer:  # pylint: disable=too-many-public-methods
+    """Request object model serializer."""
+
+    basic_types = {str: "str", int: "int", bool: "bool", float: "float"}
+
+    _xml_basic_types_serializers = {"bool": lambda x: str(x).lower()}
+    days = {0: "Mon", 1: "Tue", 2: "Wed", 3: "Thu", 4: "Fri", 5: "Sat", 6: "Sun"}
+    months = {
+        1: "Jan",
+        2: "Feb",
+        3: "Mar",
+        4: "Apr",
+        5: "May",
+        6: "Jun",
+        7: "Jul",
+        8: "Aug",
+        9: "Sep",
+        10: "Oct",
+        11: "Nov",
+        12: "Dec",
+    }
+    validation = {
+        "min_length": lambda x, y: len(x) < y,
+        "max_length": lambda x, y: len(x) > y,
+        "minimum": lambda x, y: x < y,
+        "maximum": lambda x, y: x > y,
+        "minimum_ex": lambda x, y: x <= y,
+        "maximum_ex": lambda x, y: x >= y,
+        "min_items": lambda x, y: len(x) < y,
+        "max_items": lambda x, y: len(x) > y,
+        "pattern": lambda x, y: not re.match(y, x, re.UNICODE),
+        "unique": lambda x, y: len(x) != len(set(x)),
+        "multiple": lambda x, y: x % y != 0,
+    }
+
+    def __init__(self, classes: Optional[Mapping[str, type]] = None) -> None:
+        self.serialize_type = {
+            "iso-8601": Serializer.serialize_iso,
+            "rfc-1123": Serializer.serialize_rfc,
+            "unix-time": Serializer.serialize_unix,
+            "duration": Serializer.serialize_duration,
+            "date": Serializer.serialize_date,
+            "time": Serializer.serialize_time,
+            "decimal": Serializer.serialize_decimal,
+            "long": Serializer.serialize_long,
+            "bytearray": Serializer.serialize_bytearray,
+            "base64": Serializer.serialize_base64,
+            "object": self.serialize_object,
+            "[]": self.serialize_iter,
+            "{}": self.serialize_dict,
+        }
+        self.dependencies: Dict[str, type] = dict(classes) if classes else {}
+        self.key_transformer = full_restapi_key_transformer
+        self.client_side_validation = True
+
+    def _serialize(  # pylint: disable=too-many-nested-blocks, too-many-branches, too-many-statements, too-many-locals
+        self, target_obj, data_type=None, **kwargs
+    ):
+        """Serialize data into a string according to type.
+
+        :param object target_obj: The data to be serialized.
+        :param str data_type: The type to be serialized from.
+        :rtype: str, dict
+        :raises SerializationError: if serialization fails.
+        :returns: The serialized data.
+        """
+        key_transformer = kwargs.get("key_transformer", self.key_transformer)
+        keep_readonly = kwargs.get("keep_readonly", False)
+        if target_obj is None:
+            return None
+
+        attr_name = None
+        class_name = target_obj.__class__.__name__
+
+        if data_type:
+            return self.serialize_data(target_obj, data_type, **kwargs)
+
+        if not hasattr(target_obj, "_attribute_map"):
+            data_type = type(target_obj).__name__
+            if data_type in self.basic_types.values():
+                return self.serialize_data(target_obj, data_type, **kwargs)
+
+        # Force "is_xml" kwargs if we detect a XML model
+        try:
+            is_xml_model_serialization = kwargs["is_xml"]
+        except KeyError:
+            is_xml_model_serialization = kwargs.setdefault("is_xml", target_obj.is_xml_model())
+
+        serialized = {}
+        if is_xml_model_serialization:
+            serialized = target_obj._create_xml_node()  # pylint: disable=protected-access
+        try:
+            attributes = target_obj._attribute_map  # pylint: disable=protected-access
+            for attr, attr_desc in attributes.items():
+                attr_name = attr
+                if not keep_readonly and target_obj._validation.get(  # pylint: disable=protected-access
+                    attr_name, {}
+                ).get("readonly", False):
+                    continue
+
+                if attr_name == "additional_properties" and attr_desc["key"] == "":
+                    if target_obj.additional_properties is not None:
+                        serialized.update(target_obj.additional_properties)
+                    continue
+                try:
+
+                    orig_attr = getattr(target_obj, attr)
+                    if is_xml_model_serialization:
+                        pass  # Don't provide "transformer" for XML for now. Keep "orig_attr"
+                    else:  # JSON
+                        keys, orig_attr = key_transformer(attr, attr_desc.copy(), orig_attr)
+                        keys = keys if isinstance(keys, list) else [keys]
+
+                    kwargs["serialization_ctxt"] = attr_desc
+                    new_attr = self.serialize_data(orig_attr, attr_desc["type"], **kwargs)
+
+                    if is_xml_model_serialization:
+                        xml_desc = attr_desc.get("xml", {})
+                        xml_name = xml_desc.get("name", attr_desc["key"])
+                        xml_prefix = xml_desc.get("prefix", None)
+                        xml_ns = xml_desc.get("ns", None)
+                        if xml_desc.get("attr", False):
+                            if xml_ns:
+                                ET.register_namespace(xml_prefix, xml_ns)
+                                xml_name = "{{{}}}{}".format(xml_ns, xml_name)
+                            serialized.set(xml_name, new_attr)  # type: ignore
+                            continue
+                        if xml_desc.get("text", False):
+                            serialized.text = new_attr  # type: ignore
+                            continue
+                        if isinstance(new_attr, list):
+                            serialized.extend(new_attr)  # type: ignore
+                        elif isinstance(new_attr, ET.Element):
+                            # If the down XML has no XML/Name,
+                            # we MUST replace the tag with the local tag. But keeping the namespaces.
+                            if "name" not in getattr(orig_attr, "_xml_map", {}):
+                                splitted_tag = new_attr.tag.split("}")
+                                if len(splitted_tag) == 2:  # Namespace
+                                    new_attr.tag = "}".join([splitted_tag[0], xml_name])
+                                else:
+                                    new_attr.tag = xml_name
+                            serialized.append(new_attr)  # type: ignore
+                        else:  # That's a basic type
+                            # Integrate namespace if necessary
+                            local_node = _create_xml_node(xml_name, xml_prefix, xml_ns)
+                            local_node.text = str(new_attr)
+                            serialized.append(local_node)  # type: ignore
+                    else:  # JSON
+                        for k in reversed(keys):  # type: ignore
+                            new_attr = {k: new_attr}
+
+                        _new_attr = new_attr
+                        _serialized = serialized
+                        for k in keys:  # type: ignore
+                            if k not in _serialized:
+                                _serialized.update(_new_attr)  # type: ignore
+                            _new_attr = _new_attr[k]  # type: ignore
+                            _serialized = _serialized[k]
+                except ValueError as err:
+                    if isinstance(err, SerializationError):
+                        raise
+
+        except (AttributeError, KeyError, TypeError) as err:
+            msg = "Attribute {} in object {} cannot be serialized.\n{}".format(attr_name, class_name, str(target_obj))
+            raise SerializationError(msg) from err
+        return serialized
+
+    def body(self, data, data_type, **kwargs):
+        """Serialize data intended for a request body.
+
+        :param object data: The data to be serialized.
+        :param str data_type: The type to be serialized from.
+        :rtype: dict
+        :raises SerializationError: if serialization fails.
+        :raises ValueError: if data is None
+        :returns: The serialized request body
+        """
+
+        # Just in case this is a dict
+        internal_data_type_str = data_type.strip("[]{}")
+        internal_data_type = self.dependencies.get(internal_data_type_str, None)
+        try:
+            is_xml_model_serialization = kwargs["is_xml"]
+        except KeyError:
+            if internal_data_type and issubclass(internal_data_type, Model):
+                is_xml_model_serialization = kwargs.setdefault("is_xml", internal_data_type.is_xml_model())
+            else:
+                is_xml_model_serialization = False
+        if internal_data_type and not isinstance(internal_data_type, Enum):
+            try:
+                deserializer = Deserializer(self.dependencies)
+                # Since it's on serialization, it's almost sure that format is not JSON REST
+                # We're not able to deal with additional properties for now.
+                deserializer.additional_properties_detection = False
+                if is_xml_model_serialization:
+                    deserializer.key_extractors = [  # type: ignore
+                        attribute_key_case_insensitive_extractor,
+                    ]
+                else:
+                    deserializer.key_extractors = [
+                        rest_key_case_insensitive_extractor,
+                        attribute_key_case_insensitive_extractor,
+                        last_rest_key_case_insensitive_extractor,
+                    ]
+                data = deserializer._deserialize(data_type, data)  # pylint: disable=protected-access
+            except DeserializationError as err:
+                raise SerializationError("Unable to build a model: " + str(err)) from err
+
+        return self._serialize(data, data_type, **kwargs)
+
+    def url(self, name, data, data_type, **kwargs):
+        """Serialize data intended for a URL path.
+
+        :param str name: The name of the URL path parameter.
+        :param object data: The data to be serialized.
+        :param str data_type: The type to be serialized from.
+        :rtype: str
+        :returns: The serialized URL path
+        :raises TypeError: if serialization fails.
+        :raises ValueError: if data is None
+        """
+        try:
+            output = self.serialize_data(data, data_type, **kwargs)
+            if data_type == "bool":
+                output = json.dumps(output)
+
+            if kwargs.get("skip_quote") is True:
+                output = str(output)
+                output = output.replace("{", quote("{")).replace("}", quote("}"))
+            else:
+                output = quote(str(output), safe="")
+        except SerializationError as exc:
+            raise TypeError("{} must be type {}.".format(name, data_type)) from exc
+        return output
+
+    def query(self, name, data, data_type, **kwargs):
+        """Serialize data intended for a URL query.
+
+        :param str name: The name of the query parameter.
+        :param object data: The data to be serialized.
+        :param str data_type: The type to be serialized from.
+        :rtype: str, list
+        :raises TypeError: if serialization fails.
+        :raises ValueError: if data is None
+        :returns: The serialized query parameter
+        """
+        try:
+            # Treat the list aside, since we don't want to encode the div separator
+            if data_type.startswith("["):
+                internal_data_type = data_type[1:-1]
+                do_quote = not kwargs.get("skip_quote", False)
+                return self.serialize_iter(data, internal_data_type, do_quote=do_quote, **kwargs)
+
+            # Not a list, regular serialization
+            output = self.serialize_data(data, data_type, **kwargs)
+            if data_type == "bool":
+                output = json.dumps(output)
+            if kwargs.get("skip_quote") is True:
+                output = str(output)
+            else:
+                output = quote(str(output), safe="")
+        except SerializationError as exc:
+            raise TypeError("{} must be type {}.".format(name, data_type)) from exc
+        return str(output)
+
+    def header(self, name, data, data_type, **kwargs):
+        """Serialize data intended for a request header.
+
+        :param str name: The name of the header.
+        :param object data: The data to be serialized.
+        :param str data_type: The type to be serialized from.
+        :rtype: str
+        :raises TypeError: if serialization fails.
+        :raises ValueError: if data is None
+        :returns: The serialized header
+        """
+        try:
+            if data_type in ["[str]"]:
+                data = ["" if d is None else d for d in data]
+
+            output = self.serialize_data(data, data_type, **kwargs)
+            if data_type == "bool":
+                output = json.dumps(output)
+        except SerializationError as exc:
+            raise TypeError("{} must be type {}.".format(name, data_type)) from exc
+        return str(output)
+
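# Illustrative one-liners for the three transport serializers above:
s = Serializer()
s.url("share", "my share", "str")         # -> 'my%20share' (path segment quoting)
s.query("prefix", ["a b", "c"], "[str]")  # -> ['a%20b', 'c'] (lists stay lists)
s.header("x-ms-flag", True, "bool")       # -> 'true' (JSON casing, not 'True')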
+    def serialize_data(self, data, data_type, **kwargs):
+        """Serialize generic data according to supplied data type.
+
+        :param object data: The data to be serialized.
+        :param str data_type: The type to be serialized from.
+        :raises AttributeError: if required data is None.
+        :raises ValueError: if data is None
+        :raises SerializationError: if serialization fails.
+        :returns: The serialized data.
+        :rtype: str, int, float, bool, dict, list
+        """
+        if data is None:
+            raise ValueError("No value for given attribute")
+
+        try:
+            if data is CoreNull:
+                return None
+            if data_type in self.basic_types.values():
+                return self.serialize_basic(data, data_type, **kwargs)
+
+            if data_type in self.serialize_type:
+                return self.serialize_type[data_type](data, **kwargs)
+
+            # If dependencies is empty, try with current data class
+            # It has to be a subclass of Enum anyway
+            enum_type = self.dependencies.get(data_type, data.__class__)
+            if issubclass(enum_type, Enum):
+                return Serializer.serialize_enum(data, enum_obj=enum_type)
+
+            iter_type = data_type[0] + data_type[-1]
+            if iter_type in self.serialize_type:
+                return self.serialize_type[iter_type](data, data_type[1:-1], **kwargs)
+
+        except (ValueError, TypeError) as err:
+            msg = "Unable to serialize value: {!r} as type: {!r}."
+            raise SerializationError(msg.format(data, data_type)) from err
+        return self._serialize(data, **kwargs)
+
+    @classmethod
+    def _get_custom_serializers(cls, data_type, **kwargs):  # pylint: disable=inconsistent-return-statements
+        custom_serializer = kwargs.get("basic_types_serializers", {}).get(data_type)
+        if custom_serializer:
+            return custom_serializer
+        if kwargs.get("is_xml", False):
+            return cls._xml_basic_types_serializers.get(data_type)
+
+    @classmethod
+    def serialize_basic(cls, data, data_type, **kwargs):
+        """Serialize basic builting data type.
+        Serializes objects to str, int, float or bool.
+
+        Possible kwargs:
+        - basic_types_serializers dict[str, callable] : If set, use the callable as serializer
+        - is_xml bool : If set, use xml_basic_types_serializers
+
+        :param obj data: Object to be serialized.
+        :param str data_type: Type of object in the iterable.
+        :rtype: str, int, float, bool
+        :return: serialized object
+        """
+        custom_serializer = cls._get_custom_serializers(data_type, **kwargs)
+        if custom_serializer:
+            return custom_serializer(data)
+        if data_type == "str":
+            return cls.serialize_unicode(data)
+        return eval(data_type)(data)  # nosec # pylint: disable=eval-used
+
+    @classmethod
+    def serialize_unicode(cls, data):
+        """Special handling for serializing unicode strings in Py2.
+        Encode to UTF-8 if unicode, otherwise handle as a str.
+
+        :param str data: Object to be serialized.
+        :rtype: str
+        :return: serialized object
+        """
+        try:  # If I received an enum, return its value
+            return data.value
+        except AttributeError:
+            pass
+
+        try:
+            if isinstance(data, unicode):  # type: ignore
+                # Don't change it, JSON and XML ElementTree are totally able
+                # to serialize correctly u'' strings
+                return data
+        except NameError:
+            return str(data)
+        return str(data)
+
+    def serialize_iter(self, data, iter_type, div=None, **kwargs):
+        """Serialize iterable.
+
+        Supported kwargs:
+        - serialization_ctxt dict : The current entry of _attribute_map, or same format.
+          serialization_ctxt['type'] should be same as data_type.
+        - is_xml bool : If set, serialize as XML
+
+        :param list data: Object to be serialized.
+        :param str iter_type: Type of object in the iterable.
+        :param str div: If set, this str will be used to combine the elements
+         in the iterable into a combined string. Default is 'None'.
+        :rtype: list, str
+        :return: serialized iterable
+        """
+        if isinstance(data, str):
+            raise SerializationError("Refuse str type as a valid iter type.")
+
+        serialization_ctxt = kwargs.get("serialization_ctxt", {})
+        is_xml = kwargs.get("is_xml", False)
+
+        serialized = []
+        for d in data:
+            try:
+                serialized.append(self.serialize_data(d, iter_type, **kwargs))
+            except ValueError as err:
+                if isinstance(err, SerializationError):
+                    raise
+                serialized.append(None)
+
+        if kwargs.get("do_quote", False):
+            serialized = ["" if s is None else quote(str(s), safe="") for s in serialized]
+
+        if div:
+            serialized = ["" if s is None else str(s) for s in serialized]
+            serialized = div.join(serialized)
+
+        if "xml" in serialization_ctxt or is_xml:
+            # XML serialization is more complicated
+            xml_desc = serialization_ctxt.get("xml", {})
+            xml_name = xml_desc.get("name")
+            if not xml_name:
+                xml_name = serialization_ctxt["key"]
+
+            # Create a wrap node if necessary (use the fact that Element and list have "append")
+            is_wrapped = xml_desc.get("wrapped", False)
+            node_name = xml_desc.get("itemsName", xml_name)
+            if is_wrapped:
+                final_result = _create_xml_node(xml_name, xml_desc.get("prefix", None), xml_desc.get("ns", None))
+            else:
+                final_result = []
+            # All list elements to "local_node"
+            for el in serialized:
+                if isinstance(el, ET.Element):
+                    el_node = el
+                else:
+                    el_node = _create_xml_node(node_name, xml_desc.get("prefix", None), xml_desc.get("ns", None))
+                    if el is not None:  # Otherwise it writes "None" :-p
+                        el_node.text = str(el)
+                final_result.append(el_node)
+            return final_result
+        return serialized
+
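# Illustrative joins: with div set, None (or unserializable) elements
# become empty strings inside the combined result:
s = Serializer()
s.serialize_iter(["a", None, "b"], "str", div=",")  # -> 'a,,b'
s.serialize_iter(["a", "b"], "str")                 # -> ['a', 'b']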
+    def serialize_dict(self, attr, dict_type, **kwargs):
+        """Serialize a dictionary of objects.
+
+        :param dict attr: Object to be serialized.
+        :param str dict_type: Type of object in the dictionary.
+        :rtype: dict
+        :return: serialized dictionary
+        """
+        serialization_ctxt = kwargs.get("serialization_ctxt", {})
+        serialized = {}
+        for key, value in attr.items():
+            try:
+                serialized[self.serialize_unicode(key)] = self.serialize_data(value, dict_type, **kwargs)
+            except ValueError as err:
+                if isinstance(err, SerializationError):
+                    raise
+                serialized[self.serialize_unicode(key)] = None
+
+        if "xml" in serialization_ctxt:
+            # XML serialization is more complicated
+            xml_desc = serialization_ctxt["xml"]
+            xml_name = xml_desc["name"]
+
+            final_result = _create_xml_node(xml_name, xml_desc.get("prefix", None), xml_desc.get("ns", None))
+            for key, value in serialized.items():
+                ET.SubElement(final_result, key).text = value
+            return final_result
+
+        return serialized
+
+    def serialize_object(self, attr, **kwargs):  # pylint: disable=too-many-return-statements
+        """Serialize a generic object.
+        This will be handled as a dictionary. If object passed in is not
+        a basic type (str, int, float, dict, list) it will simply be
+        cast to str.
+
+    :param object attr: Object to be serialized.
+        :rtype: dict or str
+        :return: serialized object
+        """
+        if attr is None:
+            return None
+        if isinstance(attr, ET.Element):
+            return attr
+        obj_type = type(attr)
+        if obj_type in self.basic_types:
+            return self.serialize_basic(attr, self.basic_types[obj_type], **kwargs)
+        if obj_type is _long_type:
+            return self.serialize_long(attr)
+        if obj_type is str:
+            return self.serialize_unicode(attr)
+        if obj_type is datetime.datetime:
+            return self.serialize_iso(attr)
+        if obj_type is datetime.date:
+            return self.serialize_date(attr)
+        if obj_type is datetime.time:
+            return self.serialize_time(attr)
+        if obj_type is datetime.timedelta:
+            return self.serialize_duration(attr)
+        if obj_type is decimal.Decimal:
+            return self.serialize_decimal(attr)
+
+        # If it's a model or I know this dependency, serialize as a Model
+        if obj_type in self.dependencies.values() or isinstance(attr, Model):
+            return self._serialize(attr)
+
+        if obj_type == dict:
+            serialized = {}
+            for key, value in attr.items():
+                try:
+                    serialized[self.serialize_unicode(key)] = self.serialize_object(value, **kwargs)
+                except ValueError:
+                    serialized[self.serialize_unicode(key)] = None
+            return serialized
+
+        if obj_type == list:
+            serialized = []
+            for obj in attr:
+                try:
+                    serialized.append(self.serialize_object(obj, **kwargs))
+                except ValueError:
+                    pass
+            return serialized
+        return str(attr)
+
+    @staticmethod
+    def serialize_enum(attr, enum_obj=None):
+        try:
+            result = attr.value
+        except AttributeError:
+            result = attr
+        try:
+            enum_obj(result)  # type: ignore
+            return result
+        except ValueError as exc:
+            for enum_value in enum_obj:  # type: ignore
+                if enum_value.value.lower() == str(attr).lower():
+                    return enum_value.value
+            error = "{!r} is not a valid value for enum {!r}"
+            raise SerializationError(error.format(attr, enum_obj)) from exc
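+
+    # Hedged sketch (Color is a hypothetical enum): values are validated against
+    # the enum, falling back to a case-insensitive match on member values.
+    #
+    #     class Color(str, Enum):
+    #         RED = "Red"
+    #
+    #     Serializer.serialize_enum("red", enum_obj=Color)      # -> 'Red'
+    #     Serializer.serialize_enum(Color.RED, enum_obj=Color)  # -> 'Red'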
+
+    @staticmethod
+    def serialize_bytearray(attr, **kwargs):  # pylint: disable=unused-argument
+        """Serialize bytearray into base-64 string.
+
+        :param bytearray attr: Object to be serialized.
+        :rtype: str
+        :return: serialized base64
+        """
+        return b64encode(attr).decode()
+
+    @staticmethod
+    def serialize_base64(attr, **kwargs):  # pylint: disable=unused-argument
+        """Serialize str into base-64 string.
+
+        :param str attr: Object to be serialized.
+        :rtype: str
+        :return: serialized base64
+        """
+        encoded = b64encode(attr).decode("ascii")
+        return encoded.strip("=").replace("+", "-").replace("/", "_")
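+
+    # Hedged sketch: standard base64 of b"\xfb\xff" is "+/8=", so the URL-safe,
+    # unpadded form returned here is "-_8".
+    #
+    #     Serializer.serialize_base64(b"\xfb\xff")  # -> '-_8'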
+
+    @staticmethod
+    def serialize_decimal(attr, **kwargs):  # pylint: disable=unused-argument
+        """Serialize Decimal object to float.
+
+        :param decimal attr: Object to be serialized.
+        :rtype: float
+        :return: serialized decimal
+        """
+        return float(attr)
+
+    @staticmethod
+    def serialize_long(attr, **kwargs):  # pylint: disable=unused-argument
+        """Serialize long (Py2) or int (Py3).
+
+        :param int attr: Object to be serialized.
+        :rtype: int/long
+        :return: serialized long
+        """
+        return _long_type(attr)
+
+    @staticmethod
+    def serialize_date(attr, **kwargs):  # pylint: disable=unused-argument
+        """Serialize Date object into ISO-8601 formatted string.
+
+        :param Date attr: Object to be serialized.
+        :rtype: str
+        :return: serialized date
+        """
+        if isinstance(attr, str):
+            attr = isodate.parse_date(attr)
+        t = "{:04}-{:02}-{:02}".format(attr.year, attr.month, attr.day)
+        return t
+
+    @staticmethod
+    def serialize_time(attr, **kwargs):  # pylint: disable=unused-argument
+        """Serialize Time object into ISO-8601 formatted string.
+
+        :param datetime.time attr: Object to be serialized.
+        :rtype: str
+        :return: serialized time
+        """
+        if isinstance(attr, str):
+            attr = isodate.parse_time(attr)
+        t = "{:02}:{:02}:{:02}".format(attr.hour, attr.minute, attr.second)
+        if attr.microsecond:
+            # ISO-8601 fractional seconds: pad microseconds to six digits
+            t += ".{:06}".format(attr.microsecond)
+        return t
+
+    @staticmethod
+    def serialize_duration(attr, **kwargs):  # pylint: disable=unused-argument
+        """Serialize TimeDelta object into ISO-8601 formatted string.
+
+        :param TimeDelta attr: Object to be serialized.
+        :rtype: str
+        :return: serialized duration
+        """
+        if isinstance(attr, str):
+            attr = isodate.parse_duration(attr)
+        return isodate.duration_isoformat(attr)
+
+    @staticmethod
+    def serialize_rfc(attr, **kwargs):  # pylint: disable=unused-argument
+        """Serialize Datetime object into RFC-1123 formatted string.
+
+        :param Datetime attr: Object to be serialized.
+        :rtype: str
+        :raises TypeError: if format invalid.
+        :return: serialized rfc
+        """
+        try:
+            if not attr.tzinfo:
+                _LOGGER.warning("Datetime with no tzinfo will be considered UTC.")
+            utc = attr.utctimetuple()
+        except AttributeError as exc:
+            raise TypeError("RFC1123 object must be valid Datetime object.") from exc
+
+        return "{}, {:02} {} {:04} {:02}:{:02}:{:02} GMT".format(
+            Serializer.days[utc.tm_wday],
+            utc.tm_mday,
+            Serializer.months[utc.tm_mon],
+            utc.tm_year,
+            utc.tm_hour,
+            utc.tm_min,
+            utc.tm_sec,
+        )
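+
+    # Hedged sketch of the output shape (Jan 2, 2023 was a Monday):
+    #
+    #     dt = datetime.datetime(2023, 1, 2, 3, 4, 5, tzinfo=datetime.timezone.utc)
+    #     Serializer.serialize_rfc(dt)  # -> 'Mon, 02 Jan 2023 03:04:05 GMT'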
+
+    @staticmethod
+    def serialize_iso(attr, **kwargs):  # pylint: disable=unused-argument
+        """Serialize Datetime object into ISO-8601 formatted string.
+
+        :param Datetime attr: Object to be serialized.
+        :rtype: str
+        :raises SerializationError: if format invalid.
+        :return: serialized iso
+        """
+        if isinstance(attr, str):
+            attr = isodate.parse_datetime(attr)
+        try:
+            if not attr.tzinfo:
+                _LOGGER.warning("Datetime with no tzinfo will be considered UTC.")
+            utc = attr.utctimetuple()
+            if utc.tm_year > 9999 or utc.tm_year < 1:
+                raise OverflowError("Hit max or min date")
+
+            microseconds = str(attr.microsecond).rjust(6, "0").rstrip("0").ljust(3, "0")
+            if microseconds:
+                microseconds = "." + microseconds
+            date = "{:04}-{:02}-{:02}T{:02}:{:02}:{:02}".format(
+                utc.tm_year, utc.tm_mon, utc.tm_mday, utc.tm_hour, utc.tm_min, utc.tm_sec
+            )
+            return date + microseconds + "Z"
+        except (ValueError, OverflowError) as err:
+            msg = "Unable to serialize datetime object."
+            raise SerializationError(msg) from err
+        except AttributeError as err:
+            msg = "ISO-8601 object must be valid Datetime object."
+            raise TypeError(msg) from err
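+
+    # Hedged sketch: microseconds are trimmed of trailing zeros but padded to at
+    # least three digits, so 500000us serializes as ".500".
+    #
+    #     dt = datetime.datetime(2023, 1, 2, 3, 4, 5, 500000, tzinfo=datetime.timezone.utc)
+    #     Serializer.serialize_iso(dt)  # -> '2023-01-02T03:04:05.500Z'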
+
+    @staticmethod
+    def serialize_unix(attr, **kwargs):  # pylint: disable=unused-argument
+        """Serialize Datetime object into IntTime format.
+        This is represented as seconds.
+
+        :param Datetime attr: Object to be serialized.
+        :rtype: int
+        :raises SerializationError: if format invalid
+        :return: serialized unix
+        """
+        if isinstance(attr, int):
+            return attr
+        try:
+            if not attr.tzinfo:
+                _LOGGER.warning("Datetime with no tzinfo will be considered UTC.")
+            return int(calendar.timegm(attr.utctimetuple()))
+        except AttributeError as exc:
+            raise TypeError("Unix time object must be valid Datetime object.") from exc
+
+
+def rest_key_extractor(attr, attr_desc, data):  # pylint: disable=unused-argument
+    key = attr_desc["key"]
+    working_data = data
+
+    while "." in key:
+        # Need the cast, as for some reason "split" is typed as list[str | Any]
+        dict_keys = cast(List[str], _FLATTEN.split(key))
+        if len(dict_keys) == 1:
+            key = _decode_attribute_map_key(dict_keys[0])
+            break
+        working_key = _decode_attribute_map_key(dict_keys[0])
+        working_data = working_data.get(working_key, data)
+        if working_data is None:
+            # If at any point while following the flattened JSON path we see None,
+            # it means that all properties underneath are None as well
+            return None
+        key = ".".join(dict_keys[1:])
+
+    return working_data.get(key)
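+
+# Hedged sketch: a flattened attribute key like "properties.name" is walked one
+# dot-separated segment at a time through nested dicts.
+#
+#     desc = {"key": "properties.name", "type": "str"}
+#     rest_key_extractor("name", desc, {"properties": {"name": "blob1"}})  # -> 'blob1'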
+
+
+def rest_key_case_insensitive_extractor(  # pylint: disable=unused-argument, inconsistent-return-statements
+    attr, attr_desc, data
+):
+    key = attr_desc["key"]
+    working_data = data
+
+    while "." in key:
+        dict_keys = _FLATTEN.split(key)
+        if len(dict_keys) == 1:
+            key = _decode_attribute_map_key(dict_keys[0])
+            break
+        working_key = _decode_attribute_map_key(dict_keys[0])
+        working_data = attribute_key_case_insensitive_extractor(working_key, None, working_data)
+        if working_data is None:
+            # If at any point while following the flattened JSON path we see None,
+            # it means that all properties underneath are None as well
+            return None
+        key = ".".join(dict_keys[1:])
+
+    if working_data:
+        return attribute_key_case_insensitive_extractor(key, None, working_data)
+
+
+def last_rest_key_extractor(attr, attr_desc, data):  # pylint: disable=unused-argument
+    """Extract the attribute in "data" based on the last part of the JSON path key.
+
+    :param str attr: The attribute to extract
+    :param dict attr_desc: The attribute description
+    :param dict data: The data to extract from
+    :rtype: object
+    :returns: The extracted attribute
+    """
+    key = attr_desc["key"]
+    dict_keys = _FLATTEN.split(key)
+    return attribute_key_extractor(dict_keys[-1], None, data)
+
+
+def last_rest_key_case_insensitive_extractor(attr, attr_desc, data):  # pylint: disable=unused-argument
+    """Extract the attribute in "data" based on the last part of the JSON path key.
+
+    This is the case insensitive version of "last_rest_key_extractor"
+    :param str attr: The attribute to extract
+    :param dict attr_desc: The attribute description
+    :param dict data: The data to extract from
+    :rtype: object
+    :returns: The extracted attribute
+    """
+    key = attr_desc["key"]
+    dict_keys = _FLATTEN.split(key)
+    return attribute_key_case_insensitive_extractor(dict_keys[-1], None, data)
+
+
+def attribute_key_extractor(attr, _, data):
+    return data.get(attr)
+
+
+def attribute_key_case_insensitive_extractor(attr, _, data):
+    found_key = None
+    lower_attr = attr.lower()
+    for key in data:
+        if lower_attr == key.lower():
+            found_key = key
+            break
+
+    return data.get(found_key)
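+
+# Hedged sketch: the lookup ignores key casing.
+#
+#     attribute_key_case_insensitive_extractor("contentlength", None, {"ContentLength": 42})  # -> 42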
+
+
+def _extract_name_from_internal_type(internal_type):
+    """Given an internal type XML description, extract correct XML name with namespace.
+
+    :param type internal_type: A model type
+    :rtype: str
+    :returns: The XML name, qualified with its namespace if one is set
+    """
+    internal_type_xml_map = getattr(internal_type, "_xml_map", {})
+    xml_name = internal_type_xml_map.get("name", internal_type.__name__)
+    xml_ns = internal_type_xml_map.get("ns", None)
+    if xml_ns:
+        xml_name = "{{{}}}{}".format(xml_ns, xml_name)
+    return xml_name
+
+
+def xml_key_extractor(attr, attr_desc, data):  # pylint: disable=unused-argument,too-many-return-statements
+    if isinstance(data, dict):
+        return None
+
+    # Test if this model is XML ready first
+    if not isinstance(data, ET.Element):
+        return None
+
+    xml_desc = attr_desc.get("xml", {})
+    xml_name = xml_desc.get("name", attr_desc["key"])
+
+    # Look for children
+    is_iter_type = attr_desc["type"].startswith("[")
+    is_wrapped = xml_desc.get("wrapped", False)
+    internal_type = attr_desc.get("internalType", None)
+    internal_type_xml_map = getattr(internal_type, "_xml_map", {})
+
+    # Integrate namespace if necessary
+    xml_ns = xml_desc.get("ns", internal_type_xml_map.get("ns", None))
+    if xml_ns:
+        xml_name = "{{{}}}{}".format(xml_ns, xml_name)
+
+    # If it's an attribute, that's simple
+    if xml_desc.get("attr", False):
+        return data.get(xml_name)
+
+    # If it's x-ms-text, that's simple too
+    if xml_desc.get("text", False):
+        return data.text
+
+    # Scenario where I take the local name:
+    # - Wrapped node
+    # - Internal type is an enum (considered basic types)
+    # - Internal type has no XML/Name node
+    if is_wrapped or (internal_type and (issubclass(internal_type, Enum) or "name" not in internal_type_xml_map)):
+        children = data.findall(xml_name)
+    # If internal type has a local name and it's not a list, I use that name
+    elif not is_iter_type and internal_type and "name" in internal_type_xml_map:
+        xml_name = _extract_name_from_internal_type(internal_type)
+        children = data.findall(xml_name)
+    # That's an array
+    else:
+        if internal_type:  # Complex type, ignore itemsName and use the complex type name
+            items_name = _extract_name_from_internal_type(internal_type)
+        else:
+            items_name = xml_desc.get("itemsName", xml_name)
+        children = data.findall(items_name)
+
+    if len(children) == 0:
+        if is_iter_type:
+            if is_wrapped:
+                return None  # is_wrapped no node, we want None
+            return []  # not wrapped, assume empty list
+        return None  # Assume it's not there, maybe an optional node.
+
+    # If is_iter_type and not wrapped, return all found children
+    if is_iter_type:
+        if not is_wrapped:
+            return children
+        # Iter and wrapped, should have found one node only (the wrap one)
+        if len(children) != 1:
+            raise DeserializationError(
+                "Expected a single wrapper node for wrapped array '{}', but found several."
+                " Maybe this array should not be declared as wrapped?".format(xml_name)
+            )
+        return list(children[0])  # Might be empty list and that's ok.
+
+    # Here it's not an iter type; we should have found one element only, or none
+    if len(children) > 1:
+        raise DeserializationError("Find several XML '{}' where it was not expected".format(xml_name))
+    return children[0]
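+
+# Hedged sketch: for a wrapped array the extractor finds the single wrapper node
+# by name and returns its children; an unwrapped array would return the sibling
+# item nodes directly.
+#
+#     root = ET.fromstring("<Parent><Tags><Tag>a</Tag><Tag>b</Tag></Tags></Parent>")
+#     desc = {"key": "Tags", "type": "[str]",
+#             "xml": {"name": "Tags", "wrapped": True, "itemsName": "Tag"}}
+#     [el.text for el in xml_key_extractor("tags", desc, root)]  # -> ['a', 'b']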
+
+
+class Deserializer:
+    """Response object model deserializer.
+
+    :param dict classes: Class type dictionary for deserializing complex types.
+    :ivar list key_extractors: Ordered list of extractors to be used by this deserializer.
+    """
+
+    basic_types = {str: "str", int: "int", bool: "bool", float: "float"}
+
+    valid_date = re.compile(r"\d{4}[-]\d{2}[-]\d{2}T\d{2}:\d{2}:\d{2}\.?\d*Z?[-+]?[\d{2}]?:?[\d{2}]?")
+
+    def __init__(self, classes: Optional[Mapping[str, type]] = None) -> None:
+        self.deserialize_type = {
+            "iso-8601": Deserializer.deserialize_iso,
+            "rfc-1123": Deserializer.deserialize_rfc,
+            "unix-time": Deserializer.deserialize_unix,
+            "duration": Deserializer.deserialize_duration,
+            "date": Deserializer.deserialize_date,
+            "time": Deserializer.deserialize_time,
+            "decimal": Deserializer.deserialize_decimal,
+            "long": Deserializer.deserialize_long,
+            "bytearray": Deserializer.deserialize_bytearray,
+            "base64": Deserializer.deserialize_base64,
+            "object": self.deserialize_object,
+            "[]": self.deserialize_iter,
+            "{}": self.deserialize_dict,
+        }
+        self.deserialize_expected_types = {
+            "duration": (isodate.Duration, datetime.timedelta),
+            "iso-8601": (datetime.datetime),
+        }
+        self.dependencies: Dict[str, type] = dict(classes) if classes else {}
+        self.key_extractors = [rest_key_extractor, xml_key_extractor]
+        # Additional-properties detection only works if "rest_key_extractor" is
+        # used to extract the keys. Making it work with any key extractor would be
+        # too complicated, with no real scenario for now.
+        # So there is a flag to disable additional-properties detection. This flag
+        # should be used if you expect the deserialization input NOT to follow
+        # JSON REST syntax. Otherwise, results are unexpected.
+        self.additional_properties_detection = True
+
+    def __call__(self, target_obj, response_data, content_type=None):
+        """Call the deserializer to process a REST response.
+
+        :param str target_obj: Target data type to deserialize to.
+        :param requests.Response response_data: REST response object.
+        :param str content_type: Swagger "produces" if available.
+        :raises DeserializationError: if deserialization fails.
+        :return: Deserialized object.
+        :rtype: object
+        """
+        data = self._unpack_content(response_data, content_type)
+        return self._deserialize(target_obj, data)
+
+    def _deserialize(self, target_obj, data):  # pylint: disable=inconsistent-return-statements
+        """Call the deserializer on a model.
+
+        Data needs to be already deserialized as JSON or XML ElementTree
+
+        :param str target_obj: Target data type to deserialize to.
+        :param object data: Object to deserialize.
+        :raises DeserializationError: if deserialization fails.
+        :return: Deserialized object.
+        :rtype: object
+        """
+        # This is already a model, go recursive just in case
+        if hasattr(data, "_attribute_map"):
+            constants = [name for name, config in getattr(data, "_validation", {}).items() if config.get("constant")]
+            try:
+                for attr, mapconfig in data._attribute_map.items():  # pylint: disable=protected-access
+                    if attr in constants:
+                        continue
+                    value = getattr(data, attr)
+                    if value is None:
+                        continue
+                    local_type = mapconfig["type"]
+                    internal_data_type = local_type.strip("[]{}")
+                    if internal_data_type not in self.dependencies or isinstance(internal_data_type, Enum):
+                        continue
+                    setattr(data, attr, self._deserialize(local_type, value))
+                return data
+            except AttributeError:
+                return
+
+        response, class_name = self._classify_target(target_obj, data)
+
+        if isinstance(response, str):
+            return self.deserialize_data(data, response)
+        if isinstance(response, type) and issubclass(response, Enum):
+            return self.deserialize_enum(data, response)
+
+        if data is None or data is CoreNull:
+            return data
+        try:
+            attributes = response._attribute_map  # type: ignore # pylint: disable=protected-access
+            d_attrs = {}
+            for attr, attr_desc in attributes.items():
+                # Check empty string. If it's not empty, someone has a real "additionalProperties"...
+                if attr == "additional_properties" and attr_desc["key"] == "":
+                    continue
+                raw_value = None
+                # Enhance attr_desc with some dynamic data
+                attr_desc = attr_desc.copy()  # Do a copy, do not change the real one
+                internal_data_type = attr_desc["type"].strip("[]{}")
+                if internal_data_type in self.dependencies:
+                    attr_desc["internalType"] = self.dependencies[internal_data_type]
+
+                for key_extractor in self.key_extractors:
+                    found_value = key_extractor(attr, attr_desc, data)
+                    if found_value is not None:
+                        if raw_value is not None and raw_value != found_value:
+                            msg = (
+                                "Ignoring extracted value '%s' from %s for key '%s'"
+                                " (duplicate extraction, follow extractors order)"
+                            )
+                            _LOGGER.warning(msg, found_value, key_extractor, attr)
+                            continue
+                        raw_value = found_value
+
+                value = self.deserialize_data(raw_value, attr_desc["type"])
+                d_attrs[attr] = value
+        except (AttributeError, TypeError, KeyError) as err:
+            msg = "Unable to deserialize to object: " + class_name  # type: ignore
+            raise DeserializationError(msg) from err
+        additional_properties = self._build_additional_properties(attributes, data)
+        return self._instantiate_model(response, d_attrs, additional_properties)
+
+    def _build_additional_properties(self, attribute_map, data):
+        if not self.additional_properties_detection:
+            return None
+        if "additional_properties" in attribute_map and attribute_map.get("additional_properties", {}).get("key") != "":
+            # Check empty string. If it's not empty, someone has a real "additionalProperties"
+            return None
+        if isinstance(data, ET.Element):
+            data = {el.tag: el.text for el in data}
+
+        known_keys = {
+            _decode_attribute_map_key(_FLATTEN.split(desc["key"])[0])
+            for desc in attribute_map.values()
+            if desc["key"] != ""
+        }
+        present_keys = set(data.keys())
+        missing_keys = present_keys - known_keys
+        return {key: data[key] for key in missing_keys}
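+
+    # Hedged sketch: payload keys absent from the attribute map are collected as
+    # additional properties.
+    #
+    #     attr_map = {"name": {"key": "name", "type": "str"}}
+    #     Deserializer()._build_additional_properties(attr_map, {"name": "a", "extra": 1})
+    #     # -> {'extra': 1}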
+
+    def _classify_target(self, target, data):
+        """Check to see whether the deserialization target object can
+        be classified into a subclass.
+        Once classification has been determined, initialize object.
+
+        :param str target: The target object type to deserialize to.
+        :param str/dict data: The response data to deserialize.
+        :return: The classified target object and its class name.
+        :rtype: tuple
+        """
+        if target is None:
+            return None, None
+
+        if isinstance(target, str):
+            try:
+                target = self.dependencies[target]
+            except KeyError:
+                return target, target
+
+        try:
+            target = target._classify(data, self.dependencies)  # type: ignore # pylint: disable=protected-access
+        except AttributeError:
+            pass  # Target is not a Model, no classify
+        return target, target.__class__.__name__  # type: ignore
+
+    def failsafe_deserialize(self, target_obj, data, content_type=None):
+        """Ignores any errors encountered in deserialization,
+        and falls back to not deserializing the object. Recommended
+        for use in error deserialization, as we want to return the
+        HttpResponseError to users, and not have them deal with
+        a deserialization error.
+
+        :param str target_obj: The target object type to deserialize to.
+        :param str/dict data: The response data to deserialize.
+        :param str content_type: Swagger "produces" if available.
+        :return: Deserialized object.
+        :rtype: object
+        """
+        try:
+            return self(target_obj, data, content_type=content_type)
+        except:  # pylint: disable=bare-except
+            _LOGGER.debug(
+                "Ran into a deserialization error. Ignoring since this is failsafe deserialization", exc_info=True
+            )
+            return None
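+
+    # Hedged usage sketch (StorageError is a hypothetical model class): a
+    # malformed error body deserializes to None instead of raising.
+    #
+    #     d = Deserializer({"StorageError": StorageError})
+    #     d.failsafe_deserialize("StorageError", b"not json", content_type="application/json")
+    #     # -> None (the parse error is logged at DEBUG level and swallowed)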
+
+    @staticmethod
+    def _unpack_content(raw_data, content_type=None):
+        """Extract the correct structure for deserialization.
+
+        If raw_data is a PipelineResponse, try to extract the result of RawDeserializer.
+        if we can't, raise. Your Pipeline should have a RawDeserializer.
+
+        If not a pipeline response and raw_data is bytes or string, use content-type
+        to decode it. If no content-type, try JSON.
+
+        If raw_data is something else, bypass all logic and return it directly.
+
+        :param obj raw_data: Data to be processed.
+        :param str content_type: How to parse if raw_data is a string/bytes.
+        :raises JSONDecodeError: If JSON is requested and parsing is impossible.
+        :raises UnicodeDecodeError: If bytes are not valid UTF-8
+        :rtype: object
+        :return: Unpacked content.
+        """
+        # Assume this is enough to detect a Pipeline Response without importing it
+        context = getattr(raw_data, "context", {})
+        if context:
+            if RawDeserializer.CONTEXT_NAME in context:
+                return context[RawDeserializer.CONTEXT_NAME]
+            raise ValueError("This pipeline didn't have the RawDeserializer policy; can't deserialize")
+
+        # Assume this is enough to recognize universal_http.ClientResponse without importing it
+        if hasattr(raw_data, "body"):
+            return RawDeserializer.deserialize_from_http_generics(raw_data.text(), raw_data.headers)
+
+        # Assume this enough to recognize requests.Response without importing it.
+        if hasattr(raw_data, "_content_consumed"):
+            return RawDeserializer.deserialize_from_http_generics(raw_data.text, raw_data.headers)
+
+        if isinstance(raw_data, (str, bytes)) or hasattr(raw_data, "read"):
+            return RawDeserializer.deserialize_from_text(raw_data, content_type)  # type: ignore
+        return raw_data
+
+    def _instantiate_model(self, response, attrs, additional_properties=None):
+        """Instantiate a response model passing in deserialized args.
+
+        :param Response response: The response model class.
+        :param dict attrs: The deserialized response attributes.
+        :param dict additional_properties: Additional properties to be set.
+        :rtype: Response
+        :return: The instantiated response model.
+        """
+        if callable(response):
+            subtype = getattr(response, "_subtype_map", {})
+            try:
+                readonly = [
+                    k
+                    for k, v in response._validation.items()  # pylint: disable=protected-access  # type: ignore
+                    if v.get("readonly")
+                ]
+                const = [
+                    k
+                    for k, v in response._validation.items()  # pylint: disable=protected-access  # type: ignore
+                    if v.get("constant")
+                ]
+                kwargs = {k: v for k, v in attrs.items() if k not in subtype and k not in readonly + const}
+                response_obj = response(**kwargs)
+                for attr in readonly:
+                    setattr(response_obj, attr, attrs.get(attr))
+                if additional_properties:
+                    response_obj.additional_properties = additional_properties  # type: ignore
+                return response_obj
+            except TypeError as err:
+                msg = "Unable to deserialize {} into model {}. ".format(kwargs, response)  # type: ignore
+                raise DeserializationError(msg + str(err)) from err
+        else:
+            try:
+                for attr, value in attrs.items():
+                    setattr(response, attr, value)
+                return response
+            except Exception as exp:
+                msg = "Unable to populate response model. "
+                msg += "Type: {}, Error: {}".format(type(response), exp)
+                raise DeserializationError(msg) from exp
+
+    def deserialize_data(self, data, data_type):  # pylint: disable=too-many-return-statements
+        """Process data for deserialization according to data type.
+
+        :param str data: The response string to be deserialized.
+        :param str data_type: The type to deserialize to.
+        :raises DeserializationError: if deserialization fails.
+        :return: Deserialized object.
+        :rtype: object
+        """
+        if data is None:
+            return data
+
+        try:
+            if not data_type:
+                return data
+            if data_type in self.basic_types.values():
+                return self.deserialize_basic(data, data_type)
+            if data_type in self.deserialize_type:
+                if isinstance(data, self.deserialize_expected_types.get(data_type, tuple())):
+                    return data
+
+                is_a_text_parsing_type = lambda x: x not in [  # pylint: disable=unnecessary-lambda-assignment
+                    "object",
+                    "[]",
+                    r"{}",
+                ]
+                if isinstance(data, ET.Element) and is_a_text_parsing_type(data_type) and not data.text:
+                    return None
+                data_val = self.deserialize_type[data_type](data)
+                return data_val
+
+            iter_type = data_type[0] + data_type[-1]
+            if iter_type in self.deserialize_type:
+                return self.deserialize_type[iter_type](data, data_type[1:-1])
+
+            obj_type = self.dependencies[data_type]
+            if issubclass(obj_type, Enum):
+                if isinstance(data, ET.Element):
+                    data = data.text
+                return self.deserialize_enum(data, obj_type)
+
+        except (ValueError, TypeError, AttributeError) as err:
+            msg = "Unable to deserialize response data."
+            msg += " Data: {}, {}".format(data, data_type)
+            raise DeserializationError(msg) from err
+        return self._deserialize(obj_type, data)
+
+    def deserialize_iter(self, attr, iter_type):
+        """Deserialize an iterable.
+
+        :param list attr: Iterable to be deserialized.
+        :param str iter_type: The type of object in the iterable.
+        :return: Deserialized iterable.
+        :rtype: list
+        """
+        if attr is None:
+            return None
+        if isinstance(attr, ET.Element):  # If I receive an element here, get the children
+            attr = list(attr)
+        if not isinstance(attr, (list, set)):
+            raise DeserializationError("Cannot deserialize as [{}] an object of type {}".format(iter_type, type(attr)))
+        return [self.deserialize_data(a, iter_type) for a in attr]
+
+    def deserialize_dict(self, attr, dict_type):
+        """Deserialize a dictionary.
+
+        :param dict/list attr: Dictionary to be deserialized. Also accepts
+         a list of key, value pairs.
+        :param str dict_type: The object type of the items in the dictionary.
+        :return: Deserialized dictionary.
+        :rtype: dict
+        """
+        if isinstance(attr, list):
+            return {x["key"]: self.deserialize_data(x["value"], dict_type) for x in attr}
+
+        if isinstance(attr, ET.Element):
+            # Transform <Key>value</Key> into {"Key": "value"}
+            attr = {el.tag: el.text for el in attr}
+        return {k: self.deserialize_data(v, dict_type) for k, v in attr.items()}
+
+    def deserialize_object(self, attr, **kwargs):  # pylint: disable=too-many-return-statements
+        """Deserialize a generic object.
+        This will be handled as a dictionary.
+
+        :param dict attr: Dictionary to be deserialized.
+        :return: Deserialized object.
+        :rtype: dict
+        :raises TypeError: if non-builtin datatype encountered.
+        """
+        if attr is None:
+            return None
+        if isinstance(attr, ET.Element):
+            # Do not recurse on XML, just return the tree as-is
+            return attr
+        if isinstance(attr, str):
+            return self.deserialize_basic(attr, "str")
+        obj_type = type(attr)
+        if obj_type in self.basic_types:
+            return self.deserialize_basic(attr, self.basic_types[obj_type])
+        if obj_type is _long_type:
+            return self.deserialize_long(attr)
+
+        if obj_type == dict:
+            deserialized = {}
+            for key, value in attr.items():
+                try:
+                    deserialized[key] = self.deserialize_object(value, **kwargs)
+                except ValueError:
+                    deserialized[key] = None
+            return deserialized
+
+        if obj_type == list:
+            deserialized = []
+            for obj in attr:
+                try:
+                    deserialized.append(self.deserialize_object(obj, **kwargs))
+                except ValueError:
+                    pass
+            return deserialized
+
+        error = "Cannot deserialize generic object with type: "
+        raise TypeError(error + str(obj_type))
+
+    def deserialize_basic(self, attr, data_type):  # pylint: disable=too-many-return-statements
+        """Deserialize basic builtin data type from string.
+        Will attempt to convert to str, int, float and bool.
+        This function will also accept '1', '0', 'true' and 'false' as
+        valid bool values.
+
+        :param str attr: response string to be deserialized.
+        :param str data_type: deserialization data type.
+        :return: Deserialized basic type.
+        :rtype: str, int, float or bool
+        :raises TypeError: if string format is not valid.
+        """
+        # If we're here, data is supposed to be a basic type.
+        # If it's still an XML node, take the text
+        if isinstance(attr, ET.Element):
+            attr = attr.text
+            if not attr:
+                if data_type == "str":
+                    # None or '', node <a/> is empty string.
+                    return ""
+                # None or '', node <a/> with a strong type is None.
+                # Don't try to model "empty bool" or "empty int"
+                return None
+
+        if data_type == "bool":
+            if attr in [True, False, 1, 0]:
+                return bool(attr)
+            if isinstance(attr, str):
+                if attr.lower() in ["true", "1"]:
+                    return True
+                if attr.lower() in ["false", "0"]:
+                    return False
+            raise TypeError("Invalid boolean value: {}".format(attr))
+
+        if data_type == "str":
+            return self.deserialize_unicode(attr)
+        return eval(data_type)(attr)  # nosec # pylint: disable=eval-used
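+
+    # Hedged sketch:
+    #
+    #     d = Deserializer()
+    #     d.deserialize_basic("true", "bool")  # -> True
+    #     d.deserialize_basic("0", "bool")     # -> False
+    #     d.deserialize_basic("1.5", "float")  # -> 1.5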
+
+    @staticmethod
+    def deserialize_unicode(data):
+        """Preserve unicode objects in Python 2, otherwise return data
+        as a string.
+
+        :param str data: response string to be deserialized.
+        :return: Deserialized string.
+        :rtype: str or unicode
+        """
+        # We might be here because we have an enum modeled as string,
+        # and we try to deserialize a partial dict with enum inside
+        if isinstance(data, Enum):
+            return data
+
+        # Consider this is real string
+        try:
+            if isinstance(data, unicode):  # type: ignore
+                return data
+        except NameError:
+            return str(data)
+        return str(data)
+
+    @staticmethod
+    def deserialize_enum(data, enum_obj):
+        """Deserialize string into enum object.
+
+        If the string is not a valid enum value it will be returned as-is
+        and a warning will be logged.
+
+        :param str data: Response string to be deserialized. If this value is
+         None or invalid it will be returned as-is.
+        :param Enum enum_obj: Enum object to deserialize to.
+        :return: Deserialized enum object.
+        :rtype: Enum
+        """
+        if isinstance(data, enum_obj) or data is None:
+            return data
+        if isinstance(data, Enum):
+            data = data.value
+        if isinstance(data, int):
+            # Workaround. We might consider remove it in the future.
+            try:
+                return list(enum_obj.__members__.values())[data]
+            except IndexError as exc:
+                error = "{!r} is not a valid index for enum {!r}"
+                raise DeserializationError(error.format(data, enum_obj)) from exc
+        try:
+            return enum_obj(str(data))
+        except ValueError:
+            for enum_value in enum_obj:
+                if enum_value.value.lower() == str(data).lower():
+                    return enum_value
+            # We don't fail anymore for unknown value, we deserialize as a string
+            _LOGGER.warning("Deserializer is not able to find %s as valid enum in %s", data, enum_obj)
+            return Deserializer.deserialize_unicode(data)
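+
+    # Hedged sketch (Color is a hypothetical enum): matching is case-insensitive,
+    # and unknown values come back as plain strings rather than failing.
+    #
+    #     class Color(str, Enum):
+    #         RED = "Red"
+    #
+    #     Deserializer.deserialize_enum("red", Color)     # -> <Color.RED: 'Red'>
+    #     Deserializer.deserialize_enum("Purple", Color)  # -> 'Purple' (warning logged)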
+
+    @staticmethod
+    def deserialize_bytearray(attr):
+        """Deserialize string into bytearray.
+
+        :param str attr: response string to be deserialized.
+        :return: Deserialized bytearray
+        :rtype: bytearray
+        :raises TypeError: if string format invalid.
+        """
+        if isinstance(attr, ET.Element):
+            attr = attr.text
+        return bytearray(b64decode(attr))  # type: ignore
+
+    @staticmethod
+    def deserialize_base64(attr):
+        """Deserialize base64 encoded string into string.
+
+        :param str attr: response string to be deserialized.
+        :return: Deserialized base64 string
+        :rtype: bytearray
+        :raises TypeError: if string format invalid.
+        """
+        if isinstance(attr, ET.Element):
+            attr = attr.text
+        padding = "=" * (3 - (len(attr) + 3) % 4)  # type: ignore
+        attr = attr + padding  # type: ignore
+        encoded = attr.replace("-", "+").replace("_", "/")
+        return b64decode(encoded)
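+
+    # Hedged sketch: the inverse of serialize_base64, restoring padding and the
+    # standard alphabet before decoding.
+    #
+    #     Deserializer.deserialize_base64("-_8")  # -> b'\xfb\xff'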
+
+    @staticmethod
+    def deserialize_decimal(attr):
+        """Deserialize string into Decimal object.
+
+        :param str attr: response string to be deserialized.
+        :return: Deserialized decimal
+        :raises DeserializationError: if string format invalid.
+        :rtype: decimal
+        """
+        if isinstance(attr, ET.Element):
+            attr = attr.text
+        try:
+            return decimal.Decimal(str(attr))  # type: ignore
+        except decimal.DecimalException as err:
+            msg = "Invalid decimal {}".format(attr)
+            raise DeserializationError(msg) from err
+
+    @staticmethod
+    def deserialize_long(attr):
+        """Deserialize string into long (Py2) or int (Py3).
+
+        :param str attr: response string to be deserialized.
+        :return: Deserialized int
+        :rtype: long or int
+        :raises ValueError: if string format invalid.
+        """
+        if isinstance(attr, ET.Element):
+            attr = attr.text
+        return _long_type(attr)  # type: ignore
+
+    @staticmethod
+    def deserialize_duration(attr):
+        """Deserialize ISO-8601 formatted string into TimeDelta object.
+
+        :param str attr: response string to be deserialized.
+        :return: Deserialized duration
+        :rtype: TimeDelta
+        :raises DeserializationError: if string format invalid.
+        """
+        if isinstance(attr, ET.Element):
+            attr = attr.text
+        try:
+            duration = isodate.parse_duration(attr)
+        except (ValueError, OverflowError, AttributeError) as err:
+            msg = "Cannot deserialize duration object."
+            raise DeserializationError(msg) from err
+        return duration
+
+    @staticmethod
+    def deserialize_date(attr):
+        """Deserialize ISO-8601 formatted string into Date object.
+
+        :param str attr: response string to be deserialized.
+        :return: Deserialized date
+        :rtype: Date
+        :raises DeserializationError: if string format invalid.
+        """
+        if isinstance(attr, ET.Element):
+            attr = attr.text
+        if re.search(r"[^\W\d_]", attr, re.I + re.U):  # type: ignore
+            raise DeserializationError("Date must have only digits and -. Received: %s" % attr)
+        # This must NOT use defaultmonth/defaultday. Using None ensure this raises an exception.
+        return isodate.parse_date(attr, defaultmonth=0, defaultday=0)
+
+    @staticmethod
+    def deserialize_time(attr):
+        """Deserialize ISO-8601 formatted string into time object.
+
+        :param str attr: response string to be deserialized.
+        :return: Deserialized time
+        :rtype: datetime.time
+        :raises DeserializationError: if string format invalid.
+        """
+        if isinstance(attr, ET.Element):
+            attr = attr.text
+        if re.search(r"[^\W\d_]", attr, re.I + re.U):  # type: ignore
+            raise DeserializationError("Date must have only digits and -. Received: %s" % attr)
+        return isodate.parse_time(attr)
+
+    @staticmethod
+    def deserialize_rfc(attr):
+        """Deserialize RFC-1123 formatted string into Datetime object.
+
+        :param str attr: response string to be deserialized.
+        :return: Deserialized RFC datetime
+        :rtype: Datetime
+        :raises DeserializationError: if string format invalid.
+        """
+        if isinstance(attr, ET.Element):
+            attr = attr.text
+        try:
+            parsed_date = email.utils.parsedate_tz(attr)  # type: ignore
+            date_obj = datetime.datetime(
+                *parsed_date[:6], tzinfo=datetime.timezone(datetime.timedelta(minutes=(parsed_date[9] or 0) / 60))
+            )
+            if not date_obj.tzinfo:
+                date_obj = date_obj.astimezone(tz=TZ_UTC)
+        except ValueError as err:
+            msg = "Cannot deserialize to rfc datetime object."
+            raise DeserializationError(msg) from err
+        return date_obj
+
+    @staticmethod
+    def deserialize_iso(attr):
+        """Deserialize ISO-8601 formatted string into Datetime object.
+
+        :param str attr: response string to be deserialized.
+        :return: Deserialized ISO datetime
+        :rtype: Datetime
+        :raises DeserializationError: if string format invalid.
+        """
+        if isinstance(attr, ET.Element):
+            attr = attr.text
+        try:
+            attr = attr.upper()  # type: ignore
+            match = Deserializer.valid_date.match(attr)
+            if not match:
+                raise ValueError("Invalid datetime string: " + attr)
+
+            check_decimal = attr.split(".")
+            if len(check_decimal) > 1:
+                decimal_str = ""
+                for digit in check_decimal[1]:
+                    if digit.isdigit():
+                        decimal_str += digit
+                    else:
+                        break
+                if len(decimal_str) > 6:
+                    attr = attr.replace(decimal_str, decimal_str[0:6])
+
+            date_obj = isodate.parse_datetime(attr)
+            test_utc = date_obj.utctimetuple()
+            if test_utc.tm_year > 9999 or test_utc.tm_year < 1:
+                raise OverflowError("Hit max or min date")
+        except (ValueError, OverflowError, AttributeError) as err:
+            msg = "Cannot deserialize datetime object."
+            raise DeserializationError(msg) from err
+        return date_obj
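+
+    # Hedged sketch: fractional seconds beyond six digits are truncated before
+    # parsing, since datetime only carries microsecond precision.
+    #
+    #     Deserializer.deserialize_iso("2023-01-02T03:04:05.123456789Z")
+    #     # -> datetime.datetime(2023, 1, 2, 3, 4, 5, 123456, tzinfo=<UTC>)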
+
+    @staticmethod
+    def deserialize_unix(attr):
+        """Serialize Datetime object into IntTime format.
+        This is represented as seconds.
+
+        :param int attr: Object to be serialized.
+        :return: Deserialized datetime
+        :rtype: Datetime
+        :raises DeserializationError: if format invalid
+        """
+        if isinstance(attr, ET.Element):
+            attr = int(attr.text)  # type: ignore
+        try:
+            attr = int(attr)
+            date_obj = datetime.datetime.fromtimestamp(attr, TZ_UTC)
+        except ValueError as err:
+            msg = "Cannot deserialize to unix datetime object."
+            raise DeserializationError(msg) from err
+        return date_obj
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/fileshare/_generated/aio/__init__.py b/.venv/lib/python3.12/site-packages/azure/storage/fileshare/_generated/aio/__init__.py
new file mode 100644
index 00000000..b4f1dd31
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/fileshare/_generated/aio/__init__.py
@@ -0,0 +1,29 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+# pylint: disable=wrong-import-position
+
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from ._patch import *  # pylint: disable=unused-wildcard-import
+
+from ._azure_file_storage import AzureFileStorage  # type: ignore
+
+try:
+    from ._patch import __all__ as _patch_all
+    from ._patch import *
+except ImportError:
+    _patch_all = []
+from ._patch import patch_sdk as _patch_sdk
+
+__all__ = [
+    "AzureFileStorage",
+]
+__all__.extend([p for p in _patch_all if p not in __all__])  # pyright: ignore
+
+_patch_sdk()
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/fileshare/_generated/aio/_azure_file_storage.py b/.venv/lib/python3.12/site-packages/azure/storage/fileshare/_generated/aio/_azure_file_storage.py
new file mode 100644
index 00000000..22c80eec
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/fileshare/_generated/aio/_azure_file_storage.py
@@ -0,0 +1,132 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+from copy import deepcopy
+from typing import Any, Awaitable, Optional, Union
+from typing_extensions import Self
+
+from azure.core import AsyncPipelineClient
+from azure.core.pipeline import policies
+from azure.core.rest import AsyncHttpResponse, HttpRequest
+
+from .. import models as _models
+from .._serialization import Deserializer, Serializer
+from ._configuration import AzureFileStorageConfiguration
+from .operations import DirectoryOperations, FileOperations, ServiceOperations, ShareOperations
+
+
+class AzureFileStorage:  # pylint: disable=client-accepts-api-version-keyword
+    """AzureFileStorage.
+
+    :ivar service: ServiceOperations operations
+    :vartype service: azure.storage.fileshare.aio.operations.ServiceOperations
+    :ivar share: ShareOperations operations
+    :vartype share: azure.storage.fileshare.aio.operations.ShareOperations
+    :ivar directory: DirectoryOperations operations
+    :vartype directory: azure.storage.fileshare.aio.operations.DirectoryOperations
+    :ivar file: FileOperations operations
+    :vartype file: azure.storage.fileshare.aio.operations.FileOperations
+    :param url: The URL of the service account, share, directory or file that is the target of the
+     desired operation. Required.
+    :type url: str
+    :param base_url: Service URL. Required. Default value is "".
+    :type base_url: str
+    :param file_request_intent: Valid value is "backup". Default value is None.
+    :type file_request_intent: str or ~azure.storage.fileshare.models.ShareTokenIntent
+    :param allow_trailing_dot: If true, the trailing dot will not be trimmed from the target URI.
+     Default value is None.
+    :type allow_trailing_dot: bool
+    :param allow_source_trailing_dot: If true, the trailing dot will not be trimmed from the source
+     URI. Default value is None.
+    :type allow_source_trailing_dot: bool
+    :keyword version: Specifies the version of the operation to use for this request. Default value
+     is "2025-05-05". Note that overriding this default value may result in unsupported behavior.
+    :paramtype version: str
+    :keyword file_range_write_from_url: Only update is supported: - Update: Writes the bytes
+     downloaded from the source url into the specified range. Default value is "update". Note that
+     overriding this default value may result in unsupported behavior.
+    :paramtype file_range_write_from_url: str
+    """
+
+    def __init__(  # pylint: disable=missing-client-constructor-parameter-credential
+        self,
+        url: str,
+        base_url: str = "",
+        file_request_intent: Optional[Union[str, _models.ShareTokenIntent]] = None,
+        allow_trailing_dot: Optional[bool] = None,
+        allow_source_trailing_dot: Optional[bool] = None,
+        **kwargs: Any
+    ) -> None:
+        self._config = AzureFileStorageConfiguration(
+            url=url,
+            file_request_intent=file_request_intent,
+            allow_trailing_dot=allow_trailing_dot,
+            allow_source_trailing_dot=allow_source_trailing_dot,
+            **kwargs
+        )
+        _policies = kwargs.pop("policies", None)
+        if _policies is None:
+            _policies = [
+                policies.RequestIdPolicy(**kwargs),
+                self._config.headers_policy,
+                self._config.user_agent_policy,
+                self._config.proxy_policy,
+                policies.ContentDecodePolicy(**kwargs),
+                self._config.redirect_policy,
+                self._config.retry_policy,
+                self._config.authentication_policy,
+                self._config.custom_hook_policy,
+                self._config.logging_policy,
+                policies.DistributedTracingPolicy(**kwargs),
+                policies.SensitiveHeaderCleanupPolicy(**kwargs) if self._config.redirect_policy else None,
+                self._config.http_logging_policy,
+            ]
+        self._client: AsyncPipelineClient = AsyncPipelineClient(base_url=base_url, policies=_policies, **kwargs)
+
+        client_models = {k: v for k, v in _models.__dict__.items() if isinstance(v, type)}
+        self._serialize = Serializer(client_models)
+        self._deserialize = Deserializer(client_models)
+        self._serialize.client_side_validation = False
+        self.service = ServiceOperations(self._client, self._config, self._serialize, self._deserialize)
+        self.share = ShareOperations(self._client, self._config, self._serialize, self._deserialize)
+        self.directory = DirectoryOperations(self._client, self._config, self._serialize, self._deserialize)
+        self.file = FileOperations(self._client, self._config, self._serialize, self._deserialize)
+
+    def _send_request(
+        self, request: HttpRequest, *, stream: bool = False, **kwargs: Any
+    ) -> Awaitable[AsyncHttpResponse]:
+        """Runs the network request through the client's chained policies.
+
+        >>> from azure.core.rest import HttpRequest
+        >>> request = HttpRequest("GET", "https://www.example.org/")
+        >>> request
+        <HttpRequest [GET], url: 'https://www.example.org/'>
+        >>> response = await client._send_request(request)
+        <AsyncHttpResponse: 200 OK>
+
+        For more information on this code flow, see https://aka.ms/azsdk/dpcodegen/python/send_request
+
+        :param request: The network request you want to make. Required.
+        :type request: ~azure.core.rest.HttpRequest
+        :keyword bool stream: Whether the response payload will be streamed. Defaults to False.
+        :return: The response of your network call. Does not do error handling on your response.
+        :rtype: ~azure.core.rest.AsyncHttpResponse
+        """
+
+        request_copy = deepcopy(request)
+        request_copy.url = self._client.format_url(request_copy.url)
+        return self._client.send_request(request_copy, stream=stream, **kwargs)  # type: ignore
+
+    async def close(self) -> None:
+        await self._client.close()
+
+    async def __aenter__(self) -> Self:
+        await self._client.__aenter__()
+        return self
+
+    async def __aexit__(self, *exc_details: Any) -> None:
+        await self._client.__aexit__(*exc_details)
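+
+# Hedged usage sketch (the account URL is a placeholder, and get_properties is
+# assumed from the generated ServiceOperations): the client is an async context
+# manager, so the underlying transport is closed on exit.
+#
+#     endpoint = "https://myaccount.file.core.windows.net"
+#     async with AzureFileStorage(url=endpoint, base_url=endpoint) as client:
+#         props = await client.service.get_properties()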
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/fileshare/_generated/aio/_configuration.py b/.venv/lib/python3.12/site-packages/azure/storage/fileshare/_generated/aio/_configuration.py
new file mode 100644
index 00000000..7cc25f81
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/fileshare/_generated/aio/_configuration.py
@@ -0,0 +1,77 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+from typing import Any, Literal, Optional, Union
+
+from azure.core.pipeline import policies
+
+from .. import models as _models
+
+VERSION = "unknown"
+
+
+class AzureFileStorageConfiguration:  # pylint: disable=too-many-instance-attributes
+    """Configuration for AzureFileStorage.
+
+    Note that all parameters used to create this instance are saved as instance
+    attributes.
+
+    :param url: The URL of the service account, share, directory or file that is the target of the
+     desired operation. Required.
+    :type url: str
+    :param file_request_intent: Valid value is "backup". Default value is None.
+    :type file_request_intent: str or ~azure.storage.fileshare.models.ShareTokenIntent
+    :param allow_trailing_dot: If true, the trailing dot will not be trimmed from the target URI.
+     Default value is None.
+    :type allow_trailing_dot: bool
+    :param allow_source_trailing_dot: If true, the trailing dot will not be trimmed from the source
+     URI. Default value is None.
+    :type allow_source_trailing_dot: bool
+    :keyword version: Specifies the version of the operation to use for this request. Default value
+     is "2025-05-05". Note that overriding this default value may result in unsupported behavior.
+    :paramtype version: str
+    :keyword file_range_write_from_url: Only update is supported: - Update: Writes the bytes
+     downloaded from the source url into the specified range. Default value is "update". Note that
+     overriding this default value may result in unsupported behavior.
+    :paramtype file_range_write_from_url: str
+    """
+
+    def __init__(
+        self,
+        url: str,
+        file_request_intent: Optional[Union[str, _models.ShareTokenIntent]] = None,
+        allow_trailing_dot: Optional[bool] = None,
+        allow_source_trailing_dot: Optional[bool] = None,
+        **kwargs: Any
+    ) -> None:
+        version: Literal["2025-05-05"] = kwargs.pop("version", "2025-05-05")
+        file_range_write_from_url: Literal["update"] = kwargs.pop("file_range_write_from_url", "update")
+
+        if url is None:
+            raise ValueError("Parameter 'url' must not be None.")
+
+        self.url = url
+        self.file_request_intent = file_request_intent
+        self.allow_trailing_dot = allow_trailing_dot
+        self.allow_source_trailing_dot = allow_source_trailing_dot
+        self.version = version
+        self.file_range_write_from_url = file_range_write_from_url
+        kwargs.setdefault("sdk_moniker", "azurefilestorage/{}".format(VERSION))
+        self.polling_interval = kwargs.get("polling_interval", 30)
+        self._configure(**kwargs)
+
+    def _configure(self, **kwargs: Any) -> None:
+        self.user_agent_policy = kwargs.get("user_agent_policy") or policies.UserAgentPolicy(**kwargs)
+        self.headers_policy = kwargs.get("headers_policy") or policies.HeadersPolicy(**kwargs)
+        self.proxy_policy = kwargs.get("proxy_policy") or policies.ProxyPolicy(**kwargs)
+        self.logging_policy = kwargs.get("logging_policy") or policies.NetworkTraceLoggingPolicy(**kwargs)
+        self.http_logging_policy = kwargs.get("http_logging_policy") or policies.HttpLoggingPolicy(**kwargs)
+        self.custom_hook_policy = kwargs.get("custom_hook_policy") or policies.CustomHookPolicy(**kwargs)
+        self.redirect_policy = kwargs.get("redirect_policy") or policies.AsyncRedirectPolicy(**kwargs)
+        self.retry_policy = kwargs.get("retry_policy") or policies.AsyncRetryPolicy(**kwargs)
+        self.authentication_policy = kwargs.get("authentication_policy")
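+
+
+# A hypothetical construction sketch (not part of the generated file): this
+# configuration is normally built internally by the AzureFileStorage client,
+# but it can be instantiated directly. The account and share names below are
+# placeholders.
+#
+#     config = AzureFileStorageConfiguration(
+#         url="https://myaccount.file.core.windows.net/myshare",
+#         allow_trailing_dot=True,
+#     )
+#     assert config.version == "2025-05-05"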
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/fileshare/_generated/aio/_patch.py b/.venv/lib/python3.12/site-packages/azure/storage/fileshare/_generated/aio/_patch.py
new file mode 100644
index 00000000..f7dd3251
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/fileshare/_generated/aio/_patch.py
@@ -0,0 +1,20 @@
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------
+"""Customize generated code here.
+
+Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize
+"""
+from typing import List
+
+__all__: List[str] = []  # Add all objects you want publicly available to users at this package level
+
+
+def patch_sdk():
+    """Do not remove from this file.
+
+    `patch_sdk` is a last resort escape hatch that allows you to do customizations
+    you can't accomplish using the techniques described in
+    https://aka.ms/azsdk/python/dpcodegen/python/customize
+    """
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/fileshare/_generated/aio/operations/__init__.py b/.venv/lib/python3.12/site-packages/azure/storage/fileshare/_generated/aio/operations/__init__.py
new file mode 100644
index 00000000..092b7efd
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/fileshare/_generated/aio/operations/__init__.py
@@ -0,0 +1,31 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+# pylint: disable=wrong-import-position
+
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from ._patch import *  # pylint: disable=unused-wildcard-import
+
+from ._service_operations import ServiceOperations  # type: ignore
+from ._share_operations import ShareOperations  # type: ignore
+from ._directory_operations import DirectoryOperations  # type: ignore
+from ._file_operations import FileOperations  # type: ignore
+
+from ._patch import __all__ as _patch_all
+from ._patch import *
+from ._patch import patch_sdk as _patch_sdk
+
+__all__ = [
+    "ServiceOperations",
+    "ShareOperations",
+    "DirectoryOperations",
+    "FileOperations",
+]
+__all__.extend([p for p in _patch_all if p not in __all__])  # pyright: ignore
+_patch_sdk()
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/fileshare/_generated/aio/operations/_directory_operations.py b/.venv/lib/python3.12/site-packages/azure/storage/fileshare/_generated/aio/operations/_directory_operations.py
new file mode 100644
index 00000000..83a84985
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/fileshare/_generated/aio/operations/_directory_operations.py
@@ -0,0 +1,1056 @@
+# pylint: disable=too-many-lines
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+import sys
+from typing import Any, Callable, Dict, List, Literal, Optional, TypeVar, Union
+
+from azure.core import AsyncPipelineClient
+from azure.core.exceptions import (
+    ClientAuthenticationError,
+    HttpResponseError,
+    ResourceExistsError,
+    ResourceNotFoundError,
+    ResourceNotModifiedError,
+    map_error,
+)
+from azure.core.pipeline import PipelineResponse
+from azure.core.rest import AsyncHttpResponse, HttpRequest
+from azure.core.tracing.decorator_async import distributed_trace_async
+from azure.core.utils import case_insensitive_dict
+
+from ... import models as _models
+from ..._serialization import Deserializer, Serializer
+from ...operations._directory_operations import (
+    build_create_request,
+    build_delete_request,
+    build_force_close_handles_request,
+    build_get_properties_request,
+    build_list_files_and_directories_segment_request,
+    build_list_handles_request,
+    build_rename_request,
+    build_set_metadata_request,
+    build_set_properties_request,
+)
+from .._configuration import AzureFileStorageConfiguration
+
+if sys.version_info >= (3, 9):
+    from collections.abc import MutableMapping
+else:
+    from typing import MutableMapping  # type: ignore
+T = TypeVar("T")
+ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
+
+
+class DirectoryOperations:
+    """
+    .. warning::
+        **DO NOT** instantiate this class directly.
+
+        Instead, you should access the following operations through
+        :class:`~azure.storage.fileshare.aio.AzureFileStorage`'s
+        :attr:`directory` attribute.
+    """
+
+    models = _models
+
+    def __init__(self, *args, **kwargs) -> None:
+        input_args = list(args)
+        self._client: AsyncPipelineClient = input_args.pop(0) if input_args else kwargs.pop("client")
+        self._config: AzureFileStorageConfiguration = input_args.pop(0) if input_args else kwargs.pop("config")
+        self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer")
+        self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer")
+
+    @distributed_trace_async
+    async def create(
+        self,
+        timeout: Optional[int] = None,
+        metadata: Optional[Dict[str, str]] = None,
+        file_permission: str = "inherit",
+        file_permission_format: Optional[Union[str, _models.FilePermissionFormat]] = None,
+        file_permission_key: Optional[str] = None,
+        file_attributes: str = "none",
+        file_creation_time: str = "now",
+        file_last_write_time: str = "now",
+        file_change_time: Optional[str] = None,
+        owner: Optional[str] = None,
+        group: Optional[str] = None,
+        file_mode: Optional[str] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """Creates a new directory under the specified share or parent directory.
+
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
+         Timeouts for File Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param metadata: Name-value pairs to associate with a file storage object. Default value is
+         None.
+        :type metadata: dict[str, str]
+        :param file_permission: If specified, the permission (security descriptor) shall be set for
+         the directory/file. This header can be used if the permission size is <= 8 KB; otherwise the
+         x-ms-file-permission-key header shall be used. Default value: Inherit. If SDDL is specified
+         as input, it must have owner, group and dacl. Note: Only one of x-ms-file-permission or
+         x-ms-file-permission-key should be specified. Default value is "inherit".
+        :type file_permission: str
+        :param file_permission_format: Optional. Available for version 2023-06-01 and later. Specifies
+         the format in which the permission is returned. Acceptable values are SDDL or binary. If
+         x-ms-file-permission-format is unspecified or explicitly set to SDDL, the permission is
+         returned in SDDL format. If x-ms-file-permission-format is explicitly set to binary, the
+         permission is returned as a base64 string representing the binary encoding of the permission.
+         Known values are: "Sddl" and "Binary". Default value is None.
+        :type file_permission_format: str or ~azure.storage.fileshare.models.FilePermissionFormat
+        :param file_permission_key: Key of the permission to be set for the directory/file. Note: Only
+         one of the x-ms-file-permission or x-ms-file-permission-key should be specified. Default value
+         is None.
+        :type file_permission_key: str
+        :param file_attributes: If specified, the provided file attributes shall be set. Default value:
+         ‘Archive’ for file and ‘Directory’ for directory. ‘None’ can also be specified as default.
+         Default value is "none".
+        :type file_attributes: str
+        :param file_creation_time: Creation time for the file/directory. Default value: Now. Default
+         value is "now".
+        :type file_creation_time: str
+        :param file_last_write_time: Last write time for the file/directory. Default value: Now.
+         Default value is "now".
+        :type file_last_write_time: str
+        :param file_change_time: Change time for the file/directory. Default value: Now. Default value
+         is None.
+        :type file_change_time: str
+        :param owner: Optional, NFS only. The owner of the file or directory. Default value is None.
+        :type owner: str
+        :param group: Optional, NFS only. The owning group of the file or directory. Default value is
+         None.
+        :type group: str
+        :param file_mode: Optional, NFS only. The file mode of the file or directory. Default value is
+         None.
+        :type file_mode: str
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        restype: Literal["directory"] = kwargs.pop("restype", _params.pop("restype", "directory"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _request = build_create_request(
+            url=self._config.url,
+            timeout=timeout,
+            metadata=metadata,
+            file_permission=file_permission,
+            file_permission_format=file_permission_format,
+            file_permission_key=file_permission_key,
+            file_attributes=file_attributes,
+            file_creation_time=file_creation_time,
+            file_last_write_time=file_last_write_time,
+            file_change_time=file_change_time,
+            owner=owner,
+            group=group,
+            file_mode=file_mode,
+            allow_trailing_dot=self._config.allow_trailing_dot,
+            file_request_intent=self._config.file_request_intent,
+            restype=restype,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [201]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+        response_headers["x-ms-request-server-encrypted"] = self._deserialize(
+            "bool", response.headers.get("x-ms-request-server-encrypted")
+        )
+        response_headers["x-ms-file-permission-key"] = self._deserialize(
+            "str", response.headers.get("x-ms-file-permission-key")
+        )
+        response_headers["x-ms-file-attributes"] = self._deserialize(
+            "str", response.headers.get("x-ms-file-attributes")
+        )
+        response_headers["x-ms-file-creation-time"] = self._deserialize(
+            "str", response.headers.get("x-ms-file-creation-time")
+        )
+        response_headers["x-ms-file-last-write-time"] = self._deserialize(
+            "str", response.headers.get("x-ms-file-last-write-time")
+        )
+        response_headers["x-ms-file-change-time"] = self._deserialize(
+            "str", response.headers.get("x-ms-file-change-time")
+        )
+        response_headers["x-ms-file-id"] = self._deserialize("str", response.headers.get("x-ms-file-id"))
+        response_headers["x-ms-file-parent-id"] = self._deserialize("str", response.headers.get("x-ms-file-parent-id"))
+        response_headers["x-ms-mode"] = self._deserialize("str", response.headers.get("x-ms-mode"))
+        response_headers["x-ms-owner"] = self._deserialize("str", response.headers.get("x-ms-owner"))
+        response_headers["x-ms-group"] = self._deserialize("str", response.headers.get("x-ms-group"))
+        response_headers["x-ms-file-file-type"] = self._deserialize("str", response.headers.get("x-ms-file-file-type"))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
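+
+    # A hedged usage sketch (not generated code), assuming the generated async
+    # client from this package; the account/share/directory names are
+    # placeholders.
+    #
+    #     from azure.storage.fileshare._generated.aio import AzureFileStorage
+    #
+    #     async with AzureFileStorage(
+    #         url="https://myaccount.file.core.windows.net/myshare/newdir"
+    #     ) as client:
+    #         await client.directory.create(metadata={"project": "demo"})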
+
+    @distributed_trace_async
+    async def get_properties(
+        self, sharesnapshot: Optional[str] = None, timeout: Optional[int] = None, **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """Returns all system properties for the specified directory, and can also be used to check the
+        existence of a directory. The data returned does not include the files in the directory or any
+        subdirectories.
+
+        :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present,
+         specifies the share snapshot to query. Default value is None.
+        :type sharesnapshot: str
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
+         Timeouts for File Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        restype: Literal["directory"] = kwargs.pop("restype", _params.pop("restype", "directory"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _request = build_get_properties_request(
+            url=self._config.url,
+            sharesnapshot=sharesnapshot,
+            timeout=timeout,
+            allow_trailing_dot=self._config.allow_trailing_dot,
+            file_request_intent=self._config.file_request_intent,
+            restype=restype,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["x-ms-meta"] = self._deserialize("{str}", response.headers.get("x-ms-meta"))
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+        response_headers["x-ms-server-encrypted"] = self._deserialize(
+            "bool", response.headers.get("x-ms-server-encrypted")
+        )
+        response_headers["x-ms-file-attributes"] = self._deserialize(
+            "str", response.headers.get("x-ms-file-attributes")
+        )
+        response_headers["x-ms-file-creation-time"] = self._deserialize(
+            "str", response.headers.get("x-ms-file-creation-time")
+        )
+        response_headers["x-ms-file-last-write-time"] = self._deserialize(
+            "str", response.headers.get("x-ms-file-last-write-time")
+        )
+        response_headers["x-ms-file-change-time"] = self._deserialize(
+            "str", response.headers.get("x-ms-file-change-time")
+        )
+        response_headers["x-ms-file-permission-key"] = self._deserialize(
+            "str", response.headers.get("x-ms-file-permission-key")
+        )
+        response_headers["x-ms-file-id"] = self._deserialize("str", response.headers.get("x-ms-file-id"))
+        response_headers["x-ms-file-parent-id"] = self._deserialize("str", response.headers.get("x-ms-file-parent-id"))
+        response_headers["x-ms-mode"] = self._deserialize("str", response.headers.get("x-ms-mode"))
+        response_headers["x-ms-owner"] = self._deserialize("str", response.headers.get("x-ms-owner"))
+        response_headers["x-ms-group"] = self._deserialize("str", response.headers.get("x-ms-group"))
+        response_headers["x-ms-file-file-type"] = self._deserialize("str", response.headers.get("x-ms-file-file-type"))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
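+
+    # A sketch of an existence/properties check using the optional `cls`
+    # response callback (assumes a `client` instance of the generated async
+    # AzureFileStorage):
+    #
+    #     captured = {}
+    #     await client.directory.get_properties(
+    #         cls=lambda pipeline_response, body, headers: captured.update(headers)
+    #     )
+    #     print(captured.get("ETag"), captured.get("x-ms-file-id"))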
+
+    @distributed_trace_async
+    async def delete(self, timeout: Optional[int] = None, **kwargs: Any) -> None:
+        # pylint: disable=line-too-long
+        """Removes the specified empty directory. Note that the directory must be empty before it can be
+        deleted.
+
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
+         Timeouts for File Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        restype: Literal["directory"] = kwargs.pop("restype", _params.pop("restype", "directory"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _request = build_delete_request(
+            url=self._config.url,
+            timeout=timeout,
+            allow_trailing_dot=self._config.allow_trailing_dot,
+            file_request_intent=self._config.file_request_intent,
+            restype=restype,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [202]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
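+
+    # A sketch of tolerant deletion (assumes a `client` instance): per the
+    # error_map above, a 404 surfaces as ResourceNotFoundError.
+    #
+    #     from azure.core.exceptions import ResourceNotFoundError
+    #
+    #     try:
+    #         await client.directory.delete()
+    #     except ResourceNotFoundError:
+    #         pass  # the directory was already gone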
+
+    @distributed_trace_async
+    async def set_properties(
+        self,
+        timeout: Optional[int] = None,
+        file_permission: str = "inherit",
+        file_permission_format: Optional[Union[str, _models.FilePermissionFormat]] = None,
+        file_permission_key: Optional[str] = None,
+        file_attributes: str = "none",
+        file_creation_time: str = "now",
+        file_last_write_time: str = "now",
+        file_change_time: Optional[str] = None,
+        owner: Optional[str] = None,
+        group: Optional[str] = None,
+        file_mode: Optional[str] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """Sets properties on the directory.
+
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
+         Timeouts for File Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param file_permission: If specified, the permission (security descriptor) shall be set for
+         the directory/file. This header can be used if the permission size is <= 8 KB; otherwise the
+         x-ms-file-permission-key header shall be used. Default value: Inherit. If SDDL is specified
+         as input, it must have owner, group and dacl. Note: Only one of x-ms-file-permission or
+         x-ms-file-permission-key should be specified. Default value is "inherit".
+        :type file_permission: str
+        :param file_permission_format: Optional. Available for version 2023-06-01 and later. Specifies
+         the format in which the permission is returned. Acceptable values are SDDL or binary. If
+         x-ms-file-permission-format is unspecified or explicitly set to SDDL, the permission is
+         returned in SDDL format. If x-ms-file-permission-format is explicitly set to binary, the
+         permission is returned as a base64 string representing the binary encoding of the permission.
+         Known values are: "Sddl" and "Binary". Default value is None.
+        :type file_permission_format: str or ~azure.storage.fileshare.models.FilePermissionFormat
+        :param file_permission_key: Key of the permission to be set for the directory/file. Note: Only
+         one of the x-ms-file-permission or x-ms-file-permission-key should be specified. Default value
+         is None.
+        :type file_permission_key: str
+        :param file_attributes: If specified, the provided file attributes shall be set. Default value:
+         ‘Archive’ for file and ‘Directory’ for directory. ‘None’ can also be specified as default.
+         Default value is "none".
+        :type file_attributes: str
+        :param file_creation_time: Creation time for the file/directory. Default value: Now. Default
+         value is "now".
+        :type file_creation_time: str
+        :param file_last_write_time: Last write time for the file/directory. Default value: Now.
+         Default value is "now".
+        :type file_last_write_time: str
+        :param file_change_time: Change time for the file/directory. Default value: Now. Default value
+         is None.
+        :type file_change_time: str
+        :param owner: Optional, NFS only. The owner of the file or directory. Default value is None.
+        :type owner: str
+        :param group: Optional, NFS only. The owning group of the file or directory. Default value is
+         None.
+        :type group: str
+        :param file_mode: Optional, NFS only. The file mode of the file or directory. Default value is
+         None.
+        :type file_mode: str
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        restype: Literal["directory"] = kwargs.pop("restype", _params.pop("restype", "directory"))
+        comp: Literal["properties"] = kwargs.pop("comp", _params.pop("comp", "properties"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _request = build_set_properties_request(
+            url=self._config.url,
+            timeout=timeout,
+            file_permission=file_permission,
+            file_permission_format=file_permission_format,
+            file_permission_key=file_permission_key,
+            file_attributes=file_attributes,
+            file_creation_time=file_creation_time,
+            file_last_write_time=file_last_write_time,
+            file_change_time=file_change_time,
+            owner=owner,
+            group=group,
+            file_mode=file_mode,
+            allow_trailing_dot=self._config.allow_trailing_dot,
+            file_request_intent=self._config.file_request_intent,
+            restype=restype,
+            comp=comp,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+        response_headers["x-ms-request-server-encrypted"] = self._deserialize(
+            "bool", response.headers.get("x-ms-request-server-encrypted")
+        )
+        response_headers["x-ms-file-permission-key"] = self._deserialize(
+            "str", response.headers.get("x-ms-file-permission-key")
+        )
+        response_headers["x-ms-file-attributes"] = self._deserialize(
+            "str", response.headers.get("x-ms-file-attributes")
+        )
+        response_headers["x-ms-file-creation-time"] = self._deserialize(
+            "str", response.headers.get("x-ms-file-creation-time")
+        )
+        response_headers["x-ms-file-last-write-time"] = self._deserialize(
+            "str", response.headers.get("x-ms-file-last-write-time")
+        )
+        response_headers["x-ms-file-change-time"] = self._deserialize(
+            "str", response.headers.get("x-ms-file-change-time")
+        )
+        response_headers["x-ms-file-id"] = self._deserialize("str", response.headers.get("x-ms-file-id"))
+        response_headers["x-ms-file-parent-id"] = self._deserialize("str", response.headers.get("x-ms-file-parent-id"))
+        response_headers["x-ms-mode"] = self._deserialize("str", response.headers.get("x-ms-mode"))
+        response_headers["x-ms-owner"] = self._deserialize("str", response.headers.get("x-ms-owner"))
+        response_headers["x-ms-group"] = self._deserialize("str", response.headers.get("x-ms-group"))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
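+
+    # A sketch (assumes a `client` instance): the string sentinels follow the
+    # parameter defaults documented above.
+    #
+    #     await client.directory.set_properties(
+    #         file_creation_time="now",
+    #         file_last_write_time="now",
+    #     )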
+
+    @distributed_trace_async
+    async def set_metadata(
+        self, timeout: Optional[int] = None, metadata: Optional[Dict[str, str]] = None, **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """Updates user defined metadata for the specified directory.
+
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
+         Timeouts for File Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param metadata: Name-value pairs to associate with a file storage object. Default value is
+         None.
+        :type metadata: dict[str, str]
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        restype: Literal["directory"] = kwargs.pop("restype", _params.pop("restype", "directory"))
+        comp: Literal["metadata"] = kwargs.pop("comp", _params.pop("comp", "metadata"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _request = build_set_metadata_request(
+            url=self._config.url,
+            timeout=timeout,
+            metadata=metadata,
+            allow_trailing_dot=self._config.allow_trailing_dot,
+            file_request_intent=self._config.file_request_intent,
+            restype=restype,
+            comp=comp,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+        response_headers["x-ms-request-server-encrypted"] = self._deserialize(
+            "bool", response.headers.get("x-ms-request-server-encrypted")
+        )
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
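+
+    # A sketch (assumes a `client` instance); the metadata keys and values are
+    # placeholders.
+    #
+    #     await client.directory.set_metadata(metadata={"owner": "team-a"})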
+
+    @distributed_trace_async
+    async def list_files_and_directories_segment(
+        self,
+        prefix: Optional[str] = None,
+        sharesnapshot: Optional[str] = None,
+        marker: Optional[str] = None,
+        maxresults: Optional[int] = None,
+        timeout: Optional[int] = None,
+        include: Optional[List[Union[str, _models.ListFilesIncludeType]]] = None,
+        include_extended_info: Optional[bool] = None,
+        **kwargs: Any
+    ) -> _models.ListFilesAndDirectoriesSegmentResponse:
+        # pylint: disable=line-too-long
+        """Returns a list of files or directories under the specified share or directory. It lists the
+        contents only for a single level of the directory hierarchy.
+
+        :param prefix: Filters the results to return only entries whose name begins with the specified
+         prefix. Default value is None.
+        :type prefix: str
+        :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present,
+         specifies the share snapshot to query. Default value is None.
+        :type sharesnapshot: str
+        :param marker: A string value that identifies the portion of the list to be returned with the
+         next list operation. The operation returns a marker value within the response body if the list
+         returned was not complete. The marker value may then be used in a subsequent call to request
+         the next set of list items. The marker value is opaque to the client. Default value is None.
+        :type marker: str
+        :param maxresults: Specifies the maximum number of entries to return. If the request does not
+         specify maxresults, or specifies a value greater than 5,000, the server will return up to 5,000
+         items. Default value is None.
+        :type maxresults: int
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
+         Timeouts for File Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param include: Include this parameter to specify one or more datasets to include in the
+         response. Default value is None.
+        :type include: list[str or ~azure.storage.fileshare.models.ListFilesIncludeType]
+        :param include_extended_info: Include extended information. Default value is None.
+        :type include_extended_info: bool
+        :return: ListFilesAndDirectoriesSegmentResponse or the result of cls(response)
+        :rtype: ~azure.storage.fileshare.models.ListFilesAndDirectoriesSegmentResponse
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        restype: Literal["directory"] = kwargs.pop("restype", _params.pop("restype", "directory"))
+        comp: Literal["list"] = kwargs.pop("comp", _params.pop("comp", "list"))
+        cls: ClsType[_models.ListFilesAndDirectoriesSegmentResponse] = kwargs.pop("cls", None)
+
+        _request = build_list_files_and_directories_segment_request(
+            url=self._config.url,
+            prefix=prefix,
+            sharesnapshot=sharesnapshot,
+            marker=marker,
+            maxresults=maxresults,
+            timeout=timeout,
+            include=include,
+            include_extended_info=include_extended_info,
+            allow_trailing_dot=self._config.allow_trailing_dot,
+            file_request_intent=self._config.file_request_intent,
+            restype=restype,
+            comp=comp,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["Content-Type"] = self._deserialize("str", response.headers.get("Content-Type"))
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+
+        deserialized = self._deserialize("ListFilesAndDirectoriesSegmentResponse", pipeline_response.http_response)
+
+        if cls:
+            return cls(pipeline_response, deserialized, response_headers)  # type: ignore
+
+        return deserialized  # type: ignore
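+
+    # A pagination sketch using the continuation marker (assumes a `client`
+    # instance; the `segment` and `next_marker` attribute names follow the
+    # generated ListFilesAndDirectoriesSegmentResponse model).
+    #
+    #     marker = None
+    #     while True:
+    #         page = await client.directory.list_files_and_directories_segment(
+    #             marker=marker, maxresults=100
+    #         )
+    #         entries = page.segment.directory_items + page.segment.file_items
+    #         ...  # process entries
+    #         if not page.next_marker:
+    #             break
+    #         marker = page.next_marker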
+
+    @distributed_trace_async
+    async def list_handles(
+        self,
+        marker: Optional[str] = None,
+        maxresults: Optional[int] = None,
+        timeout: Optional[int] = None,
+        sharesnapshot: Optional[str] = None,
+        recursive: Optional[bool] = None,
+        **kwargs: Any
+    ) -> _models.ListHandlesResponse:
+        # pylint: disable=line-too-long
+        """Lists handles for directory.
+
+        :param marker: A string value that identifies the portion of the list to be returned with the
+         next list operation. The operation returns a marker value within the response body if the list
+         returned was not complete. The marker value may then be used in a subsequent call to request
+         the next set of list items. The marker value is opaque to the client. Default value is None.
+        :type marker: str
+        :param maxresults: Specifies the maximum number of entries to return. If the request does not
+         specify maxresults, or specifies a value greater than 5,000, the server will return up to 5,000
+         items. Default value is None.
+        :type maxresults: int
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
+         Timeouts for File Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present,
+         specifies the share snapshot to query. Default value is None.
+        :type sharesnapshot: str
+        :param recursive: Specifies whether the operation should apply to the directory specified in
+         the URI, its files, its subdirectories and their files. Default value is None.
+        :type recursive: bool
+        :return: ListHandlesResponse or the result of cls(response)
+        :rtype: ~azure.storage.fileshare.models.ListHandlesResponse
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        comp: Literal["listhandles"] = kwargs.pop("comp", _params.pop("comp", "listhandles"))
+        cls: ClsType[_models.ListHandlesResponse] = kwargs.pop("cls", None)
+
+        _request = build_list_handles_request(
+            url=self._config.url,
+            marker=marker,
+            maxresults=maxresults,
+            timeout=timeout,
+            sharesnapshot=sharesnapshot,
+            recursive=recursive,
+            allow_trailing_dot=self._config.allow_trailing_dot,
+            file_request_intent=self._config.file_request_intent,
+            comp=comp,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["Content-Type"] = self._deserialize("str", response.headers.get("Content-Type"))
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+
+        deserialized = self._deserialize("ListHandlesResponse", pipeline_response.http_response)
+
+        if cls:
+            return cls(pipeline_response, deserialized, response_headers)  # type: ignore
+
+        return deserialized  # type: ignore
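+
+    # A sketch (assumes a `client` instance; `handle_list` and `handle_id`
+    # follow the generated ListHandlesResponse/HandleItem models):
+    #
+    #     handles = await client.directory.list_handles(recursive=True)
+    #     for handle in handles.handle_list or []:
+    #         print(handle.handle_id)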
+
+    @distributed_trace_async
+    async def force_close_handles(
+        self,
+        handle_id: str,
+        timeout: Optional[int] = None,
+        marker: Optional[str] = None,
+        sharesnapshot: Optional[str] = None,
+        recursive: Optional[bool] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """Closes all handles open for given directory.
+
+        :param handle_id: Specifies handle ID opened on the file or directory to be closed. Asterisk
+         (‘*’) is a wildcard that specifies all handles. Required.
+        :type handle_id: str
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
+         Timeouts for File Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param marker: A string value that identifies the portion of the list to be returned with the
+         next list operation. The operation returns a marker value within the response body if the list
+         returned was not complete. The marker value may then be used in a subsequent call to request
+         the next set of list items. The marker value is opaque to the client. Default value is None.
+        :type marker: str
+        :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present,
+         specifies the share snapshot to query. Default value is None.
+        :type sharesnapshot: str
+        :param recursive: Specifies whether the operation should apply to the directory specified in
+         the URI, its files, its subdirectories and their files. Default value is None.
+        :type recursive: bool
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        comp: Literal["forceclosehandles"] = kwargs.pop("comp", _params.pop("comp", "forceclosehandles"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _request = build_force_close_handles_request(
+            url=self._config.url,
+            handle_id=handle_id,
+            timeout=timeout,
+            marker=marker,
+            sharesnapshot=sharesnapshot,
+            recursive=recursive,
+            allow_trailing_dot=self._config.allow_trailing_dot,
+            file_request_intent=self._config.file_request_intent,
+            comp=comp,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+        response_headers["x-ms-marker"] = self._deserialize("str", response.headers.get("x-ms-marker"))
+        response_headers["x-ms-number-of-handles-closed"] = self._deserialize(
+            "int", response.headers.get("x-ms-number-of-handles-closed")
+        )
+        response_headers["x-ms-number-of-handles-failed"] = self._deserialize(
+            "int", response.headers.get("x-ms-number-of-handles-failed")
+        )
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
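+
+    # A sketch (assumes a `client` instance): "*" closes all handles, and the
+    # x-ms-marker response header (capturable via the `cls` callback) indicates
+    # whether further calls are needed.
+    #
+    #     await client.directory.force_close_handles("*", recursive=True)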
+
+    @distributed_trace_async
+    async def rename(
+        self,
+        rename_source: str,
+        timeout: Optional[int] = None,
+        replace_if_exists: Optional[bool] = None,
+        ignore_read_only: Optional[bool] = None,
+        file_permission: str = "inherit",
+        file_permission_format: Optional[Union[str, _models.FilePermissionFormat]] = None,
+        file_permission_key: Optional[str] = None,
+        metadata: Optional[Dict[str, str]] = None,
+        source_lease_access_conditions: Optional[_models.SourceLeaseAccessConditions] = None,
+        destination_lease_access_conditions: Optional[_models.DestinationLeaseAccessConditions] = None,
+        copy_file_smb_info: Optional[_models.CopyFileSmbInfo] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """Renames a directory.
+
+        :param rename_source: Specifies the URI-style path of the source file, up to 2 KB in length.
+         Required.
+        :type rename_source: str
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
+         Timeouts for File Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param replace_if_exists: Optional. A boolean value that controls whether this request
+         overwrites the destination file if it already exists. If true, the rename will succeed and
+         will overwrite the destination file. If not provided, or if false and the destination file
+         does exist, the request will not overwrite the destination file. If provided and the
+         destination file doesn’t exist, the rename will succeed. Note: This value does not override
+         the x-ms-file-copy-ignore-read-only header value. Default value is None.
+        :type replace_if_exists: bool
+        :param ignore_read_only: Optional. A boolean value that specifies whether the ReadOnly
+         attribute on a preexisting destination file should be respected. If true, the rename will
+         succeed, otherwise, a previous file at the destination with the ReadOnly attribute set will
+         cause the rename to fail. Default value is None.
+        :type ignore_read_only: bool
+        :param file_permission: If specified, the permission (security descriptor) shall be set for
+         the directory/file. This header can be used if the permission size is <= 8 KB; otherwise the
+         x-ms-file-permission-key header shall be used. Default value: Inherit. If SDDL is specified
+         as input, it must have owner, group and dacl. Note: Only one of x-ms-file-permission or
+         x-ms-file-permission-key should be specified. Default value is "inherit".
+        :type file_permission: str
+        :param file_permission_format: Optional. Available for version 2023-06-01 and later. Specifies
+         the format in which the permission is returned. Acceptable values are SDDL or binary. If
+         x-ms-file-permission-format is unspecified or explicitly set to SDDL, the permission is
+         returned in SDDL format. If x-ms-file-permission-format is explicitly set to binary, the
+         permission is returned as a base64 string representing the binary encoding of the permission.
+         Known values are: "Sddl" and "Binary". Default value is None.
+        :type file_permission_format: str or ~azure.storage.fileshare.models.FilePermissionFormat
+        :param file_permission_key: Key of the permission to be set for the directory/file. Note: Only
+         one of the x-ms-file-permission or x-ms-file-permission-key should be specified. Default value
+         is None.
+        :type file_permission_key: str
+        :param metadata: Name-value pairs to associate with a file storage object. Default value is
+         None.
+        :type metadata: dict[str, str]
+        :param source_lease_access_conditions: Parameter group. Default value is None.
+        :type source_lease_access_conditions:
+         ~azure.storage.fileshare.models.SourceLeaseAccessConditions
+        :param destination_lease_access_conditions: Parameter group. Default value is None.
+        :type destination_lease_access_conditions:
+         ~azure.storage.fileshare.models.DestinationLeaseAccessConditions
+        :param copy_file_smb_info: Parameter group. Default value is None.
+        :type copy_file_smb_info: ~azure.storage.fileshare.models.CopyFileSmbInfo
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        restype: Literal["directory"] = kwargs.pop("restype", _params.pop("restype", "directory"))
+        comp: Literal["rename"] = kwargs.pop("comp", _params.pop("comp", "rename"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _source_lease_id = None
+        _destination_lease_id = None
+        _file_attributes = None
+        _file_creation_time = None
+        _file_last_write_time = None
+        _file_change_time = None
+        if source_lease_access_conditions is not None:
+            _source_lease_id = source_lease_access_conditions.source_lease_id
+        if destination_lease_access_conditions is not None:
+            _destination_lease_id = destination_lease_access_conditions.destination_lease_id
+        if copy_file_smb_info is not None:
+            _file_attributes = copy_file_smb_info.file_attributes
+            _file_change_time = copy_file_smb_info.file_change_time
+            _file_creation_time = copy_file_smb_info.file_creation_time
+            _file_last_write_time = copy_file_smb_info.file_last_write_time
+
+        _request = build_rename_request(
+            url=self._config.url,
+            rename_source=rename_source,
+            timeout=timeout,
+            replace_if_exists=replace_if_exists,
+            ignore_read_only=ignore_read_only,
+            source_lease_id=_source_lease_id,
+            destination_lease_id=_destination_lease_id,
+            file_attributes=_file_attributes,
+            file_creation_time=_file_creation_time,
+            file_last_write_time=_file_last_write_time,
+            file_change_time=_file_change_time,
+            file_permission=file_permission,
+            file_permission_format=file_permission_format,
+            file_permission_key=file_permission_key,
+            metadata=metadata,
+            allow_trailing_dot=self._config.allow_trailing_dot,
+            allow_source_trailing_dot=self._config.allow_source_trailing_dot,
+            file_request_intent=self._config.file_request_intent,
+            restype=restype,
+            comp=comp,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+        response_headers["x-ms-request-server-encrypted"] = self._deserialize(
+            "bool", response.headers.get("x-ms-request-server-encrypted")
+        )
+        response_headers["x-ms-file-permission-key"] = self._deserialize(
+            "str", response.headers.get("x-ms-file-permission-key")
+        )
+        response_headers["x-ms-file-attributes"] = self._deserialize(
+            "str", response.headers.get("x-ms-file-attributes")
+        )
+        response_headers["x-ms-file-creation-time"] = self._deserialize(
+            "str", response.headers.get("x-ms-file-creation-time")
+        )
+        response_headers["x-ms-file-last-write-time"] = self._deserialize(
+            "str", response.headers.get("x-ms-file-last-write-time")
+        )
+        response_headers["x-ms-file-change-time"] = self._deserialize(
+            "str", response.headers.get("x-ms-file-change-time")
+        )
+        response_headers["x-ms-file-id"] = self._deserialize("str", response.headers.get("x-ms-file-id"))
+        response_headers["x-ms-file-parent-id"] = self._deserialize("str", response.headers.get("x-ms-file-parent-id"))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
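+
+    # Illustrative usage sketch (not part of the generated code): this rename
+    # operation is normally driven through the public ShareDirectoryClient
+    # convenience layer; "<account>", "myshare" and the directory names below
+    # are placeholders, and the calls belong inside an async function.
+    #
+    #     from azure.storage.fileshare.aio import ShareDirectoryClient
+    #
+    #     directory_client = ShareDirectoryClient(
+    #         account_url="https://<account>.file.core.windows.net",
+    #         share_name="myshare",
+    #         directory_path="olddir",
+    #         credential="<account-key>",
+    #     )
+    #     # Returns a new client addressing the renamed directory.
+    #     renamed = await directory_client.rename_directory("newdir")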
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/fileshare/_generated/aio/operations/_file_operations.py b/.venv/lib/python3.12/site-packages/azure/storage/fileshare/_generated/aio/operations/_file_operations.py
new file mode 100644
index 00000000..93f91f24
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/fileshare/_generated/aio/operations/_file_operations.py
@@ -0,0 +1,2518 @@
+# pylint: disable=too-many-lines
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+import sys
+from typing import Any, AsyncIterator, Callable, Dict, IO, Literal, Optional, TypeVar, Union
+
+from azure.core import AsyncPipelineClient
+from azure.core.exceptions import (
+    ClientAuthenticationError,
+    HttpResponseError,
+    ResourceExistsError,
+    ResourceNotFoundError,
+    ResourceNotModifiedError,
+    StreamClosedError,
+    StreamConsumedError,
+    map_error,
+)
+from azure.core.pipeline import PipelineResponse
+from azure.core.rest import AsyncHttpResponse, HttpRequest
+from azure.core.tracing.decorator_async import distributed_trace_async
+from azure.core.utils import case_insensitive_dict
+
+from ... import models as _models
+from ..._serialization import Deserializer, Serializer
+from ...operations._file_operations import (
+    build_abort_copy_request,
+    build_acquire_lease_request,
+    build_break_lease_request,
+    build_change_lease_request,
+    build_create_hard_link_request,
+    build_create_request,
+    build_create_symbolic_link_request,
+    build_delete_request,
+    build_download_request,
+    build_force_close_handles_request,
+    build_get_properties_request,
+    build_get_range_list_request,
+    build_get_symbolic_link_request,
+    build_list_handles_request,
+    build_release_lease_request,
+    build_rename_request,
+    build_set_http_headers_request,
+    build_set_metadata_request,
+    build_start_copy_request,
+    build_upload_range_from_url_request,
+    build_upload_range_request,
+)
+from .._configuration import AzureFileStorageConfiguration
+
+if sys.version_info >= (3, 9):
+    from collections.abc import MutableMapping
+else:
+    from typing import MutableMapping  # type: ignore
+T = TypeVar("T")
+ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
+
+
+class FileOperations:  # pylint: disable=too-many-public-methods
+    """
+    .. warning::
+        **DO NOT** instantiate this class directly.
+
+        Instead, you should access the following operations through
+        :class:`~azure.storage.fileshare.aio.AzureFileStorage`'s
+        :attr:`file` attribute.
+    """
+
+    models = _models
+
+    def __init__(self, *args, **kwargs) -> None:
+        input_args = list(args)
+        self._client: AsyncPipelineClient = input_args.pop(0) if input_args else kwargs.pop("client")
+        self._config: AzureFileStorageConfiguration = input_args.pop(0) if input_args else kwargs.pop("config")
+        self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer")
+        self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer")
+
+    @distributed_trace_async
+    async def create(
+        self,
+        file_content_length: int,
+        timeout: Optional[int] = None,
+        metadata: Optional[Dict[str, str]] = None,
+        file_permission: str = "inherit",
+        file_permission_format: Optional[Union[str, _models.FilePermissionFormat]] = None,
+        file_permission_key: Optional[str] = None,
+        file_attributes: str = "none",
+        file_creation_time: str = "now",
+        file_last_write_time: str = "now",
+        file_change_time: Optional[str] = None,
+        owner: Optional[str] = None,
+        group: Optional[str] = None,
+        file_mode: Optional[str] = None,
+        nfs_file_type: Optional[Union[str, _models.NfsFileType]] = None,
+        file_http_headers: Optional[_models.FileHTTPHeaders] = None,
+        lease_access_conditions: Optional[_models.LeaseAccessConditions] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """Creates a new file or replaces a file. Note it only initializes the file with no content.
+
+        :param file_content_length: Specifies the maximum size for the file, up to 4 TB. Required.
+        :type file_content_length: int
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
+         Timeouts for File Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param metadata: A name-value pair to associate with a file storage object. Default value is
+         None.
+        :type metadata: dict[str, str]
+        :param file_permission: If specified, the permission (security descriptor) shall be set for
+         the directory/file. This header can be used if the permission size is <= 8KB; otherwise the
+         x-ms-file-permission-key header shall be used. Default value: Inherit. If SDDL is specified
+         as input, it must have an owner, group, and dacl. Note: Only one of x-ms-file-permission or
+         x-ms-file-permission-key should be specified. Default value is "inherit".
+        :type file_permission: str
+        :param file_permission_format: Optional. Available for version 2023-06-01 and later. Specifies
+         the format of the permission supplied in x-ms-file-permission. Acceptable values are SDDL
+         or binary. If x-ms-file-permission-format is unspecified or explicitly set to SDDL, the
+         permission is interpreted as SDDL; if it is explicitly set to binary, the permission must
+         be a base64 string representing the binary encoding of the permission. Known values are:
+         "Sddl" and "Binary". Default value is None.
+        :type file_permission_format: str or ~azure.storage.fileshare.models.FilePermissionFormat
+        :param file_permission_key: Key of the permission to be set for the directory/file. Note: Only
+         one of the x-ms-file-permission or x-ms-file-permission-key should be specified. Default value
+         is None.
+        :type file_permission_key: str
+        :param file_attributes: If specified, the provided file attributes shall be set. Default value:
+         ‘Archive’ for file and ‘Directory’ for directory. ‘None’ can also be specified as default.
+         Default value is "none".
+        :type file_attributes: str
+        :param file_creation_time: Creation time for the file/directory. Default value: Now. Default
+         value is "now".
+        :type file_creation_time: str
+        :param file_last_write_time: Last write time for the file/directory. Default value: Now.
+         Default value is "now".
+        :type file_last_write_time: str
+        :param file_change_time: Change time for the file/directory. Default value: Now. Default value
+         is None.
+        :type file_change_time: str
+        :param owner: Optional, NFS only. The owner of the file or directory. Default value is None.
+        :type owner: str
+        :param group: Optional, NFS only. The owning group of the file or directory. Default value is
+         None.
+        :type group: str
+        :param file_mode: Optional, NFS only. The file mode of the file or directory. Default value is
+         None.
+        :type file_mode: str
+        :param nfs_file_type: Optional, NFS only. Type of the file or directory. Known values are:
+         "Regular", "Directory", and "SymLink". Default value is None.
+        :type nfs_file_type: str or ~azure.storage.fileshare.models.NfsFileType
+        :param file_http_headers: Parameter group. Default value is None.
+        :type file_http_headers: ~azure.storage.fileshare.models.FileHTTPHeaders
+        :param lease_access_conditions: Parameter group. Default value is None.
+        :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+        _params = kwargs.pop("params", {}) or {}
+
+        file_type_constant: Literal["file"] = kwargs.pop("file_type_constant", _headers.pop("x-ms-type", "file"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _file_content_type = None
+        _file_content_encoding = None
+        _file_content_language = None
+        _file_cache_control = None
+        _file_content_md5 = None
+        _file_content_disposition = None
+        _lease_id = None
+        if file_http_headers is not None:
+            _file_cache_control = file_http_headers.file_cache_control
+            _file_content_disposition = file_http_headers.file_content_disposition
+            _file_content_encoding = file_http_headers.file_content_encoding
+            _file_content_language = file_http_headers.file_content_language
+            _file_content_md5 = file_http_headers.file_content_md5
+            _file_content_type = file_http_headers.file_content_type
+        if lease_access_conditions is not None:
+            _lease_id = lease_access_conditions.lease_id
+
+        _request = build_create_request(
+            url=self._config.url,
+            file_content_length=file_content_length,
+            timeout=timeout,
+            file_content_type=_file_content_type,
+            file_content_encoding=_file_content_encoding,
+            file_content_language=_file_content_language,
+            file_cache_control=_file_cache_control,
+            file_content_md5=_file_content_md5,
+            file_content_disposition=_file_content_disposition,
+            metadata=metadata,
+            file_permission=file_permission,
+            file_permission_format=file_permission_format,
+            file_permission_key=file_permission_key,
+            file_attributes=file_attributes,
+            file_creation_time=file_creation_time,
+            file_last_write_time=file_last_write_time,
+            file_change_time=file_change_time,
+            lease_id=_lease_id,
+            owner=owner,
+            group=group,
+            file_mode=file_mode,
+            nfs_file_type=nfs_file_type,
+            allow_trailing_dot=self._config.allow_trailing_dot,
+            file_request_intent=self._config.file_request_intent,
+            file_type_constant=file_type_constant,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [201]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+        response_headers["x-ms-request-server-encrypted"] = self._deserialize(
+            "bool", response.headers.get("x-ms-request-server-encrypted")
+        )
+        response_headers["x-ms-file-permission-key"] = self._deserialize(
+            "str", response.headers.get("x-ms-file-permission-key")
+        )
+        response_headers["x-ms-file-attributes"] = self._deserialize(
+            "str", response.headers.get("x-ms-file-attributes")
+        )
+        response_headers["x-ms-file-creation-time"] = self._deserialize(
+            "str", response.headers.get("x-ms-file-creation-time")
+        )
+        response_headers["x-ms-file-last-write-time"] = self._deserialize(
+            "str", response.headers.get("x-ms-file-last-write-time")
+        )
+        response_headers["x-ms-file-change-time"] = self._deserialize(
+            "str", response.headers.get("x-ms-file-change-time")
+        )
+        response_headers["x-ms-file-id"] = self._deserialize("str", response.headers.get("x-ms-file-id"))
+        response_headers["x-ms-file-parent-id"] = self._deserialize("str", response.headers.get("x-ms-file-parent-id"))
+        response_headers["x-ms-mode"] = self._deserialize("str", response.headers.get("x-ms-mode"))
+        response_headers["x-ms-owner"] = self._deserialize("str", response.headers.get("x-ms-owner"))
+        response_headers["x-ms-group"] = self._deserialize("str", response.headers.get("x-ms-group"))
+        response_headers["x-ms-file-file-type"] = self._deserialize("str", response.headers.get("x-ms-file-file-type"))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
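+    # Illustrative usage sketch (not part of the generated code): the public
+    # ShareFileClient wraps this operation via create_file. Note that create
+    # only allocates the file; content is uploaded separately. "<account>",
+    # "myshare" and the path are placeholders; run inside an async function.
+    #
+    #     from azure.storage.fileshare.aio import ShareFileClient
+    #
+    #     file_client = ShareFileClient(
+    #         account_url="https://<account>.file.core.windows.net",
+    #         share_name="myshare",
+    #         file_path="dir/hello.txt",
+    #         credential="<account-key>",
+    #     )
+    #     await file_client.create_file(size=1024, metadata={"source": "demo"})
+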
+    @distributed_trace_async
+    async def download(
+        self,
+        timeout: Optional[int] = None,
+        range: Optional[str] = None,
+        range_get_content_md5: Optional[bool] = None,
+        structured_body_type: Optional[str] = None,
+        lease_access_conditions: Optional[_models.LeaseAccessConditions] = None,
+        **kwargs: Any
+    ) -> AsyncIterator[bytes]:
+        # pylint: disable=line-too-long
+        """Reads or downloads a file from the system, including its metadata and properties.
+
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
+         Timeouts for File Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param range: Return file data only from the specified byte range. Default value is None.
+        :type range: str
+        :param range_get_content_md5: When this header is set to true and specified together with the
+         Range header, the service returns the MD5 hash for the range, as long as the range is less than
+         or equal to 4 MB in size. Default value is None.
+        :type range_get_content_md5: bool
+        :param structured_body_type: Specifies the response content should be returned as a structured
+         message and specifies the message schema version and properties. Default value is None.
+        :type structured_body_type: str
+        :param lease_access_conditions: Parameter group. Default value is None.
+        :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions
+        :return: AsyncIterator[bytes] or the result of cls(response)
+        :rtype: AsyncIterator[bytes]
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = kwargs.pop("params", {}) or {}
+
+        cls: ClsType[AsyncIterator[bytes]] = kwargs.pop("cls", None)
+
+        _lease_id = None
+        if lease_access_conditions is not None:
+            _lease_id = lease_access_conditions.lease_id
+
+        _request = build_download_request(
+            url=self._config.url,
+            timeout=timeout,
+            range=range,
+            range_get_content_md5=range_get_content_md5,
+            structured_body_type=structured_body_type,
+            lease_id=_lease_id,
+            allow_trailing_dot=self._config.allow_trailing_dot,
+            file_request_intent=self._config.file_request_intent,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _decompress = kwargs.pop("decompress", True)
+        _stream = True
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200, 206]:
+            try:
+                await response.read()  # Load the body in memory and close the socket
+            except (StreamConsumedError, StreamClosedError):
+                pass
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["x-ms-meta"] = self._deserialize("{str}", response.headers.get("x-ms-meta"))
+        response_headers["Content-Length"] = self._deserialize("int", response.headers.get("Content-Length"))
+        response_headers["Content-Type"] = self._deserialize("str", response.headers.get("Content-Type"))
+        response_headers["Content-Range"] = self._deserialize("str", response.headers.get("Content-Range"))
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Content-MD5"] = self._deserialize("bytearray", response.headers.get("Content-MD5"))
+        response_headers["Content-Encoding"] = self._deserialize("str", response.headers.get("Content-Encoding"))
+        response_headers["Cache-Control"] = self._deserialize("str", response.headers.get("Cache-Control"))
+        response_headers["Content-Disposition"] = self._deserialize("str", response.headers.get("Content-Disposition"))
+        response_headers["Content-Language"] = self._deserialize("str", response.headers.get("Content-Language"))
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Accept-Ranges"] = self._deserialize("str", response.headers.get("Accept-Ranges"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+        response_headers["x-ms-copy-completion-time"] = self._deserialize(
+            "rfc-1123", response.headers.get("x-ms-copy-completion-time")
+        )
+        response_headers["x-ms-copy-status-description"] = self._deserialize(
+            "str", response.headers.get("x-ms-copy-status-description")
+        )
+        response_headers["x-ms-copy-id"] = self._deserialize("str", response.headers.get("x-ms-copy-id"))
+        response_headers["x-ms-copy-progress"] = self._deserialize("str", response.headers.get("x-ms-copy-progress"))
+        response_headers["x-ms-copy-source"] = self._deserialize("str", response.headers.get("x-ms-copy-source"))
+        response_headers["x-ms-copy-status"] = self._deserialize("str", response.headers.get("x-ms-copy-status"))
+        response_headers["x-ms-content-md5"] = self._deserialize("bytearray", response.headers.get("x-ms-content-md5"))
+        response_headers["x-ms-server-encrypted"] = self._deserialize(
+            "bool", response.headers.get("x-ms-server-encrypted")
+        )
+        response_headers["x-ms-file-attributes"] = self._deserialize(
+            "str", response.headers.get("x-ms-file-attributes")
+        )
+        response_headers["x-ms-file-creation-time"] = self._deserialize(
+            "str", response.headers.get("x-ms-file-creation-time")
+        )
+        response_headers["x-ms-file-last-write-time"] = self._deserialize(
+            "str", response.headers.get("x-ms-file-last-write-time")
+        )
+        response_headers["x-ms-file-change-time"] = self._deserialize(
+            "str", response.headers.get("x-ms-file-change-time")
+        )
+        response_headers["x-ms-file-permission-key"] = self._deserialize(
+            "str", response.headers.get("x-ms-file-permission-key")
+        )
+        response_headers["x-ms-file-id"] = self._deserialize("str", response.headers.get("x-ms-file-id"))
+        response_headers["x-ms-file-parent-id"] = self._deserialize("str", response.headers.get("x-ms-file-parent-id"))
+        response_headers["x-ms-lease-duration"] = self._deserialize("str", response.headers.get("x-ms-lease-duration"))
+        response_headers["x-ms-lease-state"] = self._deserialize("str", response.headers.get("x-ms-lease-state"))
+        response_headers["x-ms-lease-status"] = self._deserialize("str", response.headers.get("x-ms-lease-status"))
+        response_headers["x-ms-structured-body"] = self._deserialize(
+            "str", response.headers.get("x-ms-structured-body")
+        )
+        response_headers["x-ms-structured-content-length"] = self._deserialize(
+            "int", response.headers.get("x-ms-structured-content-length")
+        )
+        response_headers["x-ms-mode"] = self._deserialize("str", response.headers.get("x-ms-mode"))
+        response_headers["x-ms-owner"] = self._deserialize("str", response.headers.get("x-ms-owner"))
+        response_headers["x-ms-group"] = self._deserialize("str", response.headers.get("x-ms-group"))
+        response_headers["x-ms-link-count"] = self._deserialize("int", response.headers.get("x-ms-link-count"))
+
+        deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
+
+        if cls:
+            return cls(pipeline_response, deserialized, response_headers)  # type: ignore
+
+        return deserialized  # type: ignore
+
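+    # Illustrative usage sketch (not part of the generated code): downloads are
+    # normally made through ShareFileClient.download_file, which drives this
+    # streaming operation; file_client as constructed in the sketch above.
+    #
+    #     downloader = await file_client.download_file()
+    #     data = await downloader.readall()
+    #
+    #     # Ranged read, corresponding to the Range header handled here:
+    #     partial = await file_client.download_file(offset=0, length=512)
+    #     first_512_bytes = await partial.readall()
+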
+    @distributed_trace_async
+    async def get_properties(
+        self,
+        sharesnapshot: Optional[str] = None,
+        timeout: Optional[int] = None,
+        lease_access_conditions: Optional[_models.LeaseAccessConditions] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """Returns all user-defined metadata, standard HTTP properties, and system properties for the
+        file. It does not return the content of the file.
+
+        :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present,
+         specifies the share snapshot to query. Default value is None.
+        :type sharesnapshot: str
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
+         Timeouts for File Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param lease_access_conditions: Parameter group. Default value is None.
+        :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = kwargs.pop("params", {}) or {}
+
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _lease_id = None
+        if lease_access_conditions is not None:
+            _lease_id = lease_access_conditions.lease_id
+
+        _request = build_get_properties_request(
+            url=self._config.url,
+            sharesnapshot=sharesnapshot,
+            timeout=timeout,
+            lease_id=_lease_id,
+            allow_trailing_dot=self._config.allow_trailing_dot,
+            file_request_intent=self._config.file_request_intent,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["x-ms-meta"] = self._deserialize("{str}", response.headers.get("x-ms-meta"))
+        response_headers["x-ms-type"] = self._deserialize("str", response.headers.get("x-ms-type"))
+        response_headers["Content-Length"] = self._deserialize("int", response.headers.get("Content-Length"))
+        response_headers["Content-Type"] = self._deserialize("str", response.headers.get("Content-Type"))
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Content-MD5"] = self._deserialize("bytearray", response.headers.get("Content-MD5"))
+        response_headers["Content-Encoding"] = self._deserialize("str", response.headers.get("Content-Encoding"))
+        response_headers["Cache-Control"] = self._deserialize("str", response.headers.get("Cache-Control"))
+        response_headers["Content-Disposition"] = self._deserialize("str", response.headers.get("Content-Disposition"))
+        response_headers["Content-Language"] = self._deserialize("str", response.headers.get("Content-Language"))
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+        response_headers["x-ms-copy-completion-time"] = self._deserialize(
+            "rfc-1123", response.headers.get("x-ms-copy-completion-time")
+        )
+        response_headers["x-ms-copy-status-description"] = self._deserialize(
+            "str", response.headers.get("x-ms-copy-status-description")
+        )
+        response_headers["x-ms-copy-id"] = self._deserialize("str", response.headers.get("x-ms-copy-id"))
+        response_headers["x-ms-copy-progress"] = self._deserialize("str", response.headers.get("x-ms-copy-progress"))
+        response_headers["x-ms-copy-source"] = self._deserialize("str", response.headers.get("x-ms-copy-source"))
+        response_headers["x-ms-copy-status"] = self._deserialize("str", response.headers.get("x-ms-copy-status"))
+        response_headers["x-ms-server-encrypted"] = self._deserialize(
+            "bool", response.headers.get("x-ms-server-encrypted")
+        )
+        response_headers["x-ms-file-attributes"] = self._deserialize(
+            "str", response.headers.get("x-ms-file-attributes")
+        )
+        response_headers["x-ms-file-creation-time"] = self._deserialize(
+            "str", response.headers.get("x-ms-file-creation-time")
+        )
+        response_headers["x-ms-file-last-write-time"] = self._deserialize(
+            "str", response.headers.get("x-ms-file-last-write-time")
+        )
+        response_headers["x-ms-file-change-time"] = self._deserialize(
+            "str", response.headers.get("x-ms-file-change-time")
+        )
+        response_headers["x-ms-file-permission-key"] = self._deserialize(
+            "str", response.headers.get("x-ms-file-permission-key")
+        )
+        response_headers["x-ms-file-id"] = self._deserialize("str", response.headers.get("x-ms-file-id"))
+        response_headers["x-ms-file-parent-id"] = self._deserialize("str", response.headers.get("x-ms-file-parent-id"))
+        response_headers["x-ms-lease-duration"] = self._deserialize("str", response.headers.get("x-ms-lease-duration"))
+        response_headers["x-ms-lease-state"] = self._deserialize("str", response.headers.get("x-ms-lease-state"))
+        response_headers["x-ms-lease-status"] = self._deserialize("str", response.headers.get("x-ms-lease-status"))
+        response_headers["x-ms-mode"] = self._deserialize("str", response.headers.get("x-ms-mode"))
+        response_headers["x-ms-owner"] = self._deserialize("str", response.headers.get("x-ms-owner"))
+        response_headers["x-ms-group"] = self._deserialize("str", response.headers.get("x-ms-group"))
+        response_headers["x-ms-link-count"] = self._deserialize("int", response.headers.get("x-ms-link-count"))
+        response_headers["x-ms-file-file-type"] = self._deserialize("str", response.headers.get("x-ms-file-file-type"))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
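+    # Illustrative usage sketch (not part of the generated code): the headers
+    # deserialized above surface as a FileProperties object on the public
+    # client; file_client as in the earlier sketch.
+    #
+    #     props = await file_client.get_file_properties()
+    #     print(props.size, props.etag, props.last_modified)
+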
+    @distributed_trace_async
+    async def delete(
+        self,
+        timeout: Optional[int] = None,
+        lease_access_conditions: Optional[_models.LeaseAccessConditions] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """removes the file from the storage account.
+
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
+         Timeouts for File Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param lease_access_conditions: Parameter group. Default value is None.
+        :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = kwargs.pop("params", {}) or {}
+
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _lease_id = None
+        if lease_access_conditions is not None:
+            _lease_id = lease_access_conditions.lease_id
+
+        _request = build_delete_request(
+            url=self._config.url,
+            timeout=timeout,
+            lease_id=_lease_id,
+            allow_trailing_dot=self._config.allow_trailing_dot,
+            file_request_intent=self._config.file_request_intent,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [202]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+        response_headers["x-ms-link-count"] = self._deserialize("int", response.headers.get("x-ms-link-count"))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
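+    # Illustrative usage sketch (not part of the generated code); file_client
+    # as in the earlier sketch. Deletion is immediate once the service accepts
+    # the request with 202.
+    #
+    #     await file_client.delete_file()
+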
+    @distributed_trace_async
+    async def set_http_headers(
+        self,
+        timeout: Optional[int] = None,
+        file_content_length: Optional[int] = None,
+        file_permission: str = "inherit",
+        file_permission_format: Optional[Union[str, _models.FilePermissionFormat]] = None,
+        file_permission_key: Optional[str] = None,
+        file_attributes: str = "none",
+        file_creation_time: str = "now",
+        file_last_write_time: str = "now",
+        file_change_time: Optional[str] = None,
+        owner: Optional[str] = None,
+        group: Optional[str] = None,
+        file_mode: Optional[str] = None,
+        file_http_headers: Optional[_models.FileHTTPHeaders] = None,
+        lease_access_conditions: Optional[_models.LeaseAccessConditions] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """Sets HTTP headers on the file.
+
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
+         Timeouts for File Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param file_content_length: Resizes a file to the specified size. If the specified byte value
+         is less than the current size of the file, then all ranges above the specified byte value are
+         cleared. Default value is None.
+        :type file_content_length: int
+        :param file_permission: If specified, the permission (security descriptor) shall be set for
+         the directory/file. This header can be used if the permission size is <= 8KB; otherwise the
+         x-ms-file-permission-key header shall be used. Default value: Inherit. If SDDL is specified
+         as input, it must have an owner, group, and dacl. Note: Only one of x-ms-file-permission or
+         x-ms-file-permission-key should be specified. Default value is "inherit".
+        :type file_permission: str
+        :param file_permission_format: Optional. Available for version 2023-06-01 and later. Specifies
+         the format of the permission supplied in x-ms-file-permission. Acceptable values are SDDL
+         or binary. If x-ms-file-permission-format is unspecified or explicitly set to SDDL, the
+         permission is interpreted as SDDL; if it is explicitly set to binary, the permission must
+         be a base64 string representing the binary encoding of the permission. Known values are:
+         "Sddl" and "Binary". Default value is None.
+        :type file_permission_format: str or ~azure.storage.fileshare.models.FilePermissionFormat
+        :param file_permission_key: Key of the permission to be set for the directory/file. Note: Only
+         one of the x-ms-file-permission or x-ms-file-permission-key should be specified. Default value
+         is None.
+        :type file_permission_key: str
+        :param file_attributes: If specified, the provided file attributes shall be set. Default value:
+         ‘Archive’ for file and ‘Directory’ for directory. ‘None’ can also be specified as default.
+         Default value is "none".
+        :type file_attributes: str
+        :param file_creation_time: Creation time for the file/directory. Default value: Now. Default
+         value is "now".
+        :type file_creation_time: str
+        :param file_last_write_time: Last write time for the file/directory. Default value: Now.
+         Default value is "now".
+        :type file_last_write_time: str
+        :param file_change_time: Change time for the file/directory. Default value: Now. Default value
+         is None.
+        :type file_change_time: str
+        :param owner: Optional, NFS only. The owner of the file or directory. Default value is None.
+        :type owner: str
+        :param group: Optional, NFS only. The owning group of the file or directory. Default value is
+         None.
+        :type group: str
+        :param file_mode: Optional, NFS only. The file mode of the file or directory. Default value is
+         None.
+        :type file_mode: str
+        :param file_http_headers: Parameter group. Default value is None.
+        :type file_http_headers: ~azure.storage.fileshare.models.FileHTTPHeaders
+        :param lease_access_conditions: Parameter group. Default value is None.
+        :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        comp: Literal["properties"] = kwargs.pop("comp", _params.pop("comp", "properties"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _file_content_type = None
+        _file_content_encoding = None
+        _file_content_language = None
+        _file_cache_control = None
+        _file_content_md5 = None
+        _file_content_disposition = None
+        _lease_id = None
+        if file_http_headers is not None:
+            _file_cache_control = file_http_headers.file_cache_control
+            _file_content_disposition = file_http_headers.file_content_disposition
+            _file_content_encoding = file_http_headers.file_content_encoding
+            _file_content_language = file_http_headers.file_content_language
+            _file_content_md5 = file_http_headers.file_content_md5
+            _file_content_type = file_http_headers.file_content_type
+        if lease_access_conditions is not None:
+            _lease_id = lease_access_conditions.lease_id
+
+        _request = build_set_http_headers_request(
+            url=self._config.url,
+            timeout=timeout,
+            file_content_length=file_content_length,
+            file_content_type=_file_content_type,
+            file_content_encoding=_file_content_encoding,
+            file_content_language=_file_content_language,
+            file_cache_control=_file_cache_control,
+            file_content_md5=_file_content_md5,
+            file_content_disposition=_file_content_disposition,
+            file_permission=file_permission,
+            file_permission_format=file_permission_format,
+            file_permission_key=file_permission_key,
+            file_attributes=file_attributes,
+            file_creation_time=file_creation_time,
+            file_last_write_time=file_last_write_time,
+            file_change_time=file_change_time,
+            lease_id=_lease_id,
+            owner=owner,
+            group=group,
+            file_mode=file_mode,
+            allow_trailing_dot=self._config.allow_trailing_dot,
+            file_request_intent=self._config.file_request_intent,
+            comp=comp,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+        response_headers["x-ms-request-server-encrypted"] = self._deserialize(
+            "bool", response.headers.get("x-ms-request-server-encrypted")
+        )
+        response_headers["x-ms-file-permission-key"] = self._deserialize(
+            "str", response.headers.get("x-ms-file-permission-key")
+        )
+        response_headers["x-ms-file-attributes"] = self._deserialize(
+            "str", response.headers.get("x-ms-file-attributes")
+        )
+        response_headers["x-ms-file-creation-time"] = self._deserialize(
+            "str", response.headers.get("x-ms-file-creation-time")
+        )
+        response_headers["x-ms-file-last-write-time"] = self._deserialize(
+            "str", response.headers.get("x-ms-file-last-write-time")
+        )
+        response_headers["x-ms-file-change-time"] = self._deserialize(
+            "str", response.headers.get("x-ms-file-change-time")
+        )
+        response_headers["x-ms-file-id"] = self._deserialize("str", response.headers.get("x-ms-file-id"))
+        response_headers["x-ms-file-parent-id"] = self._deserialize("str", response.headers.get("x-ms-file-parent-id"))
+        response_headers["x-ms-mode"] = self._deserialize("str", response.headers.get("x-ms-mode"))
+        response_headers["x-ms-owner"] = self._deserialize("str", response.headers.get("x-ms-owner"))
+        response_headers["x-ms-group"] = self._deserialize("str", response.headers.get("x-ms-group"))
+        response_headers["x-ms-link-count"] = self._deserialize("int", response.headers.get("x-ms-link-count"))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
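+    # Illustrative usage sketch (not part of the generated code): the
+    # file_http_headers parameter group corresponds to ContentSettings on the
+    # public client; file_client as in the earlier sketch.
+    #
+    #     from azure.storage.fileshare import ContentSettings
+    #
+    #     await file_client.set_http_headers(
+    #         content_settings=ContentSettings(
+    #             content_type="text/plain",
+    #             cache_control="no-cache",
+    #         )
+    #     )
+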
+    @distributed_trace_async
+    async def set_metadata(
+        self,
+        timeout: Optional[int] = None,
+        metadata: Optional[Dict[str, str]] = None,
+        lease_access_conditions: Optional[_models.LeaseAccessConditions] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """Updates user-defined metadata for the specified file.
+
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
+         Timeouts for File Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param metadata: A name-value pair to associate with a file storage object. Default value is
+         None.
+        :type metadata: dict[str, str]
+        :param lease_access_conditions: Parameter group. Default value is None.
+        :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        comp: Literal["metadata"] = kwargs.pop("comp", _params.pop("comp", "metadata"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _lease_id = None
+        if lease_access_conditions is not None:
+            _lease_id = lease_access_conditions.lease_id
+
+        _request = build_set_metadata_request(
+            url=self._config.url,
+            timeout=timeout,
+            metadata=metadata,
+            lease_id=_lease_id,
+            allow_trailing_dot=self._config.allow_trailing_dot,
+            file_request_intent=self._config.file_request_intent,
+            comp=comp,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+        response_headers["x-ms-request-server-encrypted"] = self._deserialize(
+            "bool", response.headers.get("x-ms-request-server-encrypted")
+        )
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
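+    # Illustrative usage sketch (not part of the generated code): replaces all
+    # user-defined metadata on the file; file_client as in the earlier sketch.
+    #
+    #     await file_client.set_file_metadata({"category": "logs"})
+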
+    @distributed_trace_async
+    async def acquire_lease(
+        self,
+        timeout: Optional[int] = None,
+        duration: Optional[int] = None,
+        proposed_lease_id: Optional[str] = None,
+        request_id_parameter: Optional[str] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """[Update] The Lease File operation establishes and manages a lock on a file for write and delete
+        operations.
+
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
+         Timeouts for File Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param duration: Specifies the duration of the lease, in seconds, or negative one (-1) for a
+         lease that never expires. A non-infinite lease can be between 15 and 60 seconds. A lease
+         duration cannot be changed using renew or change. Default value is None.
+        :type duration: int
+        :param proposed_lease_id: Proposed lease ID, in a GUID string format. The File service returns
+         400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid
+         Constructor (String) for a list of valid GUID string formats. Default value is None.
+        :type proposed_lease_id: str
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        comp: Literal["lease"] = kwargs.pop("comp", _params.pop("comp", "lease"))
+        action: Literal["acquire"] = kwargs.pop("action", _headers.pop("x-ms-lease-action", "acquire"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _request = build_acquire_lease_request(
+            url=self._config.url,
+            timeout=timeout,
+            duration=duration,
+            proposed_lease_id=proposed_lease_id,
+            request_id_parameter=request_id_parameter,
+            allow_trailing_dot=self._config.allow_trailing_dot,
+            file_request_intent=self._config.file_request_intent,
+            comp=comp,
+            action=action,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [201]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["x-ms-lease-id"] = self._deserialize("str", response.headers.get("x-ms-lease-id"))
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
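+    # Illustrative usage sketch (not part of the generated code): on the public
+    # client, acquire_lease returns a ShareLeaseClient whose id gates later
+    # writes and deletes; file_client as in the earlier sketch, the GUID is a
+    # placeholder.
+    #
+    #     import uuid
+    #
+    #     lease = await file_client.acquire_lease(lease_id=str(uuid.uuid4()))
+    #     # Lease-guarded call, e.g. deleting while the lease is held:
+    #     await file_client.delete_file(lease=lease)
+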
+    @distributed_trace_async
+    async def release_lease(
+        self, lease_id: str, timeout: Optional[int] = None, request_id_parameter: Optional[str] = None, **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """[Update] The Lease File operation establishes and manages a lock on a file for write and delete
+        operations.
+
+        :param lease_id: Specifies the current lease ID on the resource. Required.
+        :type lease_id: str
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
+         Timeouts for File Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        comp: Literal["lease"] = kwargs.pop("comp", _params.pop("comp", "lease"))
+        action: Literal["release"] = kwargs.pop("action", _headers.pop("x-ms-lease-action", "release"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _request = build_release_lease_request(
+            url=self._config.url,
+            lease_id=lease_id,
+            timeout=timeout,
+            request_id_parameter=request_id_parameter,
+            allow_trailing_dot=self._config.allow_trailing_dot,
+            file_request_intent=self._config.file_request_intent,
+            comp=comp,
+            action=action,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
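+    # Illustrative sketch (assumed names): releasing the lease acquired above
+    # immediately frees the file for other writers; a 200 response carries the
+    # usual ETag/Last-Modified headers deserialized above:
+    #
+    #     await ops.release_lease(lease_id=lease_id)
+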
+    @distributed_trace_async
+    async def change_lease(
+        self,
+        lease_id: str,
+        timeout: Optional[int] = None,
+        proposed_lease_id: Optional[str] = None,
+        request_id_parameter: Optional[str] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """[Update] The Lease File operation establishes and manages a lock on a file for write and delete
+        operations.
+
+        :param lease_id: Specifies the current lease ID on the resource. Required.
+        :type lease_id: str
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
+         Timeouts for File Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param proposed_lease_id: Proposed lease ID, in a GUID string format. The File service returns
+         400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid
+         Constructor (String) for a list of valid GUID string formats. Default value is None.
+        :type proposed_lease_id: str
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        comp: Literal["lease"] = kwargs.pop("comp", _params.pop("comp", "lease"))
+        action: Literal["change"] = kwargs.pop("action", _headers.pop("x-ms-lease-action", "change"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _request = build_change_lease_request(
+            url=self._config.url,
+            lease_id=lease_id,
+            timeout=timeout,
+            proposed_lease_id=proposed_lease_id,
+            request_id_parameter=request_id_parameter,
+            allow_trailing_dot=self._config.allow_trailing_dot,
+            file_request_intent=self._config.file_request_intent,
+            comp=comp,
+            action=action,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["x-ms-lease-id"] = self._deserialize("str", response.headers.get("x-ms-lease-id"))
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
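+    # Illustrative sketch (assumed names): rotating the lease ID without
+    # dropping the lock; the service swaps the current ID for the proposed one:
+    #
+    #     new_id = str(uuid.uuid4())
+    #     await ops.change_lease(lease_id=lease_id, proposed_lease_id=new_id)
+    #     lease_id = new_id
+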
+    @distributed_trace_async
+    async def break_lease(
+        self,
+        timeout: Optional[int] = None,
+        request_id_parameter: Optional[str] = None,
+        lease_access_conditions: Optional[_models.LeaseAccessConditions] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """[Update] The Lease File operation establishes and manages a lock on a file for write and delete
+        operations.
+
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
+         Timeouts for File Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param lease_access_conditions: Parameter group. Default value is None.
+        :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        comp: Literal["lease"] = kwargs.pop("comp", _params.pop("comp", "lease"))
+        action: Literal["break"] = kwargs.pop("action", _headers.pop("x-ms-lease-action", "break"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _lease_id = None
+        if lease_access_conditions is not None:
+            _lease_id = lease_access_conditions.lease_id
+
+        _request = build_break_lease_request(
+            url=self._config.url,
+            timeout=timeout,
+            lease_id=_lease_id,
+            request_id_parameter=request_id_parameter,
+            allow_trailing_dot=self._config.allow_trailing_dot,
+            file_request_intent=self._config.file_request_intent,
+            comp=comp,
+            action=action,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [202]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["x-ms-lease-id"] = self._deserialize("str", response.headers.get("x-ms-lease-id"))
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
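+    # Illustrative sketch (assumed names): breaking the lease does not require
+    # knowing the current lease ID, though one may be passed as an access
+    # condition; success is a 202:
+    #
+    #     await ops.break_lease()
+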
+    @distributed_trace_async
+    async def upload_range(
+        self,
+        range: str,
+        content_length: int,
+        timeout: Optional[int] = None,
+        file_range_write: Union[str, _models.FileRangeWriteType] = "update",
+        content_md5: Optional[bytes] = None,
+        file_last_written_mode: Optional[Union[str, _models.FileLastWrittenMode]] = None,
+        structured_body_type: Optional[str] = None,
+        structured_content_length: Optional[int] = None,
+        lease_access_conditions: Optional[_models.LeaseAccessConditions] = None,
+        optionalbody: Optional[IO[bytes]] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """Upload a range of bytes to a file.
+
+        :param range: Specifies the range of bytes to be written. Both the start and end of the range
+         must be specified. For an update operation, the range can be up to 4 MB in size. For a clear
+         operation, the range can be up to the full size of the file. The File service accepts
+         only a single byte range for the Range and 'x-ms-range' headers, and the byte range must be
+         specified in the following format: bytes=startByte-endByte. Required.
+        :type range: str
+        :param content_length: Specifies the number of bytes being transmitted in the request body.
+         When the x-ms-write header is set to clear, the value of this header must be set to zero.
+         Required.
+        :type content_length: int
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
+         Timeouts for File Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param file_range_write: Specify one of the following options: - Update: Writes the bytes
+         specified by the request body into the specified range. The Range and Content-Length headers
+         must match to perform the update. - Clear: Clears the specified range and releases the space
+         used in storage for that range. To clear a range, set the Content-Length header to zero, and
+         set the Range header to a value that indicates the range to clear, up to maximum file size.
+         Known values are: "update" and "clear". Default value is "update".
+        :type file_range_write: str or ~azure.storage.fileshare.models.FileRangeWriteType
+        :param content_md5: An MD5 hash of the content. This hash is used to verify the integrity of
+         the data during transport. When the Content-MD5 header is specified, the File service compares
+         the hash of the content that has arrived with the header value that was sent. If the two hashes
+         do not match, the operation will fail with error code 400 (Bad Request). Default value is None.
+        :type content_md5: bytes
+        :param file_last_written_mode: If the file last write time should be preserved or overwritten.
+         Known values are: "Now" and "Preserve". Default value is None.
+        :type file_last_written_mode: str or ~azure.storage.fileshare.models.FileLastWrittenMode
+        :param structured_body_type: Required if the request body is a structured message. Specifies
+         the message schema version and properties. Default value is None.
+        :type structured_body_type: str
+        :param structured_content_length: Required if the request body is a structured message.
+         Specifies the length of the blob/file content inside the message body. Will always be smaller
+         than Content-Length. Default value is None.
+        :type structured_content_length: int
+        :param lease_access_conditions: Parameter group. Default value is None.
+        :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions
+        :param optionalbody: The bytes to write for an update operation. Default value is None.
+        :type optionalbody: IO[bytes]
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        comp: Literal["range"] = kwargs.pop("comp", _params.pop("comp", "range"))
+        content_type: str = kwargs.pop("content_type", _headers.pop("Content-Type", "application/octet-stream"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _lease_id = None
+        if lease_access_conditions is not None:
+            _lease_id = lease_access_conditions.lease_id
+        _content = optionalbody
+
+        _request = build_upload_range_request(
+            url=self._config.url,
+            range=range,
+            content_length=content_length,
+            timeout=timeout,
+            file_range_write=file_range_write,
+            content_md5=content_md5,
+            lease_id=_lease_id,
+            file_last_written_mode=file_last_written_mode,
+            structured_body_type=structured_body_type,
+            structured_content_length=structured_content_length,
+            allow_trailing_dot=self._config.allow_trailing_dot,
+            file_request_intent=self._config.file_request_intent,
+            comp=comp,
+            content_type=content_type,
+            version=self._config.version,
+            content=_content,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [201]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["Content-MD5"] = self._deserialize("bytearray", response.headers.get("Content-MD5"))
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+        response_headers["x-ms-request-server-encrypted"] = self._deserialize(
+            "bool", response.headers.get("x-ms-request-server-encrypted")
+        )
+        response_headers["x-ms-file-last-write-time"] = self._deserialize(
+            "str", response.headers.get("x-ms-file-last-write-time")
+        )
+        response_headers["x-ms-structured-body"] = self._deserialize(
+            "str", response.headers.get("x-ms-structured-body")
+        )
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
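+    # Illustrative sketch (assumed names; `io` imported): writing four bytes at
+    # offset 0. Per the docstring above, the range and content_length must
+    # agree; a clear would instead pass file_range_write="clear" with
+    # content_length=0 and no body:
+    #
+    #     await ops.upload_range(
+    #         range="bytes=0-3",
+    #         content_length=4,
+    #         optionalbody=io.BytesIO(b"abcd"),
+    #     )
+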
+    @distributed_trace_async
+    async def upload_range_from_url(
+        self,
+        range: str,
+        copy_source: str,
+        content_length: int,
+        timeout: Optional[int] = None,
+        source_range: Optional[str] = None,
+        source_content_crc64: Optional[bytes] = None,
+        copy_source_authorization: Optional[str] = None,
+        file_last_written_mode: Optional[Union[str, _models.FileLastWrittenMode]] = None,
+        source_modified_access_conditions: Optional[_models.SourceModifiedAccessConditions] = None,
+        lease_access_conditions: Optional[_models.LeaseAccessConditions] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """Upload a range of bytes to a file where the contents are read from a URL.
+
+        :param range: Writes data to the specified byte range in the file. Required.
+        :type range: str
+        :param copy_source: Specifies the URL of the source file or blob, up to 2 KB in length. To copy
+         a file to another file within the same storage account, you may use Shared Key to authenticate
+         the source file. If you are copying a file from another storage account, or if you are copying
+         a blob from the same storage account or another storage account, then you must authenticate the
+         source file or blob using a shared access signature. If the source is a public blob, no
+         authentication is required to perform the copy operation. A file in a share snapshot can also
+         be specified as a copy source. Required.
+        :type copy_source: str
+        :param content_length: Specifies the number of bytes being transmitted in the request body.
+         When the x-ms-write header is set to clear, the value of this header must be set to zero.
+         Required.
+        :type content_length: int
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
+         Timeouts for File Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param source_range: The range of bytes to read from the copy source. Default value is None.
+        :type source_range: str
+        :param source_content_crc64: Specify the crc64 calculated for the range of bytes that must be
+         read from the copy source. Default value is None.
+        :type source_content_crc64: bytes
+        :param copy_source_authorization: Only Bearer type is supported. Credentials should be a valid
+         OAuth access token to copy source. Default value is None.
+        :type copy_source_authorization: str
+        :param file_last_written_mode: If the file last write time should be preserved or overwritten.
+         Known values are: "Now" and "Preserve". Default value is None.
+        :type file_last_written_mode: str or ~azure.storage.fileshare.models.FileLastWrittenMode
+        :param source_modified_access_conditions: Parameter group. Default value is None.
+        :type source_modified_access_conditions:
+         ~azure.storage.fileshare.models.SourceModifiedAccessConditions
+        :param lease_access_conditions: Parameter group. Default value is None.
+        :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        comp: Literal["range"] = kwargs.pop("comp", _params.pop("comp", "range"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _source_if_match_crc64 = None
+        _source_if_none_match_crc64 = None
+        _lease_id = None
+        if source_modified_access_conditions is not None:
+            _source_if_match_crc64 = source_modified_access_conditions.source_if_match_crc64
+            _source_if_none_match_crc64 = source_modified_access_conditions.source_if_none_match_crc64
+        if lease_access_conditions is not None:
+            _lease_id = lease_access_conditions.lease_id
+
+        _request = build_upload_range_from_url_request(
+            url=self._config.url,
+            range=range,
+            copy_source=copy_source,
+            content_length=content_length,
+            timeout=timeout,
+            source_range=source_range,
+            source_content_crc64=source_content_crc64,
+            source_if_match_crc64=_source_if_match_crc64,
+            source_if_none_match_crc64=_source_if_none_match_crc64,
+            lease_id=_lease_id,
+            copy_source_authorization=copy_source_authorization,
+            file_last_written_mode=file_last_written_mode,
+            allow_trailing_dot=self._config.allow_trailing_dot,
+            allow_source_trailing_dot=self._config.allow_source_trailing_dot,
+            file_request_intent=self._config.file_request_intent,
+            comp=comp,
+            file_range_write_from_url=self._config.file_range_write_from_url,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [201]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["x-ms-content-crc64"] = self._deserialize(
+            "bytearray", response.headers.get("x-ms-content-crc64")
+        )
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+        response_headers["x-ms-request-server-encrypted"] = self._deserialize(
+            "bool", response.headers.get("x-ms-request-server-encrypted")
+        )
+        response_headers["x-ms-file-last-write-time"] = self._deserialize(
+            "str", response.headers.get("x-ms-file-last-write-time")
+        )
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
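+    # Illustrative sketch (assumed names and placeholder URL): a server-side
+    # write of the first 1 MiB from a SAS-authorized source into the same
+    # offsets of this file. content_length is 0 because the request carries no
+    # body; the data is read from copy_source:
+    #
+    #     await ops.upload_range_from_url(
+    #         range="bytes=0-1048575",
+    #         copy_source="https://src.file.core.windows.net/share/f.bin?<SAS>",
+    #         content_length=0,
+    #         source_range="bytes=0-1048575",
+    #     )
+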
+    @distributed_trace_async
+    async def get_range_list(
+        self,
+        sharesnapshot: Optional[str] = None,
+        prevsharesnapshot: Optional[str] = None,
+        timeout: Optional[int] = None,
+        range: Optional[str] = None,
+        support_rename: Optional[bool] = None,
+        lease_access_conditions: Optional[_models.LeaseAccessConditions] = None,
+        **kwargs: Any
+    ) -> _models.ShareFileRangeList:
+        # pylint: disable=line-too-long
+        """Returns the list of valid ranges for a file.
+
+        :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present,
+         specifies the share snapshot to query. Default value is None.
+        :type sharesnapshot: str
+        :param prevsharesnapshot: The previous snapshot parameter is an opaque DateTime value that,
+         when present, specifies the previous snapshot. Default value is None.
+        :type prevsharesnapshot: str
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
+         Timeouts for File Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param range: Specifies the range of bytes over which to list ranges, inclusively. Default
+         value is None.
+        :type range: str
+        :param support_rename: This header is allowed only when PrevShareSnapshot query parameter is
+         set. Determines whether the changed ranges for a file that has been renamed or moved between
+         the target snapshot (or the live file) and the previous snapshot should be listed. If the value
+         is true, the valid changed ranges for the file will be returned. If the value is false, the
+         operation will result in a failure with 409 (Conflict) response. The default value is false.
+         Default value is None.
+        :type support_rename: bool
+        :param lease_access_conditions: Parameter group. Default value is None.
+        :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions
+        :return: ShareFileRangeList or the result of cls(response)
+        :rtype: ~azure.storage.fileshare.models.ShareFileRangeList
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        comp: Literal["rangelist"] = kwargs.pop("comp", _params.pop("comp", "rangelist"))
+        cls: ClsType[_models.ShareFileRangeList] = kwargs.pop("cls", None)
+
+        _lease_id = None
+        if lease_access_conditions is not None:
+            _lease_id = lease_access_conditions.lease_id
+
+        _request = build_get_range_list_request(
+            url=self._config.url,
+            sharesnapshot=sharesnapshot,
+            prevsharesnapshot=prevsharesnapshot,
+            timeout=timeout,
+            range=range,
+            lease_id=_lease_id,
+            support_rename=support_rename,
+            allow_trailing_dot=self._config.allow_trailing_dot,
+            file_request_intent=self._config.file_request_intent,
+            comp=comp,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["x-ms-content-length"] = self._deserialize("int", response.headers.get("x-ms-content-length"))
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+
+        deserialized = self._deserialize("ShareFileRangeList", pipeline_response.http_response)
+
+        if cls:
+            return cls(pipeline_response, deserialized, response_headers)  # type: ignore
+
+        return deserialized  # type: ignore
+
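+    # Illustrative sketch (assumed names; the `ranges` attribute and its
+    # start/end fields are assumed from the ShareFileRangeList model): listing
+    # valid ranges inside a 1 MiB window:
+    #
+    #     range_list = await ops.get_range_list(range="bytes=0-1048575")
+    #     for r in range_list.ranges:
+    #         print(r.start, r.end)
+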
+    @distributed_trace_async
+    async def start_copy(
+        self,
+        copy_source: str,
+        timeout: Optional[int] = None,
+        metadata: Optional[Dict[str, str]] = None,
+        file_permission: str = "inherit",
+        file_permission_format: Optional[Union[str, _models.FilePermissionFormat]] = None,
+        file_permission_key: Optional[str] = None,
+        owner: Optional[str] = None,
+        group: Optional[str] = None,
+        file_mode: Optional[str] = None,
+        file_mode_copy_mode: Optional[Union[str, _models.ModeCopyMode]] = None,
+        file_owner_copy_mode: Optional[Union[str, _models.OwnerCopyMode]] = None,
+        copy_file_smb_info: Optional[_models.CopyFileSmbInfo] = None,
+        lease_access_conditions: Optional[_models.LeaseAccessConditions] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """Copies a blob or file to a destination file within the storage account.
+
+        :param copy_source: Specifies the URL of the source file or blob, up to 2 KB in length. To copy
+         a file to another file within the same storage account, you may use Shared Key to authenticate
+         the source file. If you are copying a file from another storage account, or if you are copying
+         a blob from the same storage account or another storage account, then you must authenticate the
+         source file or blob using a shared access signature. If the source is a public blob, no
+         authentication is required to perform the copy operation. A file in a share snapshot can also
+         be specified as a copy source. Required.
+        :type copy_source: str
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
+         Timeouts for File Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param metadata: A name-value pair to associate with a file storage object. Default value is
+         None.
+        :type metadata: dict[str, str]
+        :param file_permission: If specified, the permission (security descriptor) shall be set for the
+         directory/file. This header can be used if the permission size is <= 8 KB; otherwise the
+         x-ms-file-permission-key header shall be used. Default value: Inherit. If SDDL is specified as
+         input, it must have owner, group and dacl. Note: Only one of the x-ms-file-permission or
+         x-ms-file-permission-key should be specified. Default value is "inherit".
+        :type file_permission: str
+        :param file_permission_format: Optional. Available for version 2023-06-01 and later. Specifies
+         the format in which the permission is returned. Acceptable values are SDDL or binary. If
+         x-ms-file-permission-format is unspecified or explicitly set to SDDL, the permission is
+         returned in SDDL format. If x-ms-file-permission-format is explicitly set to binary, the
+         permission is returned as a base64 string representing the binary encoding of the permission.
+         Known values are: "Sddl" and "Binary". Default value is None.
+        :type file_permission_format: str or ~azure.storage.fileshare.models.FilePermissionFormat
+        :param file_permission_key: Key of the permission to be set for the directory/file. Note: Only
+         one of the x-ms-file-permission or x-ms-file-permission-key should be specified. Default value
+         is None.
+        :type file_permission_key: str
+        :param owner: Optional, NFS only. The owner of the file or directory. Default value is None.
+        :type owner: str
+        :param group: Optional, NFS only. The owning group of the file or directory. Default value is
+         None.
+        :type group: str
+        :param file_mode: Optional, NFS only. The file mode of the file or directory. Default value is
+         None.
+        :type file_mode: str
+        :param file_mode_copy_mode: NFS only. Applicable only when the copy source is a File.
+         Determines the copy behavior of the mode bits of the file. source: The mode on the destination
+         file is copied from the source file. override: The mode on the destination file is determined
+         via the x-ms-mode header. Known values are: "source" and "override". Default value is None.
+        :type file_mode_copy_mode: str or ~azure.storage.fileshare.models.ModeCopyMode
+        :param file_owner_copy_mode: NFS only. Determines the copy behavior of the owner user
+         identifier (UID) and group identifier (GID) of the file. source: The owner user identifier
+         (UID) and group identifier (GID) on the destination file is copied from the source file.
+         override: The owner user identifier (UID) and group identifier (GID) on the destination file is
+         determined via the x-ms-owner and x-ms-group headers. Known values are: "source" and
+         "override". Default value is None.
+        :type file_owner_copy_mode: str or ~azure.storage.fileshare.models.OwnerCopyMode
+        :param copy_file_smb_info: Parameter group. Default value is None.
+        :type copy_file_smb_info: ~azure.storage.fileshare.models.CopyFileSmbInfo
+        :param lease_access_conditions: Parameter group. Default value is None.
+        :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = kwargs.pop("params", {}) or {}
+
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _file_permission_copy_mode = None
+        _ignore_read_only = None
+        _file_attributes = None
+        _file_creation_time = None
+        _file_last_write_time = None
+        _file_change_time = None
+        _set_archive_attribute = None
+        _lease_id = None
+        if copy_file_smb_info is not None:
+            _file_attributes = copy_file_smb_info.file_attributes
+            _file_change_time = copy_file_smb_info.file_change_time
+            _file_creation_time = copy_file_smb_info.file_creation_time
+            _file_last_write_time = copy_file_smb_info.file_last_write_time
+            _file_permission_copy_mode = copy_file_smb_info.file_permission_copy_mode
+            _ignore_read_only = copy_file_smb_info.ignore_read_only
+            _set_archive_attribute = copy_file_smb_info.set_archive_attribute
+        if lease_access_conditions is not None:
+            _lease_id = lease_access_conditions.lease_id
+
+        _request = build_start_copy_request(
+            url=self._config.url,
+            copy_source=copy_source,
+            timeout=timeout,
+            metadata=metadata,
+            file_permission=file_permission,
+            file_permission_format=file_permission_format,
+            file_permission_key=file_permission_key,
+            file_permission_copy_mode=_file_permission_copy_mode,
+            ignore_read_only=_ignore_read_only,
+            file_attributes=_file_attributes,
+            file_creation_time=_file_creation_time,
+            file_last_write_time=_file_last_write_time,
+            file_change_time=_file_change_time,
+            set_archive_attribute=_set_archive_attribute,
+            lease_id=_lease_id,
+            owner=owner,
+            group=group,
+            file_mode=file_mode,
+            file_mode_copy_mode=file_mode_copy_mode,
+            file_owner_copy_mode=file_owner_copy_mode,
+            allow_trailing_dot=self._config.allow_trailing_dot,
+            allow_source_trailing_dot=self._config.allow_source_trailing_dot,
+            file_request_intent=self._config.file_request_intent,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [202]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+        response_headers["x-ms-copy-id"] = self._deserialize("str", response.headers.get("x-ms-copy-id"))
+        response_headers["x-ms-copy-status"] = self._deserialize("str", response.headers.get("x-ms-copy-status"))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
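+    # Illustrative sketch (assumed names and placeholder URL): starting a copy
+    # and capturing the x-ms-copy-id needed to poll or abort it; the headers
+    # are surfaced through the `cls` hook:
+    #
+    #     captured = {}
+    #     await ops.start_copy(
+    #         copy_source="https://acct.file.core.windows.net/share/src.bin?<SAS>",
+    #         cls=lambda resp, body, headers: captured.update(headers),
+    #     )
+    #     copy_id = captured["x-ms-copy-id"]
+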
+    @distributed_trace_async
+    async def abort_copy(
+        self,
+        copy_id: str,
+        timeout: Optional[int] = None,
+        lease_access_conditions: Optional[_models.LeaseAccessConditions] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """Aborts a pending Copy File operation, and leaves a destination file with zero length and full
+        metadata.
+
+        :param copy_id: The copy identifier provided in the x-ms-copy-id header of the original Copy
+         File operation. Required.
+        :type copy_id: str
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
+         Timeouts for File Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param lease_access_conditions: Parameter group. Default value is None.
+        :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        comp: Literal["copy"] = kwargs.pop("comp", _params.pop("comp", "copy"))
+        copy_action_abort_constant: Literal["abort"] = kwargs.pop(
+            "copy_action_abort_constant", _headers.pop("x-ms-copy-action", "abort")
+        )
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _lease_id = None
+        if lease_access_conditions is not None:
+            _lease_id = lease_access_conditions.lease_id
+
+        _request = build_abort_copy_request(
+            url=self._config.url,
+            copy_id=copy_id,
+            timeout=timeout,
+            lease_id=_lease_id,
+            allow_trailing_dot=self._config.allow_trailing_dot,
+            file_request_intent=self._config.file_request_intent,
+            comp=comp,
+            copy_action_abort_constant=copy_action_abort_constant,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [204]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
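+    # Illustrative sketch (assumed names): aborting the pending copy started
+    # above; per the docstring, the destination is left zero-length with full
+    # metadata, and success is a 204:
+    #
+    #     await ops.abort_copy(copy_id=copy_id)
+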
+    @distributed_trace_async
+    async def list_handles(
+        self,
+        marker: Optional[str] = None,
+        maxresults: Optional[int] = None,
+        timeout: Optional[int] = None,
+        sharesnapshot: Optional[str] = None,
+        **kwargs: Any
+    ) -> _models.ListHandlesResponse:
+        # pylint: disable=line-too-long
+        """Lists handles for file.
+
+        :param marker: A string value that identifies the portion of the list to be returned with the
+         next list operation. The operation returns a marker value within the response body if the list
+         returned was not complete. The marker value may then be used in a subsequent call to request
+         the next set of list items. The marker value is opaque to the client. Default value is None.
+        :type marker: str
+        :param maxresults: Specifies the maximum number of entries to return. If the request does not
+         specify maxresults, or specifies a value greater than 5,000, the server will return up to 5,000
+         items. Default value is None.
+        :type maxresults: int
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
+         Timeouts for File Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present,
+         specifies the share snapshot to query. Default value is None.
+        :type sharesnapshot: str
+        :return: ListHandlesResponse or the result of cls(response)
+        :rtype: ~azure.storage.fileshare.models.ListHandlesResponse
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        comp: Literal["listhandles"] = kwargs.pop("comp", _params.pop("comp", "listhandles"))
+        cls: ClsType[_models.ListHandlesResponse] = kwargs.pop("cls", None)
+
+        _request = build_list_handles_request(
+            url=self._config.url,
+            marker=marker,
+            maxresults=maxresults,
+            timeout=timeout,
+            sharesnapshot=sharesnapshot,
+            allow_trailing_dot=self._config.allow_trailing_dot,
+            file_request_intent=self._config.file_request_intent,
+            comp=comp,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["Content-Type"] = self._deserialize("str", response.headers.get("Content-Type"))
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+
+        deserialized = self._deserialize("ListHandlesResponse", pipeline_response.http_response)
+
+        if cls:
+            return cls(pipeline_response, deserialized, response_headers)  # type: ignore
+
+        return deserialized  # type: ignore
+
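+    # Illustrative sketch (assumed names; the handle_list/next_marker/handle_id
+    # attributes are assumed from the ListHandlesResponse and HandleItem
+    # models): draining the listing with the continuation marker:
+    #
+    #     marker = None
+    #     while True:
+    #         page = await ops.list_handles(marker=marker, maxresults=100)
+    #         for h in page.handle_list or []:
+    #             print(h.handle_id)
+    #         marker = page.next_marker
+    #         if not marker:
+    #             break
+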
+    @distributed_trace_async
+    async def force_close_handles(
+        self,
+        handle_id: str,
+        timeout: Optional[int] = None,
+        marker: Optional[str] = None,
+        sharesnapshot: Optional[str] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """Closes all handles open for given file.
+
+        :param handle_id: Specifies handle ID opened on the file or directory to be closed. Asterisk
+         (‘*’) is a wildcard that specifies all handles. Required.
+        :type handle_id: str
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
+         Timeouts for File Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param marker: A string value that identifies the portion of the list to be returned with the
+         next list operation. The operation returns a marker value within the response body if the list
+         returned was not complete. The marker value may then be used in a subsequent call to request
+         the next set of list items. The marker value is opaque to the client. Default value is None.
+        :type marker: str
+        :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present,
+         specifies the share snapshot to query. Default value is None.
+        :type sharesnapshot: str
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        comp: Literal["forceclosehandles"] = kwargs.pop("comp", _params.pop("comp", "forceclosehandles"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _request = build_force_close_handles_request(
+            url=self._config.url,
+            handle_id=handle_id,
+            timeout=timeout,
+            marker=marker,
+            sharesnapshot=sharesnapshot,
+            allow_trailing_dot=self._config.allow_trailing_dot,
+            file_request_intent=self._config.file_request_intent,
+            comp=comp,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+        response_headers["x-ms-marker"] = self._deserialize("str", response.headers.get("x-ms-marker"))
+        response_headers["x-ms-number-of-handles-closed"] = self._deserialize(
+            "int", response.headers.get("x-ms-number-of-handles-closed")
+        )
+        response_headers["x-ms-number-of-handles-failed"] = self._deserialize(
+            "int", response.headers.get("x-ms-number-of-handles-failed")
+        )
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
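+    # Illustrative sketch (assumed names): closing every handle on the file via
+    # the '*' wildcard; the closed/failed counts come back as x-ms-* response
+    # headers, readable through the `cls` hook:
+    #
+    #     captured = {}
+    #     await ops.force_close_handles(
+    #         handle_id="*",
+    #         cls=lambda resp, body, headers: captured.update(headers),
+    #     )
+    #     closed = captured["x-ms-number-of-handles-closed"]
+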
+    @distributed_trace_async
+    async def rename(
+        self,
+        rename_source: str,
+        timeout: Optional[int] = None,
+        replace_if_exists: Optional[bool] = None,
+        ignore_read_only: Optional[bool] = None,
+        file_permission: str = "inherit",
+        file_permission_format: Optional[Union[str, _models.FilePermissionFormat]] = None,
+        file_permission_key: Optional[str] = None,
+        metadata: Optional[Dict[str, str]] = None,
+        source_lease_access_conditions: Optional[_models.SourceLeaseAccessConditions] = None,
+        destination_lease_access_conditions: Optional[_models.DestinationLeaseAccessConditions] = None,
+        copy_file_smb_info: Optional[_models.CopyFileSmbInfo] = None,
+        file_http_headers: Optional[_models.FileHTTPHeaders] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """Renames a file.
+
+        :param rename_source: Required. Specifies the URI-style path of the source file, up to 2 KB in
+         length. Required.
+        :type rename_source: str
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
+         Timeouts for File Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param replace_if_exists: Optional. A boolean value indicating whether, if the destination file
+         already exists, this request will overwrite it. If true, the rename will succeed and will
+         overwrite the destination file. If not provided, or if false and the destination file does
+         exist, the request will not overwrite the destination file. If provided and the destination
+         file doesn’t exist, the rename will succeed. Note: This value does not override the
+         x-ms-file-copy-ignore-read-only header value. Default value is None.
+        :type replace_if_exists: bool
+        :param ignore_read_only: Optional. A boolean value that specifies whether the ReadOnly
+         attribute on a preexisting destination file should be respected. If true, the rename will
+         succeed, otherwise, a previous file at the destination with the ReadOnly attribute set will
+         cause the rename to fail. Default value is None.
+        :type ignore_read_only: bool
+        :param file_permission: If specified, the permission (security descriptor) shall be set for
+         the directory/file. This header can be used if the permission size is <= 8 KiB; otherwise,
+         the x-ms-file-permission-key header shall be used. Default value: Inherit. If SDDL is
+         specified as input, it must have owner, group and dacl. Note: Only one of
+         x-ms-file-permission or x-ms-file-permission-key should be specified. Default value is
+         "inherit".
+        :type file_permission: str
+        :param file_permission_format: Optional. Available for version 2023-06-01 and later. Specifies
+         the format in which the permission is returned. Acceptable values are SDDL or binary. If
+         x-ms-file-permission-format is unspecified or explicitly set to SDDL, the permission is
+         returned in SDDL format. If x-ms-file-permission-format is explicitly set to binary, the
+         permission is returned as a base64 string representing the binary encoding of the permission.
+         Known values are: "Sddl" and "Binary". Default value is None.
+        :type file_permission_format: str or ~azure.storage.fileshare.models.FilePermissionFormat
+        :param file_permission_key: Key of the permission to be set for the directory/file. Note: Only
+         one of the x-ms-file-permission or x-ms-file-permission-key should be specified. Default value
+         is None.
+        :type file_permission_key: str
+        :param metadata: A name-value pair to associate with a file storage object. Default value is
+         None.
+        :type metadata: dict[str, str]
+        :param source_lease_access_conditions: Parameter group. Default value is None.
+        :type source_lease_access_conditions:
+         ~azure.storage.fileshare.models.SourceLeaseAccessConditions
+        :param destination_lease_access_conditions: Parameter group. Default value is None.
+        :type destination_lease_access_conditions:
+         ~azure.storage.fileshare.models.DestinationLeaseAccessConditions
+        :param copy_file_smb_info: Parameter group. Default value is None.
+        :type copy_file_smb_info: ~azure.storage.fileshare.models.CopyFileSmbInfo
+        :param file_http_headers: Parameter group. Default value is None.
+        :type file_http_headers: ~azure.storage.fileshare.models.FileHTTPHeaders
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
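+
+        .. admonition:: Example:
+
+            A minimal sketch, not part of the generated docs; it assumes ``ops``
+            is this operation group obtained from an authenticated async
+            ``AzureFileStorage`` client, and the source URL is hypothetical::
+
+                # Rename the source file onto this client's URL, overwriting any
+                # existing destination file.
+                await ops.rename(
+                    rename_source="https://myaccount.file.core.windows.net/myshare/old.txt",
+                    replace_if_exists=True,
+                )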
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        comp: Literal["rename"] = kwargs.pop("comp", _params.pop("comp", "rename"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _source_lease_id = None
+        _destination_lease_id = None
+        _file_attributes = None
+        _file_creation_time = None
+        _file_last_write_time = None
+        _file_change_time = None
+        _file_content_type = None
+        if source_lease_access_conditions is not None:
+            _source_lease_id = source_lease_access_conditions.source_lease_id
+        if destination_lease_access_conditions is not None:
+            _destination_lease_id = destination_lease_access_conditions.destination_lease_id
+        if copy_file_smb_info is not None:
+            _file_attributes = copy_file_smb_info.file_attributes
+            _file_change_time = copy_file_smb_info.file_change_time
+            _file_creation_time = copy_file_smb_info.file_creation_time
+            _file_last_write_time = copy_file_smb_info.file_last_write_time
+        if file_http_headers is not None:
+            _file_content_type = file_http_headers.file_content_type
+
+        _request = build_rename_request(
+            url=self._config.url,
+            rename_source=rename_source,
+            timeout=timeout,
+            replace_if_exists=replace_if_exists,
+            ignore_read_only=ignore_read_only,
+            source_lease_id=_source_lease_id,
+            destination_lease_id=_destination_lease_id,
+            file_attributes=_file_attributes,
+            file_creation_time=_file_creation_time,
+            file_last_write_time=_file_last_write_time,
+            file_change_time=_file_change_time,
+            file_permission=file_permission,
+            file_permission_format=file_permission_format,
+            file_permission_key=file_permission_key,
+            metadata=metadata,
+            file_content_type=_file_content_type,
+            allow_trailing_dot=self._config.allow_trailing_dot,
+            allow_source_trailing_dot=self._config.allow_source_trailing_dot,
+            file_request_intent=self._config.file_request_intent,
+            comp=comp,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+        response_headers["x-ms-request-server-encrypted"] = self._deserialize(
+            "bool", response.headers.get("x-ms-request-server-encrypted")
+        )
+        response_headers["x-ms-file-permission-key"] = self._deserialize(
+            "str", response.headers.get("x-ms-file-permission-key")
+        )
+        response_headers["x-ms-file-attributes"] = self._deserialize(
+            "str", response.headers.get("x-ms-file-attributes")
+        )
+        response_headers["x-ms-file-creation-time"] = self._deserialize(
+            "str", response.headers.get("x-ms-file-creation-time")
+        )
+        response_headers["x-ms-file-last-write-time"] = self._deserialize(
+            "str", response.headers.get("x-ms-file-last-write-time")
+        )
+        response_headers["x-ms-file-change-time"] = self._deserialize(
+            "str", response.headers.get("x-ms-file-change-time")
+        )
+        response_headers["x-ms-file-id"] = self._deserialize("str", response.headers.get("x-ms-file-id"))
+        response_headers["x-ms-file-parent-id"] = self._deserialize("str", response.headers.get("x-ms-file-parent-id"))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
+    @distributed_trace_async
+    async def create_symbolic_link(
+        self,
+        link_text: str,
+        timeout: Optional[int] = None,
+        metadata: Optional[Dict[str, str]] = None,
+        file_creation_time: str = "now",
+        file_last_write_time: str = "now",
+        request_id_parameter: Optional[str] = None,
+        owner: Optional[str] = None,
+        group: Optional[str] = None,
+        lease_access_conditions: Optional[_models.LeaseAccessConditions] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """Creates a symbolic link.
+
+        :param link_text: NFS only. Required. The path to the original file that the symbolic link
+         points to. The path is a string that is not resolved and is stored as is; it can be an
+         absolute or a relative path, depending on the content stored in the symbolic link file.
+        :type link_text: str
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
+         Timeouts for File Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param metadata: A name-value pair to associate with a file storage object. Default value is
+         None.
+        :type metadata: dict[str, str]
+        :param file_creation_time: Creation time for the file/directory. Default value: Now. Default
+         value is "now".
+        :type file_creation_time: str
+        :param file_last_write_time: Last write time for the file/directory. Default value: Now.
+         Default value is "now".
+        :type file_last_write_time: str
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param owner: Optional, NFS only. The owner of the file or directory. Default value is None.
+        :type owner: str
+        :param group: Optional, NFS only. The owning group of the file or directory. Default value is
+         None.
+        :type group: str
+        :param lease_access_conditions: Parameter group. Default value is None.
+        :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
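+
+        .. admonition:: Example:
+
+            A minimal sketch, not part of the generated docs; it assumes ``ops``
+            is this operation group obtained from an authenticated async
+            ``AzureFileStorage`` client, and the link target is hypothetical::
+
+                # NFS only: store the (unresolved) target path in the new link.
+                await ops.create_symbolic_link(link_text="../data/original.txt")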
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        restype: Literal["symboliclink"] = kwargs.pop("restype", _params.pop("restype", "symboliclink"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _lease_id = None
+        if lease_access_conditions is not None:
+            _lease_id = lease_access_conditions.lease_id
+
+        _request = build_create_symbolic_link_request(
+            url=self._config.url,
+            link_text=link_text,
+            timeout=timeout,
+            metadata=metadata,
+            file_creation_time=file_creation_time,
+            file_last_write_time=file_last_write_time,
+            request_id_parameter=request_id_parameter,
+            lease_id=_lease_id,
+            owner=owner,
+            group=group,
+            file_request_intent=self._config.file_request_intent,
+            restype=restype,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [201]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+        response_headers["x-ms-file-creation-time"] = self._deserialize(
+            "str", response.headers.get("x-ms-file-creation-time")
+        )
+        response_headers["x-ms-file-last-write-time"] = self._deserialize(
+            "str", response.headers.get("x-ms-file-last-write-time")
+        )
+        response_headers["x-ms-file-change-time"] = self._deserialize(
+            "str", response.headers.get("x-ms-file-change-time")
+        )
+        response_headers["x-ms-file-id"] = self._deserialize("str", response.headers.get("x-ms-file-id"))
+        response_headers["x-ms-file-parent-id"] = self._deserialize("str", response.headers.get("x-ms-file-parent-id"))
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-mode"] = self._deserialize("str", response.headers.get("x-ms-mode"))
+        response_headers["x-ms-owner"] = self._deserialize("str", response.headers.get("x-ms-owner"))
+        response_headers["x-ms-group"] = self._deserialize("str", response.headers.get("x-ms-group"))
+        response_headers["x-ms-file-file-type"] = self._deserialize("str", response.headers.get("x-ms-file-file-type"))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
+    @distributed_trace_async
+    async def get_symbolic_link(
+        self,
+        timeout: Optional[int] = None,
+        sharesnapshot: Optional[str] = None,
+        request_id_parameter: Optional[str] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """get_symbolic_link.
+
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
+         Timeouts for File Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present,
+         specifies the share snapshot to query. Default value is None.
+        :type sharesnapshot: str
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
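+
+        .. admonition:: Example:
+
+            A minimal sketch, not part of the generated docs; it assumes ``ops``
+            is this operation group obtained from an authenticated async
+            ``AzureFileStorage`` client. The link text comes back as a response
+            header, so the optional ``cls`` callback is used to surface it::
+
+                headers = await ops.get_symbolic_link(
+                    cls=lambda resp, body, hdrs: hdrs,
+                )
+                print(headers["x-ms-link-text"])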
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        restype: Literal["symboliclink"] = kwargs.pop("restype", _params.pop("restype", "symboliclink"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _request = build_get_symbolic_link_request(
+            url=self._config.url,
+            timeout=timeout,
+            sharesnapshot=sharesnapshot,
+            request_id_parameter=request_id_parameter,
+            file_request_intent=self._config.file_request_intent,
+            restype=restype,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+        response_headers["x-ms-link-text"] = self._deserialize("str", response.headers.get("x-ms-link-text"))
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
+    @distributed_trace_async
+    async def create_hard_link(
+        self,
+        target_file: str,
+        timeout: Optional[int] = None,
+        request_id_parameter: Optional[str] = None,
+        lease_access_conditions: Optional[_models.LeaseAccessConditions] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """Creates a hard link.
+
+        :param target_file: NFS only. Required. Specifies the path of the target file to which the
+         link will be created, up to 2 KiB in length. It should be the full path of the target from
+         the root. The target file must be in the same share and hence in the same storage account.
+        :type target_file: str
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
+         Timeouts for File Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param lease_access_conditions: Parameter group. Default value is None.
+        :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
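+
+        .. admonition:: Example:
+
+            A minimal sketch, not part of the generated docs; it assumes ``ops``
+            is this operation group obtained from an authenticated async
+            ``AzureFileStorage`` client, and the target path is hypothetical::
+
+                # NFS only: the target must be the full path from the root and
+                # live in the same share.
+                await ops.create_hard_link(target_file="/myshare/data/original.txt")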
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        restype: Literal["hardlink"] = kwargs.pop("restype", _params.pop("restype", "hardlink"))
+        file_type_constant: Literal["file"] = kwargs.pop("file_type_constant", _headers.pop("x-ms-type", "file"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _lease_id = None
+        if lease_access_conditions is not None:
+            _lease_id = lease_access_conditions.lease_id
+
+        _request = build_create_hard_link_request(
+            url=self._config.url,
+            target_file=target_file,
+            timeout=timeout,
+            request_id_parameter=request_id_parameter,
+            lease_id=_lease_id,
+            file_request_intent=self._config.file_request_intent,
+            restype=restype,
+            file_type_constant=file_type_constant,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [201]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+        response_headers["x-ms-file-creation-time"] = self._deserialize(
+            "str", response.headers.get("x-ms-file-creation-time")
+        )
+        response_headers["x-ms-file-last-write-time"] = self._deserialize(
+            "str", response.headers.get("x-ms-file-last-write-time")
+        )
+        response_headers["x-ms-file-change-time"] = self._deserialize(
+            "str", response.headers.get("x-ms-file-change-time")
+        )
+        response_headers["x-ms-file-id"] = self._deserialize("str", response.headers.get("x-ms-file-id"))
+        response_headers["x-ms-file-parent-id"] = self._deserialize("str", response.headers.get("x-ms-file-parent-id"))
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-link-count"] = self._deserialize("int", response.headers.get("x-ms-link-count"))
+        response_headers["x-ms-mode"] = self._deserialize("str", response.headers.get("x-ms-mode"))
+        response_headers["x-ms-owner"] = self._deserialize("str", response.headers.get("x-ms-owner"))
+        response_headers["x-ms-group"] = self._deserialize("str", response.headers.get("x-ms-group"))
+        response_headers["x-ms-file-file-type"] = self._deserialize("str", response.headers.get("x-ms-file-file-type"))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/fileshare/_generated/aio/operations/_patch.py b/.venv/lib/python3.12/site-packages/azure/storage/fileshare/_generated/aio/operations/_patch.py
new file mode 100644
index 00000000..f7dd3251
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/fileshare/_generated/aio/operations/_patch.py
@@ -0,0 +1,20 @@
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------
+"""Customize generated code here.
+
+Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize
+"""
+from typing import List
+
+__all__: List[str] = []  # Add all objects you want publicly available to users at this package level
+
+
+def patch_sdk():
+    """Do not remove from this file.
+
+    `patch_sdk` is a last resort escape hatch that allows you to do customizations
+    you can't accomplish using the techniques described in
+    https://aka.ms/azsdk/python/dpcodegen/python/customize
+    """
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/fileshare/_generated/aio/operations/_service_operations.py b/.venv/lib/python3.12/site-packages/azure/storage/fileshare/_generated/aio/operations/_service_operations.py
new file mode 100644
index 00000000..4814e221
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/fileshare/_generated/aio/operations/_service_operations.py
@@ -0,0 +1,284 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+import sys
+from typing import Any, Callable, Dict, List, Literal, Optional, TypeVar, Union
+
+from azure.core import AsyncPipelineClient
+from azure.core.exceptions import (
+    ClientAuthenticationError,
+    HttpResponseError,
+    ResourceExistsError,
+    ResourceNotFoundError,
+    ResourceNotModifiedError,
+    map_error,
+)
+from azure.core.pipeline import PipelineResponse
+from azure.core.rest import AsyncHttpResponse, HttpRequest
+from azure.core.tracing.decorator_async import distributed_trace_async
+from azure.core.utils import case_insensitive_dict
+
+from ... import models as _models
+from ..._serialization import Deserializer, Serializer
+from ...operations._service_operations import (
+    build_get_properties_request,
+    build_list_shares_segment_request,
+    build_set_properties_request,
+)
+from .._configuration import AzureFileStorageConfiguration
+
+if sys.version_info >= (3, 9):
+    from collections.abc import MutableMapping
+else:
+    from typing import MutableMapping  # type: ignore
+T = TypeVar("T")
+ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
+
+
+class ServiceOperations:
+    """
+    .. warning::
+        **DO NOT** instantiate this class directly.
+
+        Instead, you should access the following operations through
+        :class:`~azure.storage.fileshare.aio.AzureFileStorage`'s
+        :attr:`service` attribute.
+    """
+
+    models = _models
+
+    def __init__(self, *args, **kwargs) -> None:
+        input_args = list(args)
+        self._client: AsyncPipelineClient = input_args.pop(0) if input_args else kwargs.pop("client")
+        self._config: AzureFileStorageConfiguration = input_args.pop(0) if input_args else kwargs.pop("config")
+        self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer")
+        self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer")
+
+    @distributed_trace_async
+    async def set_properties(
+        self, storage_service_properties: _models.StorageServiceProperties, timeout: Optional[int] = None, **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """Sets properties for a storage account's File service endpoint, including properties for Storage
+        Analytics metrics and CORS (Cross-Origin Resource Sharing) rules.
+
+        :param storage_service_properties: The StorageService properties. Required.
+        :type storage_service_properties: ~azure.storage.fileshare.models.StorageServiceProperties
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
+         Timeouts for File Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
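+
+        .. admonition:: Example:
+
+            A minimal sketch, not part of the generated docs; it assumes
+            ``client`` is an authenticated
+            :class:`~azure.storage.fileshare.aio.AzureFileStorage` client. It
+            round-trips the current properties rather than constructing a model
+            from scratch::
+
+                props = await client.service.get_properties()
+                await client.service.set_properties(storage_service_properties=props)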
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        restype: Literal["service"] = kwargs.pop("restype", _params.pop("restype", "service"))
+        comp: Literal["properties"] = kwargs.pop("comp", _params.pop("comp", "properties"))
+        content_type: str = kwargs.pop("content_type", _headers.pop("Content-Type", "application/xml"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _content = self._serialize.body(storage_service_properties, "StorageServiceProperties", is_xml=True)
+
+        _request = build_set_properties_request(
+            url=self._config.url,
+            timeout=timeout,
+            file_request_intent=self._config.file_request_intent,
+            restype=restype,
+            comp=comp,
+            content_type=content_type,
+            version=self._config.version,
+            content=_content,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [202]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
+    @distributed_trace_async
+    async def get_properties(self, timeout: Optional[int] = None, **kwargs: Any) -> _models.StorageServiceProperties:
+        # pylint: disable=line-too-long
+        """Gets the properties of a storage account's File service, including properties for Storage
+        Analytics metrics and CORS (Cross-Origin Resource Sharing) rules.
+
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
+         Timeouts for File Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :return: StorageServiceProperties or the result of cls(response)
+        :rtype: ~azure.storage.fileshare.models.StorageServiceProperties
+        :raises ~azure.core.exceptions.HttpResponseError:
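+
+        .. admonition:: Example:
+
+            A minimal sketch, not part of the generated docs; it assumes
+            ``client`` is an authenticated
+            :class:`~azure.storage.fileshare.aio.AzureFileStorage` client::
+
+                props = await client.service.get_properties()
+                print(props.cors)  # the configured CORS rules, if any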
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        restype: Literal["service"] = kwargs.pop("restype", _params.pop("restype", "service"))
+        comp: Literal["properties"] = kwargs.pop("comp", _params.pop("comp", "properties"))
+        cls: ClsType[_models.StorageServiceProperties] = kwargs.pop("cls", None)
+
+        _request = build_get_properties_request(
+            url=self._config.url,
+            timeout=timeout,
+            file_request_intent=self._config.file_request_intent,
+            restype=restype,
+            comp=comp,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+
+        deserialized = self._deserialize("StorageServiceProperties", pipeline_response.http_response)
+
+        if cls:
+            return cls(pipeline_response, deserialized, response_headers)  # type: ignore
+
+        return deserialized  # type: ignore
+
+    @distributed_trace_async
+    async def list_shares_segment(
+        self,
+        prefix: Optional[str] = None,
+        marker: Optional[str] = None,
+        maxresults: Optional[int] = None,
+        include: Optional[List[Union[str, _models.ListSharesIncludeType]]] = None,
+        timeout: Optional[int] = None,
+        **kwargs: Any
+    ) -> _models.ListSharesResponse:
+        # pylint: disable=line-too-long
+        """The List Shares Segment operation returns a list of the shares and share snapshots under the
+        specified account.
+
+        :param prefix: Filters the results to return only entries whose name begins with the specified
+         prefix. Default value is None.
+        :type prefix: str
+        :param marker: A string value that identifies the portion of the list to be returned with the
+         next list operation. The operation returns a marker value within the response body if the list
+         returned was not complete. The marker value may then be used in a subsequent call to request
+         the next set of list items. The marker value is opaque to the client. Default value is None.
+        :type marker: str
+        :param maxresults: Specifies the maximum number of entries to return. If the request does not
+         specify maxresults, or specifies a value greater than 5,000, the server will return up to 5,000
+         items. Default value is None.
+        :type maxresults: int
+        :param include: Include this parameter to specify one or more datasets to include in the
+         response. Default value is None.
+        :type include: list[str or ~azure.storage.fileshare.models.ListSharesIncludeType]
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
+         Timeouts for File Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :return: ListSharesResponse or the result of cls(response)
+        :rtype: ~azure.storage.fileshare.models.ListSharesResponse
+        :raises ~azure.core.exceptions.HttpResponseError:
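+
+        .. admonition:: Example:
+
+            A minimal sketch, not part of the generated docs; it assumes
+            ``client`` is an authenticated
+            :class:`~azure.storage.fileshare.aio.AzureFileStorage` client, and
+            the prefix is illustrative. It follows the continuation marker until
+            the listing is complete::
+
+                marker = None
+                while True:
+                    segment = await client.service.list_shares_segment(
+                        prefix="logs-", marker=marker, maxresults=100
+                    )
+                    for share in segment.share_items:
+                        print(share.name)
+                    marker = segment.next_marker
+                    if not marker:
+                        break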
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        comp: Literal["list"] = kwargs.pop("comp", _params.pop("comp", "list"))
+        cls: ClsType[_models.ListSharesResponse] = kwargs.pop("cls", None)
+
+        _request = build_list_shares_segment_request(
+            url=self._config.url,
+            prefix=prefix,
+            marker=marker,
+            maxresults=maxresults,
+            include=include,
+            timeout=timeout,
+            file_request_intent=self._config.file_request_intent,
+            comp=comp,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+
+        deserialized = self._deserialize("ListSharesResponse", pipeline_response.http_response)
+
+        if cls:
+            return cls(pipeline_response, deserialized, response_headers)  # type: ignore
+
+        return deserialized  # type: ignore
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/fileshare/_generated/aio/operations/_share_operations.py b/.venv/lib/python3.12/site-packages/azure/storage/fileshare/_generated/aio/operations/_share_operations.py
new file mode 100644
index 00000000..373424ef
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/fileshare/_generated/aio/operations/_share_operations.py
@@ -0,0 +1,1765 @@
+# pylint: disable=too-many-lines
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+from io import IOBase
+import sys
+from typing import Any, Callable, Dict, IO, List, Literal, Optional, TypeVar, Union, overload
+
+from azure.core import AsyncPipelineClient
+from azure.core.exceptions import (
+    ClientAuthenticationError,
+    HttpResponseError,
+    ResourceExistsError,
+    ResourceNotFoundError,
+    ResourceNotModifiedError,
+    map_error,
+)
+from azure.core.pipeline import PipelineResponse
+from azure.core.rest import AsyncHttpResponse, HttpRequest
+from azure.core.tracing.decorator_async import distributed_trace_async
+from azure.core.utils import case_insensitive_dict
+
+from ... import models as _models
+from ..._serialization import Deserializer, Serializer
+from ...operations._share_operations import (
+    build_acquire_lease_request,
+    build_break_lease_request,
+    build_change_lease_request,
+    build_create_permission_request,
+    build_create_request,
+    build_create_snapshot_request,
+    build_delete_request,
+    build_get_access_policy_request,
+    build_get_permission_request,
+    build_get_properties_request,
+    build_get_statistics_request,
+    build_release_lease_request,
+    build_renew_lease_request,
+    build_restore_request,
+    build_set_access_policy_request,
+    build_set_metadata_request,
+    build_set_properties_request,
+)
+from .._configuration import AzureFileStorageConfiguration
+
+if sys.version_info >= (3, 9):
+    from collections.abc import MutableMapping
+else:
+    from typing import MutableMapping  # type: ignore
+T = TypeVar("T")
+ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
+
+
+class ShareOperations:
+    """
+    .. warning::
+        **DO NOT** instantiate this class directly.
+
+        Instead, you should access the following operations through
+        :class:`~azure.storage.fileshare.aio.AzureFileStorage`'s
+        :attr:`share` attribute.
+    """
+
+    models = _models
+
+    def __init__(self, *args, **kwargs) -> None:
+        input_args = list(args)
+        self._client: AsyncPipelineClient = input_args.pop(0) if input_args else kwargs.pop("client")
+        self._config: AzureFileStorageConfiguration = input_args.pop(0) if input_args else kwargs.pop("config")
+        self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer")
+        self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer")
+
+    @distributed_trace_async
+    async def create(
+        self,
+        timeout: Optional[int] = None,
+        metadata: Optional[Dict[str, str]] = None,
+        quota: Optional[int] = None,
+        access_tier: Optional[Union[str, _models.ShareAccessTier]] = None,
+        enabled_protocols: Optional[str] = None,
+        root_squash: Optional[Union[str, _models.ShareRootSquash]] = None,
+        enable_snapshot_virtual_directory_access: Optional[bool] = None,
+        paid_bursting_enabled: Optional[bool] = None,
+        paid_bursting_max_bandwidth_mibps: Optional[int] = None,
+        paid_bursting_max_iops: Optional[int] = None,
+        share_provisioned_iops: Optional[int] = None,
+        share_provisioned_bandwidth_mibps: Optional[int] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """Creates a new share under the specified account. If the share with the same name already
+        exists, the operation fails.
+
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
+         Timeouts for File Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param metadata: A name-value pair to associate with a file storage object. Default value is
+         None.
+        :type metadata: dict[str, str]
+        :param quota: Specifies the maximum size of the share, in gigabytes. Default value is None.
+        :type quota: int
+        :param access_tier: Specifies the access tier of the share. Known values are:
+         "TransactionOptimized", "Hot", "Cool", and "Premium". Default value is None.
+        :type access_tier: str or ~azure.storage.fileshare.models.ShareAccessTier
+        :param enabled_protocols: Protocols to enable on the share. Default value is None.
+        :type enabled_protocols: str
+        :param root_squash: Root squash to set on the share. Only valid for NFS shares. Known values
+         are: "NoRootSquash", "RootSquash", and "AllSquash". Default value is None.
+        :type root_squash: str or ~azure.storage.fileshare.models.ShareRootSquash
+        :param enable_snapshot_virtual_directory_access: Optional. A boolean value that specifies
+         whether the snapshot virtual directory should be accessible at the root of the share mount
+         point when NFS is enabled. Default value is None.
+        :type enable_snapshot_virtual_directory_access: bool
+        :param paid_bursting_enabled: Optional. Boolean. Default if not specified is false. This
+         property enables paid bursting. Default value is None.
+        :type paid_bursting_enabled: bool
+        :param paid_bursting_max_bandwidth_mibps: Optional. Integer. Default if not specified is the
+         maximum throughput the file share can support. Current maximum for a file share is 10,340
+         MiB/sec. Default value is None.
+        :type paid_bursting_max_bandwidth_mibps: int
+        :param paid_bursting_max_iops: Optional. Integer. Default if not specified is the maximum IOPS
+         the file share can support. Current maximum for a file share is 102,400 IOPS. Default value is
+         None.
+        :type paid_bursting_max_iops: int
+        :param share_provisioned_iops: Optional. Supported in version 2025-01-05 and later. Only
+         allowed for provisioned v2 file shares. Specifies the provisioned number of input/output
+         operations per second (IOPS) of the share. If this is not specified, the provisioned IOPS
+         is set to a value calculated based on the recommendation formula. Default value is None.
+        :type share_provisioned_iops: int
+        :param share_provisioned_bandwidth_mibps: Optional. Supported in version 2025-01-05 and
+         later. Only allowed for provisioned v2 file shares. Specifies the provisioned bandwidth of
+         the share, in mebibytes per second (MiBps). If this is not specified, the provisioned
+         bandwidth is set to a value calculated based on the recommendation formula. Default value
+         is None.
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
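+
+        .. admonition:: Example:
+
+            A minimal sketch, not part of the generated docs; it assumes
+            ``client`` is an authenticated
+            :class:`~azure.storage.fileshare.aio.AzureFileStorage` client whose
+            URL already addresses the share to create; the quota (in GiB) and
+            tier are illustrative::
+
+                await client.share.create(quota=100, access_tier="Hot")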
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        restype: Literal["share"] = kwargs.pop("restype", _params.pop("restype", "share"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _request = build_create_request(
+            url=self._config.url,
+            timeout=timeout,
+            metadata=metadata,
+            quota=quota,
+            access_tier=access_tier,
+            enabled_protocols=enabled_protocols,
+            root_squash=root_squash,
+            enable_snapshot_virtual_directory_access=enable_snapshot_virtual_directory_access,
+            paid_bursting_enabled=paid_bursting_enabled,
+            paid_bursting_max_bandwidth_mibps=paid_bursting_max_bandwidth_mibps,
+            paid_bursting_max_iops=paid_bursting_max_iops,
+            share_provisioned_iops=share_provisioned_iops,
+            share_provisioned_bandwidth_mibps=share_provisioned_bandwidth_mibps,
+            file_request_intent=self._config.file_request_intent,
+            restype=restype,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [201]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+        response_headers["x-ms-share-quota"] = self._deserialize("int", response.headers.get("x-ms-share-quota"))
+        response_headers["x-ms-share-provisioned-iops"] = self._deserialize(
+            "int", response.headers.get("x-ms-share-provisioned-iops")
+        )
+        response_headers["x-ms-share-provisioned-bandwidth-mibps"] = self._deserialize(
+            "int", response.headers.get("x-ms-share-provisioned-bandwidth-mibps")
+        )
+        response_headers["x-ms-share-included-burst-iops"] = self._deserialize(
+            "int", response.headers.get("x-ms-share-included-burst-iops")
+        )
+        response_headers["x-ms-share-max-burst-credits-for-iops"] = self._deserialize(
+            "int", response.headers.get("x-ms-share-max-burst-credits-for-iops")
+        )
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
+    @distributed_trace_async
+    async def get_properties(
+        self,
+        sharesnapshot: Optional[str] = None,
+        timeout: Optional[int] = None,
+        lease_access_conditions: Optional[_models.LeaseAccessConditions] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """Returns all user-defined metadata and system properties for the specified share or share
+        snapshot. The data returned does not include the share's list of files.
+
+        :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present,
+         specifies the share snapshot to query. Default value is None.
+        :type sharesnapshot: str
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
+         Timeouts for File Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param lease_access_conditions: Parameter group. Default value is None.
+        :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
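+
+        .. admonition:: Example:
+
+            A minimal sketch, not part of the generated docs; it assumes
+            ``client`` is an authenticated
+            :class:`~azure.storage.fileshare.aio.AzureFileStorage` client. The
+            share properties come back as response headers, so the optional
+            ``cls`` callback is used to surface them::
+
+                headers = await client.share.get_properties(
+                    cls=lambda resp, body, hdrs: hdrs,
+                )
+                print(headers["x-ms-share-quota"])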
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        restype: Literal["share"] = kwargs.pop("restype", _params.pop("restype", "share"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _lease_id = None
+        if lease_access_conditions is not None:
+            _lease_id = lease_access_conditions.lease_id
+
+        _request = build_get_properties_request(
+            url=self._config.url,
+            sharesnapshot=sharesnapshot,
+            timeout=timeout,
+            lease_id=_lease_id,
+            file_request_intent=self._config.file_request_intent,
+            restype=restype,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["x-ms-meta"] = self._deserialize("{str}", response.headers.get("x-ms-meta"))
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+        response_headers["x-ms-share-quota"] = self._deserialize("int", response.headers.get("x-ms-share-quota"))
+        response_headers["x-ms-share-provisioned-iops"] = self._deserialize(
+            "int", response.headers.get("x-ms-share-provisioned-iops")
+        )
+        response_headers["x-ms-share-provisioned-ingress-mbps"] = self._deserialize(
+            "int", response.headers.get("x-ms-share-provisioned-ingress-mbps")
+        )
+        response_headers["x-ms-share-provisioned-egress-mbps"] = self._deserialize(
+            "int", response.headers.get("x-ms-share-provisioned-egress-mbps")
+        )
+        response_headers["x-ms-share-next-allowed-quota-downgrade-time"] = self._deserialize(
+            "rfc-1123", response.headers.get("x-ms-share-next-allowed-quota-downgrade-time")
+        )
+        response_headers["x-ms-share-provisioned-bandwidth-mibps"] = self._deserialize(
+            "int", response.headers.get("x-ms-share-provisioned-bandwidth-mibps")
+        )
+        response_headers["x-ms-lease-duration"] = self._deserialize("str", response.headers.get("x-ms-lease-duration"))
+        response_headers["x-ms-lease-state"] = self._deserialize("str", response.headers.get("x-ms-lease-state"))
+        response_headers["x-ms-lease-status"] = self._deserialize("str", response.headers.get("x-ms-lease-status"))
+        response_headers["x-ms-access-tier"] = self._deserialize("str", response.headers.get("x-ms-access-tier"))
+        response_headers["x-ms-access-tier-change-time"] = self._deserialize(
+            "rfc-1123", response.headers.get("x-ms-access-tier-change-time")
+        )
+        response_headers["x-ms-access-tier-transition-state"] = self._deserialize(
+            "str", response.headers.get("x-ms-access-tier-transition-state")
+        )
+        response_headers["x-ms-enabled-protocols"] = self._deserialize(
+            "str", response.headers.get("x-ms-enabled-protocols")
+        )
+        response_headers["x-ms-root-squash"] = self._deserialize("str", response.headers.get("x-ms-root-squash"))
+        response_headers["x-ms-enable-snapshot-virtual-directory-access"] = self._deserialize(
+            "bool", response.headers.get("x-ms-enable-snapshot-virtual-directory-access")
+        )
+        response_headers["x-ms-share-paid-bursting-enabled"] = self._deserialize(
+            "bool", response.headers.get("x-ms-share-paid-bursting-enabled")
+        )
+        response_headers["x-ms-share-paid-bursting-max-iops"] = self._deserialize(
+            "int", response.headers.get("x-ms-share-paid-bursting-max-iops")
+        )
+        response_headers["x-ms-share-paid-bursting-max-bandwidth-mibps"] = self._deserialize(
+            "int", response.headers.get("x-ms-share-paid-bursting-max-bandwidth-mibps")
+        )
+        response_headers["x-ms-share-included-burst-iops"] = self._deserialize(
+            "int", response.headers.get("x-ms-share-included-burst-iops")
+        )
+        response_headers["x-ms-share-max-burst-credits-for-iops"] = self._deserialize(
+            "int", response.headers.get("x-ms-share-max-burst-credits-for-iops")
+        )
+        response_headers["x-ms-share-next-allowed-provisioned-iops-downgrade-time"] = self._deserialize(
+            "rfc-1123", response.headers.get("x-ms-share-next-allowed-provisioned-iops-downgrade-time")
+        )
+        response_headers["x-ms-share-next-allowed-provisioned-bandwidth-downgrade-time"] = self._deserialize(
+            "rfc-1123", response.headers.get("x-ms-share-next-allowed-provisioned-bandwidth-downgrade-time")
+        )
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
+    @distributed_trace_async
+    async def delete(
+        self,
+        sharesnapshot: Optional[str] = None,
+        timeout: Optional[int] = None,
+        delete_snapshots: Optional[Union[str, _models.DeleteSnapshotsOptionType]] = None,
+        lease_access_conditions: Optional[_models.LeaseAccessConditions] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """Operation marks the specified share or share snapshot for deletion. The share or share snapshot
+        and any files contained within it are later deleted during garbage collection.
+
+        :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present,
+         specifies the share snapshot to query. Default value is None.
+        :type sharesnapshot: str
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
+         Timeouts for File Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param delete_snapshots: Specifies whether snapshots should be deleted together with the
+         base share. Known values are: "include" and "include-leased". Default value is None.
+        :type delete_snapshots: str or ~azure.storage.fileshare.models.DeleteSnapshotsOptionType
+        :param lease_access_conditions: Parameter group. Default value is None.
+        :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        restype: Literal["share"] = kwargs.pop("restype", _params.pop("restype", "share"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _lease_id = None
+        if lease_access_conditions is not None:
+            _lease_id = lease_access_conditions.lease_id
+
+        _request = build_delete_request(
+            url=self._config.url,
+            sharesnapshot=sharesnapshot,
+            timeout=timeout,
+            delete_snapshots=delete_snapshots,
+            lease_id=_lease_id,
+            file_request_intent=self._config.file_request_intent,
+            restype=restype,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [202]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+        response_headers["x-ms-file-share-usage-bytes"] = self._deserialize(
+            "int", response.headers.get("x-ms-file-share-usage-bytes")
+        )
+        response_headers["x-ms-file-share-snapshot-usage-bytes"] = self._deserialize(
+            "int", response.headers.get("x-ms-file-share-snapshot-usage-bytes")
+        )
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
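+    # Usage sketch (names here are assumptions, not part of the generated surface):
+    # "share_ops" stands for an instance of this operations class wired to a pipeline,
+    # typically reached through the service-level client.
+    #
+    #     # Delete the share together with every snapshot it owns.
+    #     await share_ops.delete(delete_snapshots="include")
+    #
+    #     # Or target a single snapshot via its opaque DateTime value (illustrative).
+    #     await share_ops.delete(sharesnapshot="2025-01-01T00:00:00.0000000Z")
+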
+    @distributed_trace_async
+    async def acquire_lease(
+        self,
+        timeout: Optional[int] = None,
+        duration: Optional[int] = None,
+        proposed_lease_id: Optional[str] = None,
+        sharesnapshot: Optional[str] = None,
+        request_id_parameter: Optional[str] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """The Lease Share operation establishes and manages a lock on a share, or the specified snapshot
+        for set and delete share operations.
+
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
+         Timeouts for File Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param duration: Specifies the duration of the lease, in seconds, or negative one (-1) for a
+         lease that never expires. A non-infinite lease can be between 15 and 60 seconds. A lease
+         duration cannot be changed using renew or change. Default value is None.
+        :type duration: int
+        :param proposed_lease_id: Proposed lease ID, in a GUID string format. The File service returns
+         400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid
+         Constructor (String) for a list of valid GUID string formats. Default value is None.
+        :type proposed_lease_id: str
+        :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present,
+         specifies the share snapshot to query. Default value is None.
+        :type sharesnapshot: str
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        comp: Literal["lease"] = kwargs.pop("comp", _params.pop("comp", "lease"))
+        action: Literal["acquire"] = kwargs.pop("action", _headers.pop("x-ms-lease-action", "acquire"))
+        restype: Literal["share"] = kwargs.pop("restype", _params.pop("restype", "share"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _request = build_acquire_lease_request(
+            url=self._config.url,
+            timeout=timeout,
+            duration=duration,
+            proposed_lease_id=proposed_lease_id,
+            sharesnapshot=sharesnapshot,
+            request_id_parameter=request_id_parameter,
+            file_request_intent=self._config.file_request_intent,
+            comp=comp,
+            action=action,
+            restype=restype,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [201]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["x-ms-lease-id"] = self._deserialize("str", response.headers.get("x-ms-lease-id"))
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
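+    # A minimal sketch of acquiring a lease ("share_ops" as assumed above). Because this
+    # method returns None, the new lease id has to be read from the response headers via
+    # the "cls" callback, which receives (pipeline_response, deserialized, response_headers)
+    # exactly as invoked at the end of this method.
+    #
+    #     lease_id = await share_ops.acquire_lease(
+    #         duration=-1,  # infinite lease; finite leases are 15-60 seconds
+    #         proposed_lease_id=str(uuid.uuid4()),  # requires "import uuid"
+    #         cls=lambda pipeline_response, deserialized, headers: headers["x-ms-lease-id"],
+    #     )
+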
+    @distributed_trace_async
+    async def release_lease(
+        self,
+        lease_id: str,
+        timeout: Optional[int] = None,
+        sharesnapshot: Optional[str] = None,
+        request_id_parameter: Optional[str] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """The Lease Share operation establishes and manages a lock on a share, or the specified snapshot
+        for set and delete share operations.
+
+        :param lease_id: Specifies the current lease ID on the resource. Required.
+        :type lease_id: str
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
+         Timeouts for File Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present,
+         specifies the share snapshot to query. Default value is None.
+        :type sharesnapshot: str
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        comp: Literal["lease"] = kwargs.pop("comp", _params.pop("comp", "lease"))
+        action: Literal["release"] = kwargs.pop("action", _headers.pop("x-ms-lease-action", "release"))
+        restype: Literal["share"] = kwargs.pop("restype", _params.pop("restype", "share"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _request = build_release_lease_request(
+            url=self._config.url,
+            lease_id=lease_id,
+            timeout=timeout,
+            sharesnapshot=sharesnapshot,
+            request_id_parameter=request_id_parameter,
+            file_request_intent=self._config.file_request_intent,
+            comp=comp,
+            action=action,
+            restype=restype,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
+    @distributed_trace_async
+    async def change_lease(
+        self,
+        lease_id: str,
+        timeout: Optional[int] = None,
+        proposed_lease_id: Optional[str] = None,
+        sharesnapshot: Optional[str] = None,
+        request_id_parameter: Optional[str] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """The Lease Share operation establishes and manages a lock on a share, or the specified snapshot
+        for set and delete share operations.
+
+        :param lease_id: Specifies the current lease ID on the resource. Required.
+        :type lease_id: str
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
+         Timeouts for File Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param proposed_lease_id: Proposed lease ID, in a GUID string format. The File service returns
+         400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid
+         Constructor (String) for a list of valid GUID string formats. Default value is None.
+        :type proposed_lease_id: str
+        :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present,
+         specifies the share snapshot to query. Default value is None.
+        :type sharesnapshot: str
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        comp: Literal["lease"] = kwargs.pop("comp", _params.pop("comp", "lease"))
+        action: Literal["change"] = kwargs.pop("action", _headers.pop("x-ms-lease-action", "change"))
+        restype: Literal["share"] = kwargs.pop("restype", _params.pop("restype", "share"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _request = build_change_lease_request(
+            url=self._config.url,
+            lease_id=lease_id,
+            timeout=timeout,
+            proposed_lease_id=proposed_lease_id,
+            sharesnapshot=sharesnapshot,
+            request_id_parameter=request_id_parameter,
+            file_request_intent=self._config.file_request_intent,
+            comp=comp,
+            action=action,
+            restype=restype,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["x-ms-lease-id"] = self._deserialize("str", response.headers.get("x-ms-lease-id"))
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
+    @distributed_trace_async
+    async def renew_lease(
+        self,
+        lease_id: str,
+        timeout: Optional[int] = None,
+        sharesnapshot: Optional[str] = None,
+        request_id_parameter: Optional[str] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """The Lease Share operation establishes and manages a lock on a share, or the specified snapshot
+        for set and delete share operations.
+
+        :param lease_id: Specifies the current lease ID on the resource. Required.
+        :type lease_id: str
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
+         Timeouts for File Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present,
+         specifies the share snapshot to query. Default value is None.
+        :type sharesnapshot: str
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        comp: Literal["lease"] = kwargs.pop("comp", _params.pop("comp", "lease"))
+        action: Literal["renew"] = kwargs.pop("action", _headers.pop("x-ms-lease-action", "renew"))
+        restype: Literal["share"] = kwargs.pop("restype", _params.pop("restype", "share"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _request = build_renew_lease_request(
+            url=self._config.url,
+            lease_id=lease_id,
+            timeout=timeout,
+            sharesnapshot=sharesnapshot,
+            request_id_parameter=request_id_parameter,
+            file_request_intent=self._config.file_request_intent,
+            comp=comp,
+            action=action,
+            restype=restype,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["x-ms-lease-id"] = self._deserialize("str", response.headers.get("x-ms-lease-id"))
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
+    @distributed_trace_async
+    async def break_lease(
+        self,
+        timeout: Optional[int] = None,
+        break_period: Optional[int] = None,
+        request_id_parameter: Optional[str] = None,
+        sharesnapshot: Optional[str] = None,
+        lease_access_conditions: Optional[_models.LeaseAccessConditions] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """The Lease Share operation establishes and manages a lock on a share, or the specified snapshot
+        for set and delete share operations.
+
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
+         Timeouts for File Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param break_period: For a break operation, the proposed duration, in seconds (between 0
+         and 60), that the lease should continue before it is broken. This break period is only used
+         if it is shorter than the time remaining on the lease; if longer, the time remaining on the
+         lease is used. A new
+         lease will not be available before the break period has expired, but the lease may be held for
+         longer than the break period. If this header does not appear with a break operation, a
+         fixed-duration lease breaks after the remaining lease period elapses, and an infinite lease
+         breaks immediately. Default value is None.
+        :type break_period: int
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present,
+         specifies the share snapshot to query. Default value is None.
+        :type sharesnapshot: str
+        :param lease_access_conditions: Parameter group. Default value is None.
+        :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        comp: Literal["lease"] = kwargs.pop("comp", _params.pop("comp", "lease"))
+        action: Literal["break"] = kwargs.pop("action", _headers.pop("x-ms-lease-action", "break"))
+        restype: Literal["share"] = kwargs.pop("restype", _params.pop("restype", "share"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _lease_id = None
+        if lease_access_conditions is not None:
+            _lease_id = lease_access_conditions.lease_id
+
+        _request = build_break_lease_request(
+            url=self._config.url,
+            timeout=timeout,
+            break_period=break_period,
+            lease_id=_lease_id,
+            request_id_parameter=request_id_parameter,
+            sharesnapshot=sharesnapshot,
+            file_request_intent=self._config.file_request_intent,
+            comp=comp,
+            action=action,
+            restype=restype,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [202]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["x-ms-lease-time"] = self._deserialize("int", response.headers.get("x-ms-lease-time"))
+        response_headers["x-ms-lease-id"] = self._deserialize("str", response.headers.get("x-ms-lease-id"))
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
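+    # The lease methods above form one lifecycle. A hedged sketch chaining them
+    # ("share_ops" and "lease_id" are the assumed names from the acquire_lease sketch;
+    # "uuid" is the standard-library module):
+    #
+    #     await share_ops.renew_lease(lease_id=lease_id)  # extend an active lease
+    #     new_id = await share_ops.change_lease(          # swap in a caller-chosen id
+    #         lease_id=lease_id,
+    #         proposed_lease_id=str(uuid.uuid4()),
+    #         cls=lambda pipeline_response, deserialized, headers: headers["x-ms-lease-id"],
+    #     )
+    #     await share_ops.release_lease(lease_id=new_id)  # free the lock explicitly
+    #     # break_lease needs no lease id; break_period (0-60 seconds) caps the wait.
+    #     await share_ops.break_lease(break_period=0)
+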
+    @distributed_trace_async
+    async def create_snapshot(
+        self, timeout: Optional[int] = None, metadata: Optional[Dict[str, str]] = None, **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """Creates a read-only snapshot of a share.
+
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
+         Timeouts for File Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param metadata: Name-value pairs to associate with a file storage object. Default value
+         is None.
+        :type metadata: dict[str, str]
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        restype: Literal["share"] = kwargs.pop("restype", _params.pop("restype", "share"))
+        comp: Literal["snapshot"] = kwargs.pop("comp", _params.pop("comp", "snapshot"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _request = build_create_snapshot_request(
+            url=self._config.url,
+            timeout=timeout,
+            metadata=metadata,
+            file_request_intent=self._config.file_request_intent,
+            restype=restype,
+            comp=comp,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [201]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["x-ms-snapshot"] = self._deserialize("str", response.headers.get("x-ms-snapshot"))
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
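+    # Sketch: the snapshot id comes back only in the "x-ms-snapshot" response header, so a
+    # "cls" callback is again the way to capture it ("share_ops" assumed as above; the
+    # metadata values are illustrative only).
+    #
+    #     snapshot_id = await share_ops.create_snapshot(
+    #         metadata={"purpose": "nightly-backup"},
+    #         cls=lambda pipeline_response, deserialized, headers: headers["x-ms-snapshot"],
+    #     )
+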
+    @overload
+    async def create_permission(
+        self,
+        share_permission: _models.SharePermission,
+        timeout: Optional[int] = None,
+        *,
+        content_type: str = "application/json",
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """Create a permission (a security descriptor).
+
+        :param share_permission: A permission (a security descriptor) at the share level. Required.
+        :type share_permission: ~azure.storage.fileshare.models.SharePermission
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
+         Timeouts for File Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+         Default value is "application/json".
+        :paramtype content_type: str
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+
+    @overload
+    async def create_permission(
+        self,
+        share_permission: IO[bytes],
+        timeout: Optional[int] = None,
+        *,
+        content_type: str = "application/json",
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """Create a permission (a security descriptor).
+
+        :param share_permission: A permission (a security descriptor) at the share level. Required.
+        :type share_permission: IO[bytes]
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
+         Timeouts for File Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+         Default value is "application/json".
+        :paramtype content_type: str
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+
+    @distributed_trace_async
+    async def create_permission(
+        self, share_permission: Union[_models.SharePermission, IO[bytes]], timeout: Optional[int] = None, **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """Create a permission (a security descriptor).
+
+        :param share_permission: A permission (a security descriptor) at the share level. Is either
+         a SharePermission type or an IO[bytes] type. Required.
+        :type share_permission: ~azure.storage.fileshare.models.SharePermission or IO[bytes]
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
+         Timeouts for File Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        restype: Literal["share"] = kwargs.pop("restype", _params.pop("restype", "share"))
+        comp: Literal["filepermission"] = kwargs.pop("comp", _params.pop("comp", "filepermission"))
+        content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        content_type = content_type or "application/json"
+        _json = None
+        _content = None
+        if isinstance(share_permission, (IOBase, bytes)):
+            _content = share_permission
+        else:
+            _json = self._serialize.body(share_permission, "SharePermission")
+
+        _request = build_create_permission_request(
+            url=self._config.url,
+            timeout=timeout,
+            file_request_intent=self._config.file_request_intent,
+            restype=restype,
+            comp=comp,
+            content_type=content_type,
+            version=self._config.version,
+            json=_json,
+            content=_content,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [201]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+        response_headers["x-ms-file-permission-key"] = self._deserialize(
+            "str", response.headers.get("x-ms-file-permission-key")
+        )
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
+    @distributed_trace_async
+    async def get_permission(
+        self,
+        file_permission_key: str,
+        file_permission_format: Optional[Union[str, _models.FilePermissionFormat]] = None,
+        timeout: Optional[int] = None,
+        **kwargs: Any
+    ) -> _models.SharePermission:
+        # pylint: disable=line-too-long
+        """Returns the permission (security descriptor) for a given key.
+
+        :param file_permission_key: Key of the permission to be set for the directory/file. Required.
+        :type file_permission_key: str
+        :param file_permission_format: Optional. Available for version 2023-06-01 and later. Specifies
+         the format in which the permission is returned. Acceptable values are SDDL or binary. If
+         x-ms-file-permission-format is unspecified or explicitly set to SDDL, the permission is
+         returned in SDDL format. If x-ms-file-permission-format is explicitly set to binary, the
+         permission is returned as a base64 string representing the binary encoding of the permission.
+         Known values are: "Sddl" and "Binary". Default value is None.
+        :type file_permission_format: str or ~azure.storage.fileshare.models.FilePermissionFormat
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
+         Timeouts for File Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :return: SharePermission or the result of cls(response)
+        :rtype: ~azure.storage.fileshare.models.SharePermission
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        restype: Literal["share"] = kwargs.pop("restype", _params.pop("restype", "share"))
+        comp: Literal["filepermission"] = kwargs.pop("comp", _params.pop("comp", "filepermission"))
+        cls: ClsType[_models.SharePermission] = kwargs.pop("cls", None)
+
+        _request = build_get_permission_request(
+            url=self._config.url,
+            file_permission_key=file_permission_key,
+            file_permission_format=file_permission_format,
+            timeout=timeout,
+            file_request_intent=self._config.file_request_intent,
+            restype=restype,
+            comp=comp,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+
+        deserialized = self._deserialize("SharePermission", pipeline_response.http_response)
+
+        if cls:
+            return cls(pipeline_response, deserialized, response_headers)  # type: ignore
+
+        return deserialized  # type: ignore
+
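+    # Hedged round trip for the two permission operations above: create returns the key in
+    # the "x-ms-file-permission-key" response header, and get resolves that key back to a
+    # security descriptor. The SDDL string below is illustrative, and the keyword form of
+    # the SharePermission constructor is an assumption about the generated model.
+    #
+    #     key = await share_ops.create_permission(
+    #         _models.SharePermission(permission="O:SYG:SYD:(A;;FA;;;WD)"),
+    #         cls=lambda pipeline_response, deserialized, headers: headers["x-ms-file-permission-key"],
+    #     )
+    #     descriptor = await share_ops.get_permission(file_permission_key=key)
+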
+    @distributed_trace_async
+    async def set_properties(
+        self,
+        timeout: Optional[int] = None,
+        quota: Optional[int] = None,
+        access_tier: Optional[Union[str, _models.ShareAccessTier]] = None,
+        root_squash: Optional[Union[str, _models.ShareRootSquash]] = None,
+        enable_snapshot_virtual_directory_access: Optional[bool] = None,
+        paid_bursting_enabled: Optional[bool] = None,
+        paid_bursting_max_bandwidth_mibps: Optional[int] = None,
+        paid_bursting_max_iops: Optional[int] = None,
+        share_provisioned_iops: Optional[int] = None,
+        share_provisioned_bandwidth_mibps: Optional[int] = None,
+        lease_access_conditions: Optional[_models.LeaseAccessConditions] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """Sets properties for the specified share.
+
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
+         Timeouts for File Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param quota: Specifies the maximum size of the share, in gigabytes. Default value is None.
+        :type quota: int
+        :param access_tier: Specifies the access tier of the share. Known values are:
+         "TransactionOptimized", "Hot", "Cool", and "Premium". Default value is None.
+        :type access_tier: str or ~azure.storage.fileshare.models.ShareAccessTier
+        :param root_squash: Root squash to set on the share. Only valid for NFS shares. Known values
+         are: "NoRootSquash", "RootSquash", and "AllSquash". Default value is None.
+        :type root_squash: str or ~azure.storage.fileshare.models.ShareRootSquash
+        :param enable_snapshot_virtual_directory_access: Optional. Specifies whether the snapshot
+         virtual directory should be accessible at the root of the share mount point when NFS is
+         enabled. Default value is None.
+        :type enable_snapshot_virtual_directory_access: bool
+        :param paid_bursting_enabled: Optional. Boolean. Default if not specified is false. This
+         property enables paid bursting. Default value is None.
+        :type paid_bursting_enabled: bool
+        :param paid_bursting_max_bandwidth_mibps: Optional. Integer. Default if not specified is the
+         maximum throughput the file share can support. Current maximum for a file share is 10,340
+         MiB/sec. Default value is None.
+        :type paid_bursting_max_bandwidth_mibps: int
+        :param paid_bursting_max_iops: Optional. Integer. Default if not specified is the maximum IOPS
+         the file share can support. Current maximum for a file share is 102,400 IOPS. Default value is
+         None.
+        :type paid_bursting_max_iops: int
+        :param share_provisioned_iops: Optional. Supported in version 2025-01-05 and later. Only
+         allowed for provisioned v2 file shares. Specifies the provisioned number of input/output
+         operations per second (IOPS) of the share. If this is not specified, the provisioned IOPS
+         is set to a value calculated based on the recommendation formula. Default value is None.
+        :type share_provisioned_iops: int
+        :param share_provisioned_bandwidth_mibps: Optional. Supported in version 2025-01-05 and later.
+         Only allowed for provisioned v2 file shares. Specifies the provisioned bandwidth of the share,
+         in mebibytes per second (MiBps). If this is not specified, the provisioned bandwidth is set
+         to a value calculated based on the recommendation formula. Default value is None.
+        :type share_provisioned_bandwidth_mibps: int
+        :param lease_access_conditions: Parameter group. Default value is None.
+        :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        restype: Literal["share"] = kwargs.pop("restype", _params.pop("restype", "share"))
+        comp: Literal["properties"] = kwargs.pop("comp", _params.pop("comp", "properties"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _lease_id = None
+        if lease_access_conditions is not None:
+            _lease_id = lease_access_conditions.lease_id
+
+        _request = build_set_properties_request(
+            url=self._config.url,
+            timeout=timeout,
+            quota=quota,
+            access_tier=access_tier,
+            lease_id=_lease_id,
+            root_squash=root_squash,
+            enable_snapshot_virtual_directory_access=enable_snapshot_virtual_directory_access,
+            paid_bursting_enabled=paid_bursting_enabled,
+            paid_bursting_max_bandwidth_mibps=paid_bursting_max_bandwidth_mibps,
+            paid_bursting_max_iops=paid_bursting_max_iops,
+            share_provisioned_iops=share_provisioned_iops,
+            share_provisioned_bandwidth_mibps=share_provisioned_bandwidth_mibps,
+            file_request_intent=self._config.file_request_intent,
+            restype=restype,
+            comp=comp,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+        response_headers["x-ms-share-quota"] = self._deserialize("int", response.headers.get("x-ms-share-quota"))
+        response_headers["x-ms-share-provisioned-iops"] = self._deserialize(
+            "int", response.headers.get("x-ms-share-provisioned-iops")
+        )
+        response_headers["x-ms-share-provisioned-bandwidth-mibps"] = self._deserialize(
+            "int", response.headers.get("x-ms-share-provisioned-bandwidth-mibps")
+        )
+        response_headers["x-ms-share-included-burst-iops"] = self._deserialize(
+            "int", response.headers.get("x-ms-share-included-burst-iops")
+        )
+        response_headers["x-ms-share-max-burst-credits-for-iops"] = self._deserialize(
+            "int", response.headers.get("x-ms-share-max-burst-credits-for-iops")
+        )
+        response_headers["x-ms-share-next-allowed-quota-downgrade-time"] = self._deserialize(
+            "rfc-1123", response.headers.get("x-ms-share-next-allowed-quota-downgrade-time")
+        )
+        response_headers["x-ms-share-next-allowed-provisioned-iops-downgrade-time"] = self._deserialize(
+            "rfc-1123", response.headers.get("x-ms-share-next-allowed-provisioned-iops-downgrade-time")
+        )
+        response_headers["x-ms-share-next-allowed-provisioned-bandwidth-downgrade-time"] = self._deserialize(
+            "rfc-1123", response.headers.get("x-ms-share-next-allowed-provisioned-bandwidth-downgrade-time")
+        )
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
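+    # Minimal sketch for the property setter above (values are assumptions; the lease
+    # condition is only needed while the share is leased, reusing "lease_id" from the
+    # earlier sketches):
+    #
+    #     await share_ops.set_properties(
+    #         quota=1024,         # new maximum share size, in gigabytes
+    #         access_tier="Hot",  # one of the known ShareAccessTier values
+    #         lease_access_conditions=_models.LeaseAccessConditions(lease_id=lease_id),
+    #     )
+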
+    @distributed_trace_async
+    async def set_metadata(
+        self,
+        timeout: Optional[int] = None,
+        metadata: Optional[Dict[str, str]] = None,
+        lease_access_conditions: Optional[_models.LeaseAccessConditions] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """Sets one or more user-defined name-value pairs for the specified share.
+
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
+         Timeouts for File Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param metadata: Name-value pairs to associate with a file storage object. Default value
+         is None.
+        :type metadata: dict[str, str]
+        :param lease_access_conditions: Parameter group. Default value is None.
+        :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        restype: Literal["share"] = kwargs.pop("restype", _params.pop("restype", "share"))
+        comp: Literal["metadata"] = kwargs.pop("comp", _params.pop("comp", "metadata"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _lease_id = None
+        if lease_access_conditions is not None:
+            _lease_id = lease_access_conditions.lease_id
+
+        _request = build_set_metadata_request(
+            url=self._config.url,
+            timeout=timeout,
+            metadata=metadata,
+            lease_id=_lease_id,
+            file_request_intent=self._config.file_request_intent,
+            restype=restype,
+            comp=comp,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
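+    # Sketch for the metadata setter above: the service replaces the share's metadata set
+    # wholesale, so pass every pair you want to keep (dictionary contents illustrative).
+    #
+    #     await share_ops.set_metadata(metadata={"team": "storage", "env": "dev"})
+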
+    @distributed_trace_async
+    async def get_access_policy(
+        self,
+        timeout: Optional[int] = None,
+        lease_access_conditions: Optional[_models.LeaseAccessConditions] = None,
+        **kwargs: Any
+    ) -> List[_models.SignedIdentifier]:
+        # pylint: disable=line-too-long
+        """Returns information about stored access policies specified on the share.
+
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
+         Timeouts for File Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param lease_access_conditions: Parameter group. Default value is None.
+        :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions
+        :return: list of SignedIdentifier or the result of cls(response)
+        :rtype: list[~azure.storage.fileshare.models.SignedIdentifier]
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        restype: Literal["share"] = kwargs.pop("restype", _params.pop("restype", "share"))
+        comp: Literal["acl"] = kwargs.pop("comp", _params.pop("comp", "acl"))
+        cls: ClsType[List[_models.SignedIdentifier]] = kwargs.pop("cls", None)
+
+        _lease_id = None
+        if lease_access_conditions is not None:
+            _lease_id = lease_access_conditions.lease_id
+
+        _request = build_get_access_policy_request(
+            url=self._config.url,
+            timeout=timeout,
+            lease_id=_lease_id,
+            file_request_intent=self._config.file_request_intent,
+            restype=restype,
+            comp=comp,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+
+        deserialized = self._deserialize("[SignedIdentifier]", pipeline_response.http_response)
+
+        if cls:
+            return cls(pipeline_response, deserialized, response_headers)  # type: ignore
+
+        return deserialized  # type: ignore
+
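+    # Usage sketch (illustrative; ``share_ops`` is an assumed instance of this
+    # operations class). The XML body deserializes into SignedIdentifier models,
+    # each pairing an Id with an AccessPolicy:
+    #
+    #     identifiers = await share_ops.get_access_policy()
+    #     for identifier in identifiers:
+    #         print(identifier.id, identifier.access_policy.permission)
+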
+    @distributed_trace_async
+    async def set_access_policy(
+        self,
+        timeout: Optional[int] = None,
+        lease_access_conditions: Optional[_models.LeaseAccessConditions] = None,
+        share_acl: Optional[List[_models.SignedIdentifier]] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """Sets a stored access policy for use with shared access signatures.
+
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
+         Timeouts for File Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param lease_access_conditions: Parameter group. Default value is None.
+        :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions
+        :param share_acl: The ACL for the share. Default value is None.
+        :type share_acl: list[~azure.storage.fileshare.models.SignedIdentifier]
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        restype: Literal["share"] = kwargs.pop("restype", _params.pop("restype", "share"))
+        comp: Literal["acl"] = kwargs.pop("comp", _params.pop("comp", "acl"))
+        content_type: str = kwargs.pop("content_type", _headers.pop("Content-Type", "application/xml"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _lease_id = None
+        if lease_access_conditions is not None:
+            _lease_id = lease_access_conditions.lease_id
+        serialization_ctxt = {"xml": {"name": "SignedIdentifiers", "wrapped": True}}
+        if share_acl is not None:
+            _content = self._serialize.body(
+                share_acl, "[SignedIdentifier]", is_xml=True, serialization_ctxt=serialization_ctxt
+            )
+        else:
+            _content = None
+
+        _request = build_set_access_policy_request(
+            url=self._config.url,
+            timeout=timeout,
+            lease_id=_lease_id,
+            file_request_intent=self._config.file_request_intent,
+            restype=restype,
+            comp=comp,
+            content_type=content_type,
+            version=self._config.version,
+            content=_content,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
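+    # Usage sketch (illustrative; identifier values invented). The serialization
+    # context above wraps the list in a root <SignedIdentifiers> XML element before
+    # it is sent with Content-Type application/xml:
+    #
+    #     acl = [
+    #         _models.SignedIdentifier(
+    #             id="policy-1",
+    #             access_policy=_models.AccessPolicy(
+    #                 start="2025-01-01T00:00:00Z",
+    #                 expiry="2025-01-08T00:00:00Z",
+    #                 permission="rwdl",
+    #             ),
+    #         )
+    #     ]
+    #     await share_ops.set_access_policy(share_acl=acl)
+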
+    @distributed_trace_async
+    async def get_statistics(
+        self,
+        timeout: Optional[int] = None,
+        lease_access_conditions: Optional[_models.LeaseAccessConditions] = None,
+        **kwargs: Any
+    ) -> _models.ShareStats:
+        # pylint: disable=line-too-long
+        """Retrieves statistics related to the share.
+
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
+         Timeouts for File Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param lease_access_conditions: Parameter group. Default value is None.
+        :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions
+        :return: ShareStats or the result of cls(response)
+        :rtype: ~azure.storage.fileshare.models.ShareStats
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        restype: Literal["share"] = kwargs.pop("restype", _params.pop("restype", "share"))
+        comp: Literal["stats"] = kwargs.pop("comp", _params.pop("comp", "stats"))
+        cls: ClsType[_models.ShareStats] = kwargs.pop("cls", None)
+
+        _lease_id = None
+        if lease_access_conditions is not None:
+            _lease_id = lease_access_conditions.lease_id
+
+        _request = build_get_statistics_request(
+            url=self._config.url,
+            timeout=timeout,
+            lease_id=_lease_id,
+            file_request_intent=self._config.file_request_intent,
+            restype=restype,
+            comp=comp,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+
+        deserialized = self._deserialize("ShareStats", pipeline_response.http_response)
+
+        if cls:
+            return cls(pipeline_response, deserialized, response_headers)  # type: ignore
+
+        return deserialized  # type: ignore
+
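+    # Usage sketch (illustrative; client wiring assumed). ShareStats reports the
+    # approximate size of the data stored on the share:
+    #
+    #     stats = await share_ops.get_statistics()
+    #     print(f"share usage: {stats.share_usage_bytes} bytes")
+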
+    @distributed_trace_async
+    async def restore(
+        self,
+        timeout: Optional[int] = None,
+        request_id_parameter: Optional[str] = None,
+        deleted_share_name: Optional[str] = None,
+        deleted_share_version: Optional[str] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """Restores a previously deleted Share.
+
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
+         Timeouts for File Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param deleted_share_name: Specifies the name of the previously-deleted share. Default value is
+         None.
+        :type deleted_share_name: str
+        :param deleted_share_version: Specifies the version of the previously-deleted share. Default
+         value is None.
+        :type deleted_share_version: str
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        restype: Literal["share"] = kwargs.pop("restype", _params.pop("restype", "share"))
+        comp: Literal["undelete"] = kwargs.pop("comp", _params.pop("comp", "undelete"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _request = build_restore_request(
+            url=self._config.url,
+            timeout=timeout,
+            request_id_parameter=request_id_parameter,
+            deleted_share_name=deleted_share_name,
+            deleted_share_version=deleted_share_version,
+            file_request_intent=self._config.file_request_intent,
+            restype=restype,
+            comp=comp,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [201]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+        response_headers["x-ms-share-quota"] = self._deserialize("int", response.headers.get("x-ms-share-quota"))
+        response_headers["x-ms-share-provisioned-iops"] = self._deserialize(
+            "int", response.headers.get("x-ms-share-provisioned-iops")
+        )
+        response_headers["x-ms-share-provisioned-bandwidth-mibps"] = self._deserialize(
+            "int", response.headers.get("x-ms-share-provisioned-bandwidth-mibps")
+        )
+        response_headers["x-ms-share-included-burst-iops"] = self._deserialize(
+            "int", response.headers.get("x-ms-share-included-burst-iops")
+        )
+        response_headers["x-ms-share-max-burst-credits-for-iops"] = self._deserialize(
+            "int", response.headers.get("x-ms-share-max-burst-credits-for-iops")
+        )
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
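+
+    # Usage sketch (illustrative; share name and version invented). Restore targets
+    # a soft-deleted share by the name/version pair reported when listing shares
+    # with the "deleted" include option; success is a 201 Created carrying the
+    # restored share's quota and provisioning headers:
+    #
+    #     await share_ops.restore(
+    #         deleted_share_name="myshare",
+    #         deleted_share_version="01D60F8BB59A4652",
+    #     )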
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/fileshare/_generated/models/__init__.py b/.venv/lib/python3.12/site-packages/azure/storage/fileshare/_generated/models/__init__.py
new file mode 100644
index 00000000..cb14cb30
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/fileshare/_generated/models/__init__.py
@@ -0,0 +1,130 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+# pylint: disable=wrong-import-position
+
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from ._patch import *  # pylint: disable=unused-wildcard-import
+
+
+from ._models_py3 import (  # type: ignore
+    AccessPolicy,
+    ClearRange,
+    CopyFileSmbInfo,
+    CorsRule,
+    DestinationLeaseAccessConditions,
+    DirectoryItem,
+    FileHTTPHeaders,
+    FileItem,
+    FileProperty,
+    FileRange,
+    FilesAndDirectoriesListSegment,
+    HandleItem,
+    LeaseAccessConditions,
+    ListFilesAndDirectoriesSegmentResponse,
+    ListHandlesResponse,
+    ListSharesResponse,
+    Metrics,
+    RetentionPolicy,
+    ShareFileRangeList,
+    ShareItemInternal,
+    SharePermission,
+    SharePropertiesInternal,
+    ShareProtocolSettings,
+    ShareSmbSettings,
+    ShareStats,
+    SignedIdentifier,
+    SmbMultichannel,
+    SourceLeaseAccessConditions,
+    SourceModifiedAccessConditions,
+    StorageError,
+    StorageServiceProperties,
+    StringEncoded,
+)
+
+from ._azure_file_storage_enums import (  # type: ignore
+    AccessRight,
+    CopyStatusType,
+    DeleteSnapshotsOptionType,
+    FileLastWrittenMode,
+    FilePermissionFormat,
+    FileRangeWriteType,
+    LeaseDurationType,
+    LeaseStateType,
+    LeaseStatusType,
+    ListFilesIncludeType,
+    ListSharesIncludeType,
+    ModeCopyMode,
+    NfsFileType,
+    OwnerCopyMode,
+    PermissionCopyModeType,
+    ShareAccessTier,
+    ShareRootSquash,
+    ShareTokenIntent,
+    StorageErrorCode,
+)
+from ._patch import __all__ as _patch_all
+from ._patch import *
+from ._patch import patch_sdk as _patch_sdk
+
+__all__ = [
+    "AccessPolicy",
+    "ClearRange",
+    "CopyFileSmbInfo",
+    "CorsRule",
+    "DestinationLeaseAccessConditions",
+    "DirectoryItem",
+    "FileHTTPHeaders",
+    "FileItem",
+    "FileProperty",
+    "FileRange",
+    "FilesAndDirectoriesListSegment",
+    "HandleItem",
+    "LeaseAccessConditions",
+    "ListFilesAndDirectoriesSegmentResponse",
+    "ListHandlesResponse",
+    "ListSharesResponse",
+    "Metrics",
+    "RetentionPolicy",
+    "ShareFileRangeList",
+    "ShareItemInternal",
+    "SharePermission",
+    "SharePropertiesInternal",
+    "ShareProtocolSettings",
+    "ShareSmbSettings",
+    "ShareStats",
+    "SignedIdentifier",
+    "SmbMultichannel",
+    "SourceLeaseAccessConditions",
+    "SourceModifiedAccessConditions",
+    "StorageError",
+    "StorageServiceProperties",
+    "StringEncoded",
+    "AccessRight",
+    "CopyStatusType",
+    "DeleteSnapshotsOptionType",
+    "FileLastWrittenMode",
+    "FilePermissionFormat",
+    "FileRangeWriteType",
+    "LeaseDurationType",
+    "LeaseStateType",
+    "LeaseStatusType",
+    "ListFilesIncludeType",
+    "ListSharesIncludeType",
+    "ModeCopyMode",
+    "NfsFileType",
+    "OwnerCopyMode",
+    "PermissionCopyModeType",
+    "ShareAccessTier",
+    "ShareRootSquash",
+    "ShareTokenIntent",
+    "StorageErrorCode",
+]
+__all__.extend([p for p in _patch_all if p not in __all__])  # pyright: ignore
+_patch_sdk()
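+
+# Import sketch (illustrative, not part of the generated file). The re-exports
+# above mean callers reference models and enums through this package rather than
+# the private submodules:
+#
+#     from azure.storage.fileshare._generated import models
+#
+#     policy = models.AccessPolicy(start="2025-01-01T00:00:00Z", permission="r")
+#     assert "ShareStats" in models.__all__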
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/fileshare/_generated/models/_azure_file_storage_enums.py b/.venv/lib/python3.12/site-packages/azure/storage/fileshare/_generated/models/_azure_file_storage_enums.py
new file mode 100644
index 00000000..efc7a7fe
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/fileshare/_generated/models/_azure_file_storage_enums.py
@@ -0,0 +1,222 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+from enum import Enum
+from azure.core import CaseInsensitiveEnumMeta
+
+
+class AccessRight(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+    """Access rights of the access policy."""
+
+    READ = "Read"
+    WRITE = "Write"
+    DELETE = "Delete"
+
+
+class CopyStatusType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+    """CopyStatusType."""
+
+    PENDING = "pending"
+    SUCCESS = "success"
+    ABORTED = "aborted"
+    FAILED = "failed"
+
+
+class DeleteSnapshotsOptionType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+    """DeleteSnapshotsOptionType."""
+
+    INCLUDE = "include"
+    INCLUDE_LEASED = "include-leased"
+
+
+class FileLastWrittenMode(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+    """FileLastWrittenMode."""
+
+    NOW = "Now"
+    PRESERVE = "Preserve"
+
+
+class FilePermissionFormat(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+    """FilePermissionFormat."""
+
+    SDDL = "Sddl"
+    BINARY = "Binary"
+
+
+class FileRangeWriteType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+    """FileRangeWriteType."""
+
+    UPDATE = "update"
+    CLEAR = "clear"
+
+
+class LeaseDurationType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+    """When a share is leased, specifies whether the lease is of infinite or fixed duration."""
+
+    INFINITE = "infinite"
+    FIXED = "fixed"
+
+
+class LeaseStateType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+    """Lease state of the share."""
+
+    AVAILABLE = "available"
+    LEASED = "leased"
+    EXPIRED = "expired"
+    BREAKING = "breaking"
+    BROKEN = "broken"
+
+
+class LeaseStatusType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+    """The current lease status of the share."""
+
+    LOCKED = "locked"
+    UNLOCKED = "unlocked"
+
+
+class ListFilesIncludeType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+    """ListFilesIncludeType."""
+
+    TIMESTAMPS = "Timestamps"
+    ETAG = "Etag"
+    ATTRIBUTES = "Attributes"
+    PERMISSION_KEY = "PermissionKey"
+
+
+class ListSharesIncludeType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+    """ListSharesIncludeType."""
+
+    SNAPSHOTS = "snapshots"
+    METADATA = "metadata"
+    DELETED = "deleted"
+
+
+class ModeCopyMode(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+    """ModeCopyMode."""
+
+    SOURCE = "source"
+    OVERRIDE = "override"
+
+
+class NfsFileType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+    """NfsFileType."""
+
+    REGULAR = "Regular"
+    DIRECTORY = "Directory"
+    SYM_LINK = "SymLink"
+
+
+class OwnerCopyMode(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+    """OwnerCopyMode."""
+
+    SOURCE = "source"
+    OVERRIDE = "override"
+
+
+class PermissionCopyModeType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+    """PermissionCopyModeType."""
+
+    SOURCE = "source"
+    OVERRIDE = "override"
+
+
+class ShareAccessTier(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+    """ShareAccessTier."""
+
+    TRANSACTION_OPTIMIZED = "TransactionOptimized"
+    HOT = "Hot"
+    COOL = "Cool"
+    PREMIUM = "Premium"
+
+
+class ShareRootSquash(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+    """ShareRootSquash."""
+
+    NO_ROOT_SQUASH = "NoRootSquash"
+    ROOT_SQUASH = "RootSquash"
+    ALL_SQUASH = "AllSquash"
+
+
+class ShareTokenIntent(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+    """ShareTokenIntent."""
+
+    BACKUP = "backup"
+
+
+class StorageErrorCode(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+    """Error codes returned by the service."""
+
+    ACCOUNT_ALREADY_EXISTS = "AccountAlreadyExists"
+    ACCOUNT_BEING_CREATED = "AccountBeingCreated"
+    ACCOUNT_IS_DISABLED = "AccountIsDisabled"
+    AUTHENTICATION_FAILED = "AuthenticationFailed"
+    AUTHORIZATION_FAILURE = "AuthorizationFailure"
+    CONDITION_HEADERS_NOT_SUPPORTED = "ConditionHeadersNotSupported"
+    CONDITION_NOT_MET = "ConditionNotMet"
+    EMPTY_METADATA_KEY = "EmptyMetadataKey"
+    FILE_SHARE_PROVISIONED_BANDWIDTH_DOWNGRADE_NOT_ALLOWED = "FileShareProvisionedBandwidthDowngradeNotAllowed"
+    FILE_SHARE_PROVISIONED_IOPS_DOWNGRADE_NOT_ALLOWED = "FileShareProvisionedIopsDowngradeNotAllowed"
+    INSUFFICIENT_ACCOUNT_PERMISSIONS = "InsufficientAccountPermissions"
+    INTERNAL_ERROR = "InternalError"
+    INVALID_AUTHENTICATION_INFO = "InvalidAuthenticationInfo"
+    INVALID_HEADER_VALUE = "InvalidHeaderValue"
+    INVALID_HTTP_VERB = "InvalidHttpVerb"
+    INVALID_INPUT = "InvalidInput"
+    INVALID_MD5 = "InvalidMd5"
+    INVALID_METADATA = "InvalidMetadata"
+    INVALID_QUERY_PARAMETER_VALUE = "InvalidQueryParameterValue"
+    INVALID_RANGE = "InvalidRange"
+    INVALID_RESOURCE_NAME = "InvalidResourceName"
+    INVALID_URI = "InvalidUri"
+    INVALID_XML_DOCUMENT = "InvalidXmlDocument"
+    INVALID_XML_NODE_VALUE = "InvalidXmlNodeValue"
+    MD5_MISMATCH = "Md5Mismatch"
+    METADATA_TOO_LARGE = "MetadataTooLarge"
+    MISSING_CONTENT_LENGTH_HEADER = "MissingContentLengthHeader"
+    MISSING_REQUIRED_QUERY_PARAMETER = "MissingRequiredQueryParameter"
+    MISSING_REQUIRED_HEADER = "MissingRequiredHeader"
+    MISSING_REQUIRED_XML_NODE = "MissingRequiredXmlNode"
+    MULTIPLE_CONDITION_HEADERS_NOT_SUPPORTED = "MultipleConditionHeadersNotSupported"
+    OPERATION_TIMED_OUT = "OperationTimedOut"
+    OUT_OF_RANGE_INPUT = "OutOfRangeInput"
+    OUT_OF_RANGE_QUERY_PARAMETER_VALUE = "OutOfRangeQueryParameterValue"
+    REQUEST_BODY_TOO_LARGE = "RequestBodyTooLarge"
+    RESOURCE_TYPE_MISMATCH = "ResourceTypeMismatch"
+    REQUEST_URL_FAILED_TO_PARSE = "RequestUrlFailedToParse"
+    RESOURCE_ALREADY_EXISTS = "ResourceAlreadyExists"
+    RESOURCE_NOT_FOUND = "ResourceNotFound"
+    SERVER_BUSY = "ServerBusy"
+    UNSUPPORTED_HEADER = "UnsupportedHeader"
+    UNSUPPORTED_XML_NODE = "UnsupportedXmlNode"
+    UNSUPPORTED_QUERY_PARAMETER = "UnsupportedQueryParameter"
+    UNSUPPORTED_HTTP_VERB = "UnsupportedHttpVerb"
+    CANNOT_DELETE_FILE_OR_DIRECTORY = "CannotDeleteFileOrDirectory"
+    CLIENT_CACHE_FLUSH_DELAY = "ClientCacheFlushDelay"
+    DELETE_PENDING = "DeletePending"
+    DIRECTORY_NOT_EMPTY = "DirectoryNotEmpty"
+    FILE_LOCK_CONFLICT = "FileLockConflict"
+    INVALID_FILE_OR_DIRECTORY_PATH_NAME = "InvalidFileOrDirectoryPathName"
+    PARENT_NOT_FOUND = "ParentNotFound"
+    READ_ONLY_ATTRIBUTE = "ReadOnlyAttribute"
+    SHARE_ALREADY_EXISTS = "ShareAlreadyExists"
+    SHARE_BEING_DELETED = "ShareBeingDeleted"
+    SHARE_DISABLED = "ShareDisabled"
+    SHARE_NOT_FOUND = "ShareNotFound"
+    SHARING_VIOLATION = "SharingViolation"
+    SHARE_SNAPSHOT_IN_PROGRESS = "ShareSnapshotInProgress"
+    SHARE_SNAPSHOT_COUNT_EXCEEDED = "ShareSnapshotCountExceeded"
+    SHARE_SNAPSHOT_OPERATION_NOT_SUPPORTED = "ShareSnapshotOperationNotSupported"
+    SHARE_HAS_SNAPSHOTS = "ShareHasSnapshots"
+    PREVIOUS_SNAPSHOT_NOT_FOUND = "PreviousSnapshotNotFound"
+    CONTAINER_QUOTA_DOWNGRADE_NOT_ALLOWED = "ContainerQuotaDowngradeNotAllowed"
+    AUTHORIZATION_SOURCE_IP_MISMATCH = "AuthorizationSourceIPMismatch"
+    AUTHORIZATION_PROTOCOL_MISMATCH = "AuthorizationProtocolMismatch"
+    AUTHORIZATION_PERMISSION_MISMATCH = "AuthorizationPermissionMismatch"
+    AUTHORIZATION_SERVICE_MISMATCH = "AuthorizationServiceMismatch"
+    AUTHORIZATION_RESOURCE_TYPE_MISMATCH = "AuthorizationResourceTypeMismatch"
+    FEATURE_VERSION_MISMATCH = "FeatureVersionMismatch"
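+
+# Usage sketch (illustrative; behavior assumed from azure.core's
+# CaseInsensitiveEnumMeta, which uppercases the member name on lookup):
+#
+#     assert StorageErrorCode["share_not_found"] is StorageErrorCode.SHARE_NOT_FOUND
+#     assert AccessRight["read"] is AccessRight.READ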
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/fileshare/_generated/models/_models_py3.py b/.venv/lib/python3.12/site-packages/azure/storage/fileshare/_generated/models/_models_py3.py
new file mode 100644
index 00000000..61fe75ce
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/fileshare/_generated/models/_models_py3.py
@@ -0,0 +1,1711 @@
+# pylint: disable=too-many-lines
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+import datetime
+from typing import Any, Dict, List, Optional, TYPE_CHECKING, Union
+
+from .. import _serialization
+
+if TYPE_CHECKING:
+    from .. import models as _models
+
+
+class AccessPolicy(_serialization.Model):
+    """An Access policy.
+
+    :ivar start: The date-time the policy is active.
+    :vartype start: str
+    :ivar expiry: The date-time the policy expires.
+    :vartype expiry: str
+    :ivar permission: The permissions for the ACL policy.
+    :vartype permission: str
+    """
+
+    _attribute_map = {
+        "start": {"key": "Start", "type": "str"},
+        "expiry": {"key": "Expiry", "type": "str"},
+        "permission": {"key": "Permission", "type": "str"},
+    }
+
+    def __init__(
+        self,
+        *,
+        start: Optional[str] = None,
+        expiry: Optional[str] = None,
+        permission: Optional[str] = None,
+        **kwargs: Any
+    ) -> None:
+        """
+        :keyword start: The date-time the policy is active.
+        :paramtype start: str
+        :keyword expiry: The date-time the policy expires.
+        :paramtype expiry: str
+        :keyword permission: The permissions for the ACL policy.
+        :paramtype permission: str
+        """
+        super().__init__(**kwargs)
+        self.start = start
+        self.expiry = expiry
+        self.permission = permission
+
+
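+# Construction sketch (illustrative; values invented). start and expiry are plain
+# strings, so callers format ISO 8601 timestamps themselves:
+#
+#     import datetime
+#
+#     now = datetime.datetime.now(datetime.timezone.utc)
+#     policy = AccessPolicy(
+#         start=now.strftime("%Y-%m-%dT%H:%M:%SZ"),
+#         expiry=(now + datetime.timedelta(hours=1)).strftime("%Y-%m-%dT%H:%M:%SZ"),
+#         permission="rl",
+#     )
+
+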
+class ClearRange(_serialization.Model):
+    """ClearRange.
+
+    All required parameters must be populated in order to send to server.
+
+    :ivar start: Required.
+    :vartype start: int
+    :ivar end: Required.
+    :vartype end: int
+    """
+
+    _validation = {
+        "start": {"required": True},
+        "end": {"required": True},
+    }
+
+    _attribute_map = {
+        "start": {"key": "Start", "type": "int", "xml": {"name": "Start"}},
+        "end": {"key": "End", "type": "int", "xml": {"name": "End"}},
+    }
+    _xml_map = {"name": "ClearRange"}
+
+    def __init__(self, *, start: int, end: int, **kwargs: Any) -> None:
+        """
+        :keyword start: Required.
+        :paramtype start: int
+        :keyword end: Required.
+        :paramtype end: int
+        """
+        super().__init__(**kwargs)
+        self.start = start
+        self.end = end
+
+
+class CopyFileSmbInfo(_serialization.Model):
+    """Parameter group.
+
+    :ivar file_attributes: Specifies either the option to copy file attributes from the source
+     file to the target file, or a list of attributes to set on the target file.
+    :vartype file_attributes: str
+    :ivar file_creation_time: Specifies either the option to copy the creation time from the
+     source file to the target file, or a time value in ISO 8601 format to set as the creation
+     time on the target file.
+    :vartype file_creation_time: str
+    :ivar file_last_write_time: Specifies either the option to copy the last write time from the
+     source file to the target file, or a time value in ISO 8601 format to set as the last write
+     time on the target file.
+    :vartype file_last_write_time: str
+    :ivar file_change_time: Specifies either the option to copy the change time from the source
+     file to the target file, or a time value in ISO 8601 format to set as the change time on the
+     target file.
+    :vartype file_change_time: str
+    :ivar file_permission_copy_mode: Specifies the option to copy file security descriptor from
+     source file or to set it using the value which is defined by the header value of
+     x-ms-file-permission or x-ms-file-permission-key. Known values are: "source" and "override".
+    :vartype file_permission_copy_mode: str or
+     ~azure.storage.fileshare.models.PermissionCopyModeType
+    :ivar ignore_read_only: Specifies the option to overwrite the target file if it already exists
+     and has read-only attribute set.
+    :vartype ignore_read_only: bool
+    :ivar set_archive_attribute: Specifies the option to set archive attribute on a target file.
+     True means archive attribute will be set on a target file despite attribute overrides or a
+     source file state.
+    :vartype set_archive_attribute: bool
+    """
+
+    _attribute_map = {
+        "file_attributes": {"key": "fileAttributes", "type": "str"},
+        "file_creation_time": {"key": "fileCreationTime", "type": "str"},
+        "file_last_write_time": {"key": "fileLastWriteTime", "type": "str"},
+        "file_change_time": {"key": "fileChangeTime", "type": "str"},
+        "file_permission_copy_mode": {"key": "filePermissionCopyMode", "type": "str"},
+        "ignore_read_only": {"key": "ignoreReadOnly", "type": "bool"},
+        "set_archive_attribute": {"key": "setArchiveAttribute", "type": "bool"},
+    }
+
+    def __init__(
+        self,
+        *,
+        file_attributes: Optional[str] = None,
+        file_creation_time: Optional[str] = None,
+        file_last_write_time: Optional[str] = None,
+        file_change_time: Optional[str] = None,
+        file_permission_copy_mode: Optional[Union[str, "_models.PermissionCopyModeType"]] = None,
+        ignore_read_only: Optional[bool] = None,
+        set_archive_attribute: Optional[bool] = None,
+        **kwargs: Any
+    ) -> None:
+        """
+        :keyword file_attributes: Specifies either the option to copy file attributes from the
+         source file to the target file, or a list of attributes to set on the target file.
+        :paramtype file_attributes: str
+        :keyword file_creation_time: Specifies either the option to copy the creation time from
+         the source file to the target file, or a time value in ISO 8601 format to set as the
+         creation time on the target file.
+        :paramtype file_creation_time: str
+        :keyword file_last_write_time: Specifies either the option to copy the last write time
+         from the source file to the target file, or a time value in ISO 8601 format to set as
+         the last write time on the target file.
+        :paramtype file_last_write_time: str
+        :keyword file_change_time: Specifies either the option to copy the change time from the
+         source file to the target file, or a time value in ISO 8601 format to set as the change
+         time on the target file.
+        :paramtype file_change_time: str
+        :keyword file_permission_copy_mode: Specifies the option to copy file security descriptor from
+         source file or to set it using the value which is defined by the header value of
+         x-ms-file-permission or x-ms-file-permission-key. Known values are: "source" and "override".
+        :paramtype file_permission_copy_mode: str or
+         ~azure.storage.fileshare.models.PermissionCopyModeType
+        :keyword ignore_read_only: Specifies the option to overwrite the target file if it already
+         exists and has read-only attribute set.
+        :paramtype ignore_read_only: bool
+        :keyword set_archive_attribute: Specifies the option to set archive attribute on a target file.
+         True means archive attribute will be set on a target file despite attribute overrides or a
+         source file state.
+        :paramtype set_archive_attribute: bool
+        """
+        super().__init__(**kwargs)
+        self.file_attributes = file_attributes
+        self.file_creation_time = file_creation_time
+        self.file_last_write_time = file_last_write_time
+        self.file_change_time = file_change_time
+        self.file_permission_copy_mode = file_permission_copy_mode
+        self.ignore_read_only = ignore_read_only
+        self.set_archive_attribute = set_archive_attribute
+
+
+class CorsRule(_serialization.Model):
+    """CORS is an HTTP feature that enables a web application running under one domain to access
+    resources in another domain. Web browsers implement a security restriction known as same-origin
+    policy that prevents a web page from calling APIs in a different domain; CORS provides a secure
+    way to allow one domain (the origin domain) to call APIs in another domain.
+
+    All required parameters must be populated in order to send to server.
+
+    :ivar allowed_origins: The origin domains that are permitted to make a request against the
+     storage service via CORS. The origin domain is the domain from which the request originates.
+     Note that the origin must be an exact case-sensitive match with the origin that the user agent
+     sends to the service. You can also use the wildcard character '*' to allow all origin domains
+     to make requests via CORS. Required.
+    :vartype allowed_origins: str
+    :ivar allowed_methods: The methods (HTTP request verbs) that the origin domain may use for a
+     CORS request. (comma separated). Required.
+    :vartype allowed_methods: str
+    :ivar allowed_headers: The request headers that the origin domain may specify on the CORS
+     request. Required.
+    :vartype allowed_headers: str
+    :ivar exposed_headers: The response headers that may be sent in the response to the CORS
+     request and exposed by the browser to the request issuer. Required.
+    :vartype exposed_headers: str
+    :ivar max_age_in_seconds: The maximum amount of time that a browser should cache the
+     preflight OPTIONS request. Required.
+    :vartype max_age_in_seconds: int
+    """
+
+    _validation = {
+        "allowed_origins": {"required": True},
+        "allowed_methods": {"required": True},
+        "allowed_headers": {"required": True},
+        "exposed_headers": {"required": True},
+        "max_age_in_seconds": {"required": True, "minimum": 0},
+    }
+
+    _attribute_map = {
+        "allowed_origins": {"key": "AllowedOrigins", "type": "str"},
+        "allowed_methods": {"key": "AllowedMethods", "type": "str"},
+        "allowed_headers": {"key": "AllowedHeaders", "type": "str"},
+        "exposed_headers": {"key": "ExposedHeaders", "type": "str"},
+        "max_age_in_seconds": {"key": "MaxAgeInSeconds", "type": "int"},
+    }
+
+    def __init__(
+        self,
+        *,
+        allowed_origins: str,
+        allowed_methods: str,
+        allowed_headers: str,
+        exposed_headers: str,
+        max_age_in_seconds: int,
+        **kwargs: Any
+    ) -> None:
+        """
+        :keyword allowed_origins: The origin domains that are permitted to make a request against the
+         storage service via CORS. The origin domain is the domain from which the request originates.
+         Note that the origin must be an exact case-sensitive match with the origin that the user agent
+         sends to the service. You can also use the wildcard character '*' to allow all origin domains
+         to make requests via CORS. Required.
+        :paramtype allowed_origins: str
+        :keyword allowed_methods: The methods (HTTP request verbs) that the origin domain may use for a
+         CORS request. (comma separated). Required.
+        :paramtype allowed_methods: str
+        :keyword allowed_headers: The request headers that the origin domain may specify on the CORS
+         request. Required.
+        :paramtype allowed_headers: str
+        :keyword exposed_headers: The response headers that may be sent in the response to the CORS
+         request and exposed by the browser to the request issuer. Required.
+        :paramtype exposed_headers: str
+        :keyword max_age_in_seconds: The maximum amount of time that a browser should cache the
+         preflight OPTIONS request. Required.
+        :paramtype max_age_in_seconds: int
+        """
+        super().__init__(**kwargs)
+        self.allowed_origins = allowed_origins
+        self.allowed_methods = allowed_methods
+        self.allowed_headers = allowed_headers
+        self.exposed_headers = exposed_headers
+        self.max_age_in_seconds = max_age_in_seconds
+
+
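+# Construction sketch (illustrative; origins invented). A permissive rule for
+# local development might look like this; the service stores it in the account's
+# service properties:
+#
+#     rule = CorsRule(
+#         allowed_origins="http://localhost:3000",
+#         allowed_methods="GET,PUT",
+#         allowed_headers="x-ms-meta-*",
+#         exposed_headers="x-ms-request-id",
+#         max_age_in_seconds=3600,
+#     )
+
+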
+class DestinationLeaseAccessConditions(_serialization.Model):
+    """Parameter group.
+
+    :ivar destination_lease_id: Required if the destination file has an active infinite lease. The
+     lease ID specified for this header must match the lease ID of the destination file. If the
+     request does not include the lease ID or it is not valid, the operation fails with status code
+     412 (Precondition Failed). If this header is specified and the destination file does not
+     currently have an active lease, the operation will also fail with status code 412 (Precondition
+     Failed).
+    :vartype destination_lease_id: str
+    """
+
+    _attribute_map = {
+        "destination_lease_id": {"key": "destinationLeaseId", "type": "str"},
+    }
+
+    def __init__(self, *, destination_lease_id: Optional[str] = None, **kwargs: Any) -> None:
+        """
+        :keyword destination_lease_id: Required if the destination file has an active infinite lease.
+         The lease ID specified for this header must match the lease ID of the destination file. If the
+         request does not include the lease ID or it is not valid, the operation fails with status code
+         412 (Precondition Failed). If this header is specified and the destination file does not
+         currently have an active lease, the operation will also fail with status code 412 (Precondition
+         Failed).
+        :paramtype destination_lease_id: str
+        """
+        super().__init__(**kwargs)
+        self.destination_lease_id = destination_lease_id
+
+
+class DirectoryItem(_serialization.Model):
+    """A listed directory item.
+
+    All required parameters must be populated in order to send to server.
+
+    :ivar name: Required.
+    :vartype name: ~azure.storage.fileshare.models.StringEncoded
+    :ivar file_id:
+    :vartype file_id: str
+    :ivar properties: File properties.
+    :vartype properties: ~azure.storage.fileshare.models.FileProperty
+    :ivar attributes:
+    :vartype attributes: str
+    :ivar permission_key:
+    :vartype permission_key: str
+    """
+
+    _validation = {
+        "name": {"required": True},
+    }
+
+    _attribute_map = {
+        "name": {"key": "Name", "type": "StringEncoded"},
+        "file_id": {"key": "FileId", "type": "str"},
+        "properties": {"key": "Properties", "type": "FileProperty"},
+        "attributes": {"key": "Attributes", "type": "str"},
+        "permission_key": {"key": "PermissionKey", "type": "str"},
+    }
+    _xml_map = {"name": "Directory"}
+
+    def __init__(
+        self,
+        *,
+        name: "_models.StringEncoded",
+        file_id: Optional[str] = None,
+        properties: Optional["_models.FileProperty"] = None,
+        attributes: Optional[str] = None,
+        permission_key: Optional[str] = None,
+        **kwargs: Any
+    ) -> None:
+        """
+        :keyword name: Required.
+        :paramtype name: ~azure.storage.fileshare.models.StringEncoded
+        :keyword file_id:
+        :paramtype file_id: str
+        :keyword properties: File properties.
+        :paramtype properties: ~azure.storage.fileshare.models.FileProperty
+        :keyword attributes:
+        :paramtype attributes: str
+        :keyword permission_key:
+        :paramtype permission_key: str
+        """
+        super().__init__(**kwargs)
+        self.name = name
+        self.file_id = file_id
+        self.properties = properties
+        self.attributes = attributes
+        self.permission_key = permission_key
+
+
+class FileHTTPHeaders(_serialization.Model):
+    """Parameter group.
+
+    :ivar file_content_type: Sets the MIME content type of the file. The default type is
+     'application/octet-stream'.
+    :vartype file_content_type: str
+    :ivar file_content_encoding: Specifies which content encodings have been applied to the file.
+    :vartype file_content_encoding: str
+    :ivar file_content_language: Specifies the natural languages used by this resource.
+    :vartype file_content_language: str
+    :ivar file_cache_control: Sets the file's cache control. The File service stores this value but
+     does not use or modify it.
+    :vartype file_cache_control: str
+    :ivar file_content_md5: Sets the file's MD5 hash.
+    :vartype file_content_md5: bytes
+    :ivar file_content_disposition: Sets the file's Content-Disposition header.
+    :vartype file_content_disposition: str
+    """
+
+    _attribute_map = {
+        "file_content_type": {"key": "fileContentType", "type": "str"},
+        "file_content_encoding": {"key": "fileContentEncoding", "type": "str"},
+        "file_content_language": {"key": "fileContentLanguage", "type": "str"},
+        "file_cache_control": {"key": "fileCacheControl", "type": "str"},
+        "file_content_md5": {"key": "fileContentMD5", "type": "bytearray"},
+        "file_content_disposition": {"key": "fileContentDisposition", "type": "str"},
+    }
+
+    def __init__(
+        self,
+        *,
+        file_content_type: Optional[str] = None,
+        file_content_encoding: Optional[str] = None,
+        file_content_language: Optional[str] = None,
+        file_cache_control: Optional[str] = None,
+        file_content_md5: Optional[bytes] = None,
+        file_content_disposition: Optional[str] = None,
+        **kwargs: Any
+    ) -> None:
+        """
+        :keyword file_content_type: Sets the MIME content type of the file. The default type is
+         'application/octet-stream'.
+        :paramtype file_content_type: str
+        :keyword file_content_encoding: Specifies which content encodings have been applied to the
+         file.
+        :paramtype file_content_encoding: str
+        :keyword file_content_language: Specifies the natural languages used by this resource.
+        :paramtype file_content_language: str
+        :keyword file_cache_control: Sets the file's cache control. The File service stores this value
+         but does not use or modify it.
+        :paramtype file_cache_control: str
+        :keyword file_content_md5: Sets the file's MD5 hash.
+        :paramtype file_content_md5: bytes
+        :keyword file_content_disposition: Sets the file's Content-Disposition header.
+        :paramtype file_content_disposition: str
+        """
+        super().__init__(**kwargs)
+        self.file_content_type = file_content_type
+        self.file_content_encoding = file_content_encoding
+        self.file_content_language = file_content_language
+        self.file_cache_control = file_cache_control
+        self.file_content_md5 = file_content_md5
+        self.file_content_disposition = file_content_disposition
+
+
+class FileItem(_serialization.Model):
+    """A listed file item.
+
+    All required parameters must be populated in order to send to server.
+
+    :ivar name: Required.
+    :vartype name: ~azure.storage.fileshare.models.StringEncoded
+    :ivar file_id:
+    :vartype file_id: str
+    :ivar properties: File properties. Required.
+    :vartype properties: ~azure.storage.fileshare.models.FileProperty
+    :ivar attributes:
+    :vartype attributes: str
+    :ivar permission_key:
+    :vartype permission_key: str
+    """
+
+    _validation = {
+        "name": {"required": True},
+        "properties": {"required": True},
+    }
+
+    _attribute_map = {
+        "name": {"key": "Name", "type": "StringEncoded"},
+        "file_id": {"key": "FileId", "type": "str"},
+        "properties": {"key": "Properties", "type": "FileProperty"},
+        "attributes": {"key": "Attributes", "type": "str"},
+        "permission_key": {"key": "PermissionKey", "type": "str"},
+    }
+    _xml_map = {"name": "File"}
+
+    def __init__(
+        self,
+        *,
+        name: "_models.StringEncoded",
+        properties: "_models.FileProperty",
+        file_id: Optional[str] = None,
+        attributes: Optional[str] = None,
+        permission_key: Optional[str] = None,
+        **kwargs: Any
+    ) -> None:
+        """
+        :keyword name: Required.
+        :paramtype name: ~azure.storage.fileshare.models.StringEncoded
+        :keyword file_id:
+        :paramtype file_id: str
+        :keyword properties: File properties. Required.
+        :paramtype properties: ~azure.storage.fileshare.models.FileProperty
+        :keyword attributes:
+        :paramtype attributes: str
+        :keyword permission_key:
+        :paramtype permission_key: str
+        """
+        super().__init__(**kwargs)
+        self.name = name
+        self.file_id = file_id
+        self.properties = properties
+        self.attributes = attributes
+        self.permission_key = permission_key
+
+
+class FileProperty(_serialization.Model):
+    """File properties.
+
+    All required parameters must be populated in order to send to server.
+
+    :ivar content_length: Content length of the file. This value may not be up-to-date since an SMB
+     client may have modified the file locally. The value of Content-Length may not reflect that
+     fact until the handle is closed or the op-lock is broken. To retrieve current property values,
+     call Get File Properties. Required.
+    :vartype content_length: int
+    :ivar creation_time:
+    :vartype creation_time: ~datetime.datetime
+    :ivar last_access_time:
+    :vartype last_access_time: ~datetime.datetime
+    :ivar last_write_time:
+    :vartype last_write_time: ~datetime.datetime
+    :ivar change_time:
+    :vartype change_time: ~datetime.datetime
+    :ivar last_modified:
+    :vartype last_modified: ~datetime.datetime
+    :ivar etag:
+    :vartype etag: str
+    """
+
+    _validation = {
+        "content_length": {"required": True},
+    }
+
+    _attribute_map = {
+        "content_length": {"key": "Content-Length", "type": "int"},
+        "creation_time": {"key": "CreationTime", "type": "iso-8601"},
+        "last_access_time": {"key": "LastAccessTime", "type": "iso-8601"},
+        "last_write_time": {"key": "LastWriteTime", "type": "iso-8601"},
+        "change_time": {"key": "ChangeTime", "type": "iso-8601"},
+        "last_modified": {"key": "Last-Modified", "type": "rfc-1123"},
+        "etag": {"key": "Etag", "type": "str"},
+    }
+
+    def __init__(
+        self,
+        *,
+        content_length: int,
+        creation_time: Optional[datetime.datetime] = None,
+        last_access_time: Optional[datetime.datetime] = None,
+        last_write_time: Optional[datetime.datetime] = None,
+        change_time: Optional[datetime.datetime] = None,
+        last_modified: Optional[datetime.datetime] = None,
+        etag: Optional[str] = None,
+        **kwargs: Any
+    ) -> None:
+        """
+        :keyword content_length: Content length of the file. This value may not be up-to-date since an
+         SMB client may have modified the file locally. The value of Content-Length may not reflect that
+         fact until the handle is closed or the op-lock is broken. To retrieve current property values,
+         call Get File Properties. Required.
+        :paramtype content_length: int
+        :keyword creation_time:
+        :paramtype creation_time: ~datetime.datetime
+        :keyword last_access_time:
+        :paramtype last_access_time: ~datetime.datetime
+        :keyword last_write_time:
+        :paramtype last_write_time: ~datetime.datetime
+        :keyword change_time:
+        :paramtype change_time: ~datetime.datetime
+        :keyword last_modified:
+        :paramtype last_modified: ~datetime.datetime
+        :keyword etag:
+        :paramtype etag: str
+        """
+        super().__init__(**kwargs)
+        self.content_length = content_length
+        self.creation_time = creation_time
+        self.last_access_time = last_access_time
+        self.last_write_time = last_write_time
+        self.change_time = change_time
+        self.last_modified = last_modified
+        self.etag = etag
+
+
+class FileRange(_serialization.Model):
+    """An Azure Storage file range.
+
+    All required parameters must be populated in order to send to server.
+
+    :ivar start: Start of the range. Required.
+    :vartype start: int
+    :ivar end: End of the range. Required.
+    :vartype end: int
+    """
+
+    _validation = {
+        "start": {"required": True},
+        "end": {"required": True},
+    }
+
+    _attribute_map = {
+        "start": {"key": "Start", "type": "int"},
+        "end": {"key": "End", "type": "int"},
+    }
+    _xml_map = {"name": "Range"}
+
+    def __init__(self, *, start: int, end: int, **kwargs: Any) -> None:
+        """
+        :keyword start: Start of the range. Required.
+        :paramtype start: int
+        :keyword end: End of the range. Required.
+        :paramtype end: int
+        """
+        super().__init__(**kwargs)
+        self.start = start
+        self.end = end
+
+
+class FilesAndDirectoriesListSegment(_serialization.Model):
+    """Abstract for entries that can be listed from Directory.
+
+    All required parameters must be populated in order to send to server.
+
+    :ivar directory_items: Required.
+    :vartype directory_items: list[~azure.storage.fileshare.models.DirectoryItem]
+    :ivar file_items: Required.
+    :vartype file_items: list[~azure.storage.fileshare.models.FileItem]
+    """
+
+    _validation = {
+        "directory_items": {"required": True},
+        "file_items": {"required": True},
+    }
+
+    _attribute_map = {
+        "directory_items": {"key": "DirectoryItems", "type": "[DirectoryItem]", "xml": {"itemsName": "Directory"}},
+        "file_items": {"key": "FileItems", "type": "[FileItem]", "xml": {"itemsName": "File"}},
+    }
+    _xml_map = {"name": "Entries"}
+
+    def __init__(
+        self, *, directory_items: List["_models.DirectoryItem"], file_items: List["_models.FileItem"], **kwargs: Any
+    ) -> None:
+        """
+        :keyword directory_items: Required.
+        :paramtype directory_items: list[~azure.storage.fileshare.models.DirectoryItem]
+        :keyword file_items: Required.
+        :paramtype file_items: list[~azure.storage.fileshare.models.FileItem]
+        """
+        super().__init__(**kwargs)
+        self.directory_items = directory_items
+        self.file_items = file_items
+
+
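+# Consumption sketch (illustrative; ``segment`` is an assumed deserialized
+# instance, and StringEncoded is assumed to expose its text as ``.content``).
+# The xml "itemsName" hints above mean repeated <Directory> and <File> children
+# of the <Entries> element land in separate typed lists:
+#
+#     for d in segment.directory_items:
+#         print("dir:", d.name.content)
+#     for f in segment.file_items:
+#         print("file:", f.name.content, f.properties.content_length)
+
+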
+class HandleItem(_serialization.Model):
+    """A listed Azure Storage handle item.
+
+    All required parameters must be populated in order to send to server.
+
+    :ivar handle_id: XSMB service handle ID. Required.
+    :vartype handle_id: str
+    :ivar path: Required.
+    :vartype path: ~azure.storage.fileshare.models.StringEncoded
+    :ivar file_id: FileId uniquely identifies the file or directory. Required.
+    :vartype file_id: str
+    :ivar parent_id: ParentId uniquely identifies the parent directory of the object.
+    :vartype parent_id: str
+    :ivar session_id: SMB session ID in context of which the file handle was opened. Required.
+    :vartype session_id: str
+    :ivar client_ip: Client IP that opened the handle. Required.
+    :vartype client_ip: str
+    :ivar client_name: Name of the client machine where the share is being mounted. Required.
+    :vartype client_name: str
+    :ivar open_time: Time when the session that previously opened the handle was last
+     reconnected (UTC). Required.
+    :vartype open_time: ~datetime.datetime
+    :ivar last_reconnect_time: Time the handle was last reconnected (UTC).
+    :vartype last_reconnect_time: ~datetime.datetime
+    :ivar access_right_list:
+    :vartype access_right_list: list[str or ~azure.storage.fileshare.models.AccessRight]
+    """
+
+    _validation = {
+        "handle_id": {"required": True},
+        "path": {"required": True},
+        "file_id": {"required": True},
+        "session_id": {"required": True},
+        "client_ip": {"required": True},
+        "client_name": {"required": True},
+        "open_time": {"required": True},
+    }
+
+    _attribute_map = {
+        "handle_id": {"key": "HandleId", "type": "str"},
+        "path": {"key": "Path", "type": "StringEncoded"},
+        "file_id": {"key": "FileId", "type": "str"},
+        "parent_id": {"key": "ParentId", "type": "str"},
+        "session_id": {"key": "SessionId", "type": "str"},
+        "client_ip": {"key": "ClientIp", "type": "str"},
+        "client_name": {"key": "ClientName", "type": "str"},
+        "open_time": {"key": "OpenTime", "type": "rfc-1123"},
+        "last_reconnect_time": {"key": "LastReconnectTime", "type": "rfc-1123"},
+        "access_right_list": {"key": "AccessRightList", "type": "[str]", "xml": {"wrapped": True}},
+    }
+    _xml_map = {"name": "Handle"}
+
+    def __init__(
+        self,
+        *,
+        handle_id: str,
+        path: "_models.StringEncoded",
+        file_id: str,
+        session_id: str,
+        client_ip: str,
+        client_name: str,
+        open_time: datetime.datetime,
+        parent_id: Optional[str] = None,
+        last_reconnect_time: Optional[datetime.datetime] = None,
+        access_right_list: Optional[List[Union[str, "_models.AccessRight"]]] = None,
+        **kwargs: Any
+    ) -> None:
+        """
+        :keyword handle_id: XSMB service handle ID. Required.
+        :paramtype handle_id: str
+        :keyword path: Required.
+        :paramtype path: ~azure.storage.fileshare.models.StringEncoded
+        :keyword file_id: FileId uniquely identifies the file or directory. Required.
+        :paramtype file_id: str
+        :keyword parent_id: ParentId uniquely identifies the parent directory of the object.
+        :paramtype parent_id: str
+        :keyword session_id: SMB session ID in context of which the file handle was opened. Required.
+        :paramtype session_id: str
+        :keyword client_ip: Client IP that opened the handle. Required.
+        :paramtype client_ip: str
+        :keyword client_name: Name of the client machine where the share is being mounted. Required.
+        :paramtype client_name: str
+        :keyword open_time: Time at which the session that previously opened the handle
+         was last reconnected (UTC). Required.
+        :paramtype open_time: ~datetime.datetime
+        :keyword last_reconnect_time: Time at which the handle was last reconnected (UTC).
+        :paramtype last_reconnect_time: ~datetime.datetime
+        :keyword access_right_list:
+        :paramtype access_right_list: list[str or ~azure.storage.fileshare.models.AccessRight]
+        """
+        super().__init__(**kwargs)
+        self.handle_id = handle_id
+        self.path = path
+        self.file_id = file_id
+        self.parent_id = parent_id
+        self.session_id = session_id
+        self.client_ip = client_ip
+        self.client_name = client_name
+        self.open_time = open_time
+        self.last_reconnect_time = last_reconnect_time
+        self.access_right_list = access_right_list
+
+
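+# --- Editor's illustrative sketch (not generated code): HandleItem is a plain
+# serialization container for one entry of a handle listing. All values below
+# are hypothetical; real instances are deserialized from service responses.
+def _example_handle_item() -> "HandleItem":
+    return HandleItem(
+        handle_id="1234",  # hypothetical XSMB handle ID
+        path=StringEncoded(encoded=False, content="dir/file.txt"),
+        file_id="0",
+        session_id="5678",
+        client_ip="10.0.0.1",
+        client_name="workstation01",
+        open_time=datetime.datetime(2025, 1, 1, tzinfo=datetime.timezone.utc),
+    )
+
+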
+class LeaseAccessConditions(_serialization.Model):
+    """Parameter group.
+
+    :ivar lease_id: If specified, the operation only succeeds if the resource's lease is active and
+     matches this ID.
+    :vartype lease_id: str
+    """
+
+    _attribute_map = {
+        "lease_id": {"key": "leaseId", "type": "str"},
+    }
+
+    def __init__(self, *, lease_id: Optional[str] = None, **kwargs: Any) -> None:
+        """
+        :keyword lease_id: If specified, the operation only succeeds if the resource's lease is active
+         and matches this ID.
+        :paramtype lease_id: str
+        """
+        super().__init__(**kwargs)
+        self.lease_id = lease_id
+
+
+class ListFilesAndDirectoriesSegmentResponse(_serialization.Model):
+    """An enumeration of directories and files.
+
+    All required parameters must be populated in order to send to the server.
+
+    :ivar service_endpoint: Required.
+    :vartype service_endpoint: str
+    :ivar share_name: Required.
+    :vartype share_name: str
+    :ivar share_snapshot:
+    :vartype share_snapshot: str
+    :ivar encoded:
+    :vartype encoded: bool
+    :ivar directory_path: Required.
+    :vartype directory_path: str
+    :ivar prefix: Required.
+    :vartype prefix: ~azure.storage.fileshare.models.StringEncoded
+    :ivar marker:
+    :vartype marker: str
+    :ivar max_results:
+    :vartype max_results: int
+    :ivar segment: Abstract for entries that can be listed from a Directory. Required.
+    :vartype segment: ~azure.storage.fileshare.models.FilesAndDirectoriesListSegment
+    :ivar next_marker: Required.
+    :vartype next_marker: str
+    :ivar directory_id:
+    :vartype directory_id: str
+    """
+
+    _validation = {
+        "service_endpoint": {"required": True},
+        "share_name": {"required": True},
+        "directory_path": {"required": True},
+        "prefix": {"required": True},
+        "segment": {"required": True},
+        "next_marker": {"required": True},
+    }
+
+    _attribute_map = {
+        "service_endpoint": {"key": "ServiceEndpoint", "type": "str", "xml": {"attr": True}},
+        "share_name": {"key": "ShareName", "type": "str", "xml": {"attr": True}},
+        "share_snapshot": {"key": "ShareSnapshot", "type": "str", "xml": {"attr": True}},
+        "encoded": {"key": "Encoded", "type": "bool", "xml": {"attr": True}},
+        "directory_path": {"key": "DirectoryPath", "type": "str", "xml": {"attr": True}},
+        "prefix": {"key": "Prefix", "type": "StringEncoded"},
+        "marker": {"key": "Marker", "type": "str"},
+        "max_results": {"key": "MaxResults", "type": "int"},
+        "segment": {"key": "Segment", "type": "FilesAndDirectoriesListSegment"},
+        "next_marker": {"key": "NextMarker", "type": "str"},
+        "directory_id": {"key": "DirectoryId", "type": "str"},
+    }
+    _xml_map = {"name": "EnumerationResults"}
+
+    def __init__(
+        self,
+        *,
+        service_endpoint: str,
+        share_name: str,
+        directory_path: str,
+        prefix: "_models.StringEncoded",
+        segment: "_models.FilesAndDirectoriesListSegment",
+        next_marker: str,
+        share_snapshot: Optional[str] = None,
+        encoded: Optional[bool] = None,
+        marker: Optional[str] = None,
+        max_results: Optional[int] = None,
+        directory_id: Optional[str] = None,
+        **kwargs: Any
+    ) -> None:
+        """
+        :keyword service_endpoint: Required.
+        :paramtype service_endpoint: str
+        :keyword share_name: Required.
+        :paramtype share_name: str
+        :keyword share_snapshot:
+        :paramtype share_snapshot: str
+        :keyword encoded:
+        :paramtype encoded: bool
+        :keyword directory_path: Required.
+        :paramtype directory_path: str
+        :keyword prefix: Required.
+        :paramtype prefix: ~azure.storage.fileshare.models.StringEncoded
+        :keyword marker:
+        :paramtype marker: str
+        :keyword max_results:
+        :paramtype max_results: int
+        :keyword segment: Abstract for entries that can be listed from a Directory. Required.
+        :paramtype segment: ~azure.storage.fileshare.models.FilesAndDirectoriesListSegment
+        :keyword next_marker: Required.
+        :paramtype next_marker: str
+        :keyword directory_id:
+        :paramtype directory_id: str
+        """
+        super().__init__(**kwargs)
+        self.service_endpoint = service_endpoint
+        self.share_name = share_name
+        self.share_snapshot = share_snapshot
+        self.encoded = encoded
+        self.directory_path = directory_path
+        self.prefix = prefix
+        self.marker = marker
+        self.max_results = max_results
+        self.segment = segment
+        self.next_marker = next_marker
+        self.directory_id = directory_id
+
+
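+# --- Editor's illustrative sketch (not generated code): listings page via
+# NextMarker; a non-empty value is passed back as the `marker` of the next
+# request. `fetch_page` is a hypothetical callable standing in for the
+# generated directory listing operation.
+def _example_drain_listing(fetch_page) -> List["FileItem"]:
+    files: List["FileItem"] = []
+    marker: Optional[str] = None
+    while True:
+        page: "ListFilesAndDirectoriesSegmentResponse" = fetch_page(marker)
+        files.extend(page.segment.file_items)
+        if not page.next_marker:
+            break
+        marker = page.next_marker
+    return files
+
+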
+class ListHandlesResponse(_serialization.Model):
+    """An enumeration of handles.
+
+    All required parameters must be populated in order to send to the server.
+
+    :ivar handle_list:
+    :vartype handle_list: list[~azure.storage.fileshare.models.HandleItem]
+    :ivar next_marker: Required.
+    :vartype next_marker: str
+    """
+
+    _validation = {
+        "next_marker": {"required": True},
+    }
+
+    _attribute_map = {
+        "handle_list": {
+            "key": "HandleList",
+            "type": "[HandleItem]",
+            "xml": {"name": "Entries", "wrapped": True, "itemsName": "Handle"},
+        },
+        "next_marker": {"key": "NextMarker", "type": "str"},
+    }
+    _xml_map = {"name": "EnumerationResults"}
+
+    def __init__(
+        self, *, next_marker: str, handle_list: Optional[List["_models.HandleItem"]] = None, **kwargs: Any
+    ) -> None:
+        """
+        :keyword handle_list:
+        :paramtype handle_list: list[~azure.storage.fileshare.models.HandleItem]
+        :keyword next_marker: Required.
+        :paramtype next_marker: str
+        """
+        super().__init__(**kwargs)
+        self.handle_list = handle_list
+        self.next_marker = next_marker
+
+
+class ListSharesResponse(_serialization.Model):
+    """An enumeration of shares.
+
+    All required parameters must be populated in order to send to the server.
+
+    :ivar service_endpoint: Required.
+    :vartype service_endpoint: str
+    :ivar prefix:
+    :vartype prefix: str
+    :ivar marker:
+    :vartype marker: str
+    :ivar max_results:
+    :vartype max_results: int
+    :ivar share_items:
+    :vartype share_items: list[~azure.storage.fileshare.models.ShareItemInternal]
+    :ivar next_marker: Required.
+    :vartype next_marker: str
+    """
+
+    _validation = {
+        "service_endpoint": {"required": True},
+        "next_marker": {"required": True},
+    }
+
+    _attribute_map = {
+        "service_endpoint": {"key": "ServiceEndpoint", "type": "str", "xml": {"attr": True}},
+        "prefix": {"key": "Prefix", "type": "str"},
+        "marker": {"key": "Marker", "type": "str"},
+        "max_results": {"key": "MaxResults", "type": "int"},
+        "share_items": {
+            "key": "ShareItems",
+            "type": "[ShareItemInternal]",
+            "xml": {"name": "Shares", "wrapped": True, "itemsName": "Share"},
+        },
+        "next_marker": {"key": "NextMarker", "type": "str"},
+    }
+    _xml_map = {"name": "EnumerationResults"}
+
+    def __init__(
+        self,
+        *,
+        service_endpoint: str,
+        next_marker: str,
+        prefix: Optional[str] = None,
+        marker: Optional[str] = None,
+        max_results: Optional[int] = None,
+        share_items: Optional[List["_models.ShareItemInternal"]] = None,
+        **kwargs: Any
+    ) -> None:
+        """
+        :keyword service_endpoint: Required.
+        :paramtype service_endpoint: str
+        :keyword prefix:
+        :paramtype prefix: str
+        :keyword marker:
+        :paramtype marker: str
+        :keyword max_results:
+        :paramtype max_results: int
+        :keyword share_items:
+        :paramtype share_items: list[~azure.storage.fileshare.models.ShareItemInternal]
+        :keyword next_marker: Required.
+        :paramtype next_marker: str
+        """
+        super().__init__(**kwargs)
+        self.service_endpoint = service_endpoint
+        self.prefix = prefix
+        self.marker = marker
+        self.max_results = max_results
+        self.share_items = share_items
+        self.next_marker = next_marker
+
+
+class Metrics(_serialization.Model):
+    """Storage Analytics metrics for file service.
+
+    All required parameters must be populated in order to send to the server.
+
+    :ivar version: The version of Storage Analytics to configure. Required.
+    :vartype version: str
+    :ivar enabled: Indicates whether metrics are enabled for the File service. Required.
+    :vartype enabled: bool
+    :ivar include_apis: Indicates whether metrics should generate summary statistics for called API
+     operations.
+    :vartype include_apis: bool
+    :ivar retention_policy: The retention policy.
+    :vartype retention_policy: ~azure.storage.fileshare.models.RetentionPolicy
+    """
+
+    _validation = {
+        "version": {"required": True},
+        "enabled": {"required": True},
+    }
+
+    _attribute_map = {
+        "version": {"key": "Version", "type": "str"},
+        "enabled": {"key": "Enabled", "type": "bool"},
+        "include_apis": {"key": "IncludeAPIs", "type": "bool"},
+        "retention_policy": {"key": "RetentionPolicy", "type": "RetentionPolicy"},
+    }
+
+    def __init__(
+        self,
+        *,
+        version: str,
+        enabled: bool,
+        include_apis: Optional[bool] = None,
+        retention_policy: Optional["_models.RetentionPolicy"] = None,
+        **kwargs: Any
+    ) -> None:
+        """
+        :keyword version: The version of Storage Analytics to configure. Required.
+        :paramtype version: str
+        :keyword enabled: Indicates whether metrics are enabled for the File service. Required.
+        :paramtype enabled: bool
+        :keyword include_apis: Indicates whether metrics should generate summary statistics for called
+         API operations.
+        :paramtype include_apis: bool
+        :keyword retention_policy: The retention policy.
+        :paramtype retention_policy: ~azure.storage.fileshare.models.RetentionPolicy
+        """
+        super().__init__(**kwargs)
+        self.version = version
+        self.enabled = enabled
+        self.include_apis = include_apis
+        self.retention_policy = retention_policy
+
+
+class RetentionPolicy(_serialization.Model):
+    """The retention policy.
+
+    All required parameters must be populated in order to send to the server.
+
+    :ivar enabled: Indicates whether a retention policy is enabled for the File service. If false,
+     metrics data is retained, and the user is responsible for deleting it. Required.
+    :vartype enabled: bool
+    :ivar days: Indicates the number of days that metrics data should be retained. All data older
+     than this value will be deleted. Metrics data is deleted on a best-effort basis after the
+     retention period expires.
+    :vartype days: int
+    """
+
+    _validation = {
+        "enabled": {"required": True},
+        "days": {"maximum": 365, "minimum": 1},
+    }
+
+    _attribute_map = {
+        "enabled": {"key": "Enabled", "type": "bool"},
+        "days": {"key": "Days", "type": "int"},
+    }
+
+    def __init__(self, *, enabled: bool, days: Optional[int] = None, **kwargs: Any) -> None:
+        """
+        :keyword enabled: Indicates whether a retention policy is enabled for the File service. If
+         false, metrics data is retained, and the user is responsible for deleting it. Required.
+        :paramtype enabled: bool
+        :keyword days: Indicates the number of days that metrics data should be retained. All data
+         older than this value will be deleted. Metrics data is deleted on a best-effort basis after the
+         retention period expires.
+        :paramtype days: int
+        """
+        super().__init__(**kwargs)
+        self.enabled = enabled
+        self.days = days
+
+
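+# --- Editor's illustrative sketch (not generated code): Metrics is typically
+# paired with a RetentionPolicy; note that `days` is validated above to the
+# 1..365 range. Values are hypothetical.
+def _example_hour_metrics() -> "Metrics":
+    return Metrics(
+        version="1.0",
+        enabled=True,
+        include_apis=True,
+        retention_policy=RetentionPolicy(enabled=True, days=7),
+    )
+
+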
+class ShareFileRangeList(_serialization.Model):
+    """The list of file ranges.
+
+    :ivar ranges:
+    :vartype ranges: list[~azure.storage.fileshare.models.FileRange]
+    :ivar clear_ranges:
+    :vartype clear_ranges: list[~azure.storage.fileshare.models.ClearRange]
+    """
+
+    _attribute_map = {
+        "ranges": {"key": "Ranges", "type": "[FileRange]", "xml": {"itemsName": "Range"}},
+        "clear_ranges": {"key": "ClearRanges", "type": "[ClearRange]", "xml": {"itemsName": "ClearRange"}},
+    }
+
+    def __init__(
+        self,
+        *,
+        ranges: Optional[List["_models.FileRange"]] = None,
+        clear_ranges: Optional[List["_models.ClearRange"]] = None,
+        **kwargs: Any
+    ) -> None:
+        """
+        :keyword ranges:
+        :paramtype ranges: list[~azure.storage.fileshare.models.FileRange]
+        :keyword clear_ranges:
+        :paramtype clear_ranges: list[~azure.storage.fileshare.models.ClearRange]
+        """
+        super().__init__(**kwargs)
+        self.ranges = ranges
+        self.clear_ranges = clear_ranges
+
+
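+# --- Editor's illustrative sketch (not generated code): FileRange (defined
+# elsewhere in this module) carries inclusive start/end offsets, so each range
+# covers end - start + 1 bytes. This helper assumes that shape.
+def _example_total_valid_bytes(range_list: "ShareFileRangeList") -> int:
+    return sum(r.end - r.start + 1 for r in (range_list.ranges or []))
+
+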
+class ShareItemInternal(_serialization.Model):
+    """A listed Azure Storage share item.
+
+    All required parameters must be populated in order to send to the server.
+
+    :ivar name: Required.
+    :vartype name: str
+    :ivar snapshot:
+    :vartype snapshot: str
+    :ivar deleted:
+    :vartype deleted: bool
+    :ivar version:
+    :vartype version: str
+    :ivar properties: Properties of a share. Required.
+    :vartype properties: ~azure.storage.fileshare.models.SharePropertiesInternal
+    :ivar metadata: Dictionary of :code:`<string>`.
+    :vartype metadata: dict[str, str]
+    """
+
+    _validation = {
+        "name": {"required": True},
+        "properties": {"required": True},
+    }
+
+    _attribute_map = {
+        "name": {"key": "Name", "type": "str"},
+        "snapshot": {"key": "Snapshot", "type": "str"},
+        "deleted": {"key": "Deleted", "type": "bool"},
+        "version": {"key": "Version", "type": "str"},
+        "properties": {"key": "Properties", "type": "SharePropertiesInternal"},
+        "metadata": {"key": "Metadata", "type": "{str}"},
+    }
+    _xml_map = {"name": "Share"}
+
+    def __init__(
+        self,
+        *,
+        name: str,
+        properties: "_models.SharePropertiesInternal",
+        snapshot: Optional[str] = None,
+        deleted: Optional[bool] = None,
+        version: Optional[str] = None,
+        metadata: Optional[Dict[str, str]] = None,
+        **kwargs: Any
+    ) -> None:
+        """
+        :keyword name: Required.
+        :paramtype name: str
+        :keyword snapshot:
+        :paramtype snapshot: str
+        :keyword deleted:
+        :paramtype deleted: bool
+        :keyword version:
+        :paramtype version: str
+        :keyword properties: Properties of a share. Required.
+        :paramtype properties: ~azure.storage.fileshare.models.SharePropertiesInternal
+        :keyword metadata: Dictionary of :code:`<string>`.
+        :paramtype metadata: dict[str, str]
+        """
+        super().__init__(**kwargs)
+        self.name = name
+        self.snapshot = snapshot
+        self.deleted = deleted
+        self.version = version
+        self.properties = properties
+        self.metadata = metadata
+
+
+class SharePermission(_serialization.Model):
+    """A permission (a security descriptor) at the share level.
+
+    All required parameters must be populated in order to send to the server.
+
+    :ivar permission: The permission in the Security Descriptor Definition Language (SDDL).
+     Required.
+    :vartype permission: str
+    :ivar format: Known values are: "Sddl" and "Binary".
+    :vartype format: str or ~azure.storage.fileshare.models.FilePermissionFormat
+    """
+
+    _validation = {
+        "permission": {"required": True},
+    }
+
+    _attribute_map = {
+        "permission": {"key": "permission", "type": "str"},
+        "format": {"key": "format", "type": "str"},
+    }
+
+    def __init__(
+        self, *, permission: str, format: Optional[Union[str, "_models.FilePermissionFormat"]] = None, **kwargs: Any
+    ) -> None:
+        """
+        :keyword permission: The permission in the Security Descriptor Definition Language (SDDL).
+         Required.
+        :paramtype permission: str
+        :keyword format: Known values are: "Sddl" and "Binary".
+        :paramtype format: str or ~azure.storage.fileshare.models.FilePermissionFormat
+        """
+        super().__init__(**kwargs)
+        self.permission = permission
+        self.format = format
+
+
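+# --- Editor's illustrative sketch (not generated code): when `format` is
+# omitted, the service treats the permission as SDDL. The descriptor below
+# (full access for Everyone) is only an example.
+def _example_share_permission() -> "SharePermission":
+    return SharePermission(permission="O:SYG:SYD:(A;;FA;;;WD)")
+
+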
+class SharePropertiesInternal(_serialization.Model):
+    """Properties of a share.
+
+    All required parameters must be populated in order to send to the server.
+
+    :ivar last_modified: Required.
+    :vartype last_modified: ~datetime.datetime
+    :ivar etag: Required.
+    :vartype etag: str
+    :ivar quota: Required.
+    :vartype quota: int
+    :ivar provisioned_iops:
+    :vartype provisioned_iops: int
+    :ivar provisioned_ingress_m_bps:
+    :vartype provisioned_ingress_m_bps: int
+    :ivar provisioned_egress_m_bps:
+    :vartype provisioned_egress_m_bps: int
+    :ivar provisioned_bandwidth_mi_bps:
+    :vartype provisioned_bandwidth_mi_bps: int
+    :ivar next_allowed_quota_downgrade_time:
+    :vartype next_allowed_quota_downgrade_time: ~datetime.datetime
+    :ivar deleted_time:
+    :vartype deleted_time: ~datetime.datetime
+    :ivar remaining_retention_days:
+    :vartype remaining_retention_days: int
+    :ivar access_tier:
+    :vartype access_tier: str
+    :ivar access_tier_change_time:
+    :vartype access_tier_change_time: ~datetime.datetime
+    :ivar access_tier_transition_state:
+    :vartype access_tier_transition_state: str
+    :ivar lease_status: The current lease status of the share. Known values are: "locked" and
+     "unlocked".
+    :vartype lease_status: str or ~azure.storage.fileshare.models.LeaseStatusType
+    :ivar lease_state: Lease state of the share. Known values are: "available", "leased",
+     "expired", "breaking", and "broken".
+    :vartype lease_state: str or ~azure.storage.fileshare.models.LeaseStateType
+    :ivar lease_duration: When a share is leased, specifies whether the lease is of infinite or
+     fixed duration. Known values are: "infinite" and "fixed".
+    :vartype lease_duration: str or ~azure.storage.fileshare.models.LeaseDurationType
+    :ivar enabled_protocols:
+    :vartype enabled_protocols: str
+    :ivar root_squash: Known values are: "NoRootSquash", "RootSquash", and "AllSquash".
+    :vartype root_squash: str or ~azure.storage.fileshare.models.ShareRootSquash
+    :ivar enable_snapshot_virtual_directory_access:
+    :vartype enable_snapshot_virtual_directory_access: bool
+    :ivar paid_bursting_enabled:
+    :vartype paid_bursting_enabled: bool
+    :ivar paid_bursting_max_iops:
+    :vartype paid_bursting_max_iops: int
+    :ivar paid_bursting_max_bandwidth_mibps:
+    :vartype paid_bursting_max_bandwidth_mibps: int
+    :ivar included_burst_iops:
+    :vartype included_burst_iops: int
+    :ivar max_burst_credits_for_iops:
+    :vartype max_burst_credits_for_iops: int
+    :ivar next_allowed_provisioned_iops_downgrade_time:
+    :vartype next_allowed_provisioned_iops_downgrade_time: ~datetime.datetime
+    :ivar next_allowed_provisioned_bandwidth_downgrade_time:
+    :vartype next_allowed_provisioned_bandwidth_downgrade_time: ~datetime.datetime
+    """
+
+    _validation = {
+        "last_modified": {"required": True},
+        "etag": {"required": True},
+        "quota": {"required": True},
+    }
+
+    _attribute_map = {
+        "last_modified": {"key": "Last-Modified", "type": "rfc-1123"},
+        "etag": {"key": "Etag", "type": "str"},
+        "quota": {"key": "Quota", "type": "int"},
+        "provisioned_iops": {"key": "ProvisionedIops", "type": "int"},
+        "provisioned_ingress_m_bps": {"key": "ProvisionedIngressMBps", "type": "int"},
+        "provisioned_egress_m_bps": {"key": "ProvisionedEgressMBps", "type": "int"},
+        "provisioned_bandwidth_mi_bps": {"key": "ProvisionedBandwidthMiBps", "type": "int"},
+        "next_allowed_quota_downgrade_time": {"key": "NextAllowedQuotaDowngradeTime", "type": "rfc-1123"},
+        "deleted_time": {"key": "DeletedTime", "type": "rfc-1123"},
+        "remaining_retention_days": {"key": "RemainingRetentionDays", "type": "int"},
+        "access_tier": {"key": "AccessTier", "type": "str"},
+        "access_tier_change_time": {"key": "AccessTierChangeTime", "type": "rfc-1123"},
+        "access_tier_transition_state": {"key": "AccessTierTransitionState", "type": "str"},
+        "lease_status": {"key": "LeaseStatus", "type": "str"},
+        "lease_state": {"key": "LeaseState", "type": "str"},
+        "lease_duration": {"key": "LeaseDuration", "type": "str"},
+        "enabled_protocols": {"key": "EnabledProtocols", "type": "str"},
+        "root_squash": {"key": "RootSquash", "type": "str"},
+        "enable_snapshot_virtual_directory_access": {"key": "EnableSnapshotVirtualDirectoryAccess", "type": "bool"},
+        "paid_bursting_enabled": {"key": "PaidBurstingEnabled", "type": "bool"},
+        "paid_bursting_max_iops": {"key": "PaidBurstingMaxIops", "type": "int"},
+        "paid_bursting_max_bandwidth_mibps": {"key": "PaidBurstingMaxBandwidthMibps", "type": "int"},
+        "included_burst_iops": {"key": "IncludedBurstIops", "type": "int"},
+        "max_burst_credits_for_iops": {"key": "MaxBurstCreditsForIops", "type": "int"},
+        "next_allowed_provisioned_iops_downgrade_time": {
+            "key": "NextAllowedProvisionedIopsDowngradeTime",
+            "type": "rfc-1123",
+        },
+        "next_allowed_provisioned_bandwidth_downgrade_time": {
+            "key": "NextAllowedProvisionedBandwidthDowngradeTime",
+            "type": "rfc-1123",
+        },
+    }
+
+    def __init__(  # pylint: disable=too-many-locals
+        self,
+        *,
+        last_modified: datetime.datetime,
+        etag: str,
+        quota: int,
+        provisioned_iops: Optional[int] = None,
+        provisioned_ingress_m_bps: Optional[int] = None,
+        provisioned_egress_m_bps: Optional[int] = None,
+        provisioned_bandwidth_mi_bps: Optional[int] = None,
+        next_allowed_quota_downgrade_time: Optional[datetime.datetime] = None,
+        deleted_time: Optional[datetime.datetime] = None,
+        remaining_retention_days: Optional[int] = None,
+        access_tier: Optional[str] = None,
+        access_tier_change_time: Optional[datetime.datetime] = None,
+        access_tier_transition_state: Optional[str] = None,
+        lease_status: Optional[Union[str, "_models.LeaseStatusType"]] = None,
+        lease_state: Optional[Union[str, "_models.LeaseStateType"]] = None,
+        lease_duration: Optional[Union[str, "_models.LeaseDurationType"]] = None,
+        enabled_protocols: Optional[str] = None,
+        root_squash: Optional[Union[str, "_models.ShareRootSquash"]] = None,
+        enable_snapshot_virtual_directory_access: Optional[bool] = None,
+        paid_bursting_enabled: Optional[bool] = None,
+        paid_bursting_max_iops: Optional[int] = None,
+        paid_bursting_max_bandwidth_mibps: Optional[int] = None,
+        included_burst_iops: Optional[int] = None,
+        max_burst_credits_for_iops: Optional[int] = None,
+        next_allowed_provisioned_iops_downgrade_time: Optional[datetime.datetime] = None,
+        next_allowed_provisioned_bandwidth_downgrade_time: Optional[datetime.datetime] = None,
+        **kwargs: Any
+    ) -> None:
+        """
+        :keyword last_modified: Required.
+        :paramtype last_modified: ~datetime.datetime
+        :keyword etag: Required.
+        :paramtype etag: str
+        :keyword quota: Required.
+        :paramtype quota: int
+        :keyword provisioned_iops:
+        :paramtype provisioned_iops: int
+        :keyword provisioned_ingress_m_bps:
+        :paramtype provisioned_ingress_m_bps: int
+        :keyword provisioned_egress_m_bps:
+        :paramtype provisioned_egress_m_bps: int
+        :keyword provisioned_bandwidth_mi_bps:
+        :paramtype provisioned_bandwidth_mi_bps: int
+        :keyword next_allowed_quota_downgrade_time:
+        :paramtype next_allowed_quota_downgrade_time: ~datetime.datetime
+        :keyword deleted_time:
+        :paramtype deleted_time: ~datetime.datetime
+        :keyword remaining_retention_days:
+        :paramtype remaining_retention_days: int
+        :keyword access_tier:
+        :paramtype access_tier: str
+        :keyword access_tier_change_time:
+        :paramtype access_tier_change_time: ~datetime.datetime
+        :keyword access_tier_transition_state:
+        :paramtype access_tier_transition_state: str
+        :keyword lease_status: The current lease status of the share. Known values are: "locked" and
+         "unlocked".
+        :paramtype lease_status: str or ~azure.storage.fileshare.models.LeaseStatusType
+        :keyword lease_state: Lease state of the share. Known values are: "available", "leased",
+         "expired", "breaking", and "broken".
+        :paramtype lease_state: str or ~azure.storage.fileshare.models.LeaseStateType
+        :keyword lease_duration: When a share is leased, specifies whether the lease is of infinite or
+         fixed duration. Known values are: "infinite" and "fixed".
+        :paramtype lease_duration: str or ~azure.storage.fileshare.models.LeaseDurationType
+        :keyword enabled_protocols:
+        :paramtype enabled_protocols: str
+        :keyword root_squash: Known values are: "NoRootSquash", "RootSquash", and "AllSquash".
+        :paramtype root_squash: str or ~azure.storage.fileshare.models.ShareRootSquash
+        :keyword enable_snapshot_virtual_directory_access:
+        :paramtype enable_snapshot_virtual_directory_access: bool
+        :keyword paid_bursting_enabled:
+        :paramtype paid_bursting_enabled: bool
+        :keyword paid_bursting_max_iops:
+        :paramtype paid_bursting_max_iops: int
+        :keyword paid_bursting_max_bandwidth_mibps:
+        :paramtype paid_bursting_max_bandwidth_mibps: int
+        :keyword included_burst_iops:
+        :paramtype included_burst_iops: int
+        :keyword max_burst_credits_for_iops:
+        :paramtype max_burst_credits_for_iops: int
+        :keyword next_allowed_provisioned_iops_downgrade_time:
+        :paramtype next_allowed_provisioned_iops_downgrade_time: ~datetime.datetime
+        :keyword next_allowed_provisioned_bandwidth_downgrade_time:
+        :paramtype next_allowed_provisioned_bandwidth_downgrade_time: ~datetime.datetime
+        """
+        super().__init__(**kwargs)
+        self.last_modified = last_modified
+        self.etag = etag
+        self.quota = quota
+        self.provisioned_iops = provisioned_iops
+        self.provisioned_ingress_m_bps = provisioned_ingress_m_bps
+        self.provisioned_egress_m_bps = provisioned_egress_m_bps
+        self.provisioned_bandwidth_mi_bps = provisioned_bandwidth_mi_bps
+        self.next_allowed_quota_downgrade_time = next_allowed_quota_downgrade_time
+        self.deleted_time = deleted_time
+        self.remaining_retention_days = remaining_retention_days
+        self.access_tier = access_tier
+        self.access_tier_change_time = access_tier_change_time
+        self.access_tier_transition_state = access_tier_transition_state
+        self.lease_status = lease_status
+        self.lease_state = lease_state
+        self.lease_duration = lease_duration
+        self.enabled_protocols = enabled_protocols
+        self.root_squash = root_squash
+        self.enable_snapshot_virtual_directory_access = enable_snapshot_virtual_directory_access
+        self.paid_bursting_enabled = paid_bursting_enabled
+        self.paid_bursting_max_iops = paid_bursting_max_iops
+        self.paid_bursting_max_bandwidth_mibps = paid_bursting_max_bandwidth_mibps
+        self.included_burst_iops = included_burst_iops
+        self.max_burst_credits_for_iops = max_burst_credits_for_iops
+        self.next_allowed_provisioned_iops_downgrade_time = next_allowed_provisioned_iops_downgrade_time
+        self.next_allowed_provisioned_bandwidth_downgrade_time = next_allowed_provisioned_bandwidth_downgrade_time
+
+
+class ShareProtocolSettings(_serialization.Model):
+    """Protocol settings.
+
+    :ivar smb: Settings for SMB protocol.
+    :vartype smb: ~azure.storage.fileshare.models.ShareSmbSettings
+    """
+
+    _attribute_map = {
+        "smb": {"key": "Smb", "type": "ShareSmbSettings"},
+    }
+    _xml_map = {"name": "ProtocolSettings"}
+
+    def __init__(self, *, smb: Optional["_models.ShareSmbSettings"] = None, **kwargs: Any) -> None:
+        """
+        :keyword smb: Settings for SMB protocol.
+        :paramtype smb: ~azure.storage.fileshare.models.ShareSmbSettings
+        """
+        super().__init__(**kwargs)
+        self.smb = smb
+
+
+class ShareSmbSettings(_serialization.Model):
+    """Settings for SMB protocol.
+
+    :ivar multichannel: Settings for SMB Multichannel.
+    :vartype multichannel: ~azure.storage.fileshare.models.SmbMultichannel
+    """
+
+    _attribute_map = {
+        "multichannel": {"key": "Multichannel", "type": "SmbMultichannel"},
+    }
+    _xml_map = {"name": "SMB"}
+
+    def __init__(self, *, multichannel: Optional["_models.SmbMultichannel"] = None, **kwargs: Any) -> None:
+        """
+        :keyword multichannel: Settings for SMB Multichannel.
+        :paramtype multichannel: ~azure.storage.fileshare.models.SmbMultichannel
+        """
+        super().__init__(**kwargs)
+        self.multichannel = multichannel
+
+
+class ShareStats(_serialization.Model):
+    """Stats for the share.
+
+    All required parameters must be populated in order to send to the server.
+
+    :ivar share_usage_bytes: The approximate size, in bytes, of the data stored on the
+     share. Note that this value may not include all recently created or recently
+     resized files. Required.
+    :vartype share_usage_bytes: int
+    """
+
+    _validation = {
+        "share_usage_bytes": {"required": True},
+    }
+
+    _attribute_map = {
+        "share_usage_bytes": {"key": "ShareUsageBytes", "type": "int"},
+    }
+
+    def __init__(self, *, share_usage_bytes: int, **kwargs: Any) -> None:
+        """
+        :keyword share_usage_bytes: The approximate size, in bytes, of the data stored on
+         the share. Note that this value may not include all recently created or recently
+         resized files. Required.
+        :paramtype share_usage_bytes: int
+        """
+        super().__init__(**kwargs)
+        self.share_usage_bytes = share_usage_bytes
+
+
+class SignedIdentifier(_serialization.Model):
+    """Signed identifier.
+
+    All required parameters must be populated in order to send to the server.
+
+    :ivar id: A unique id. Required.
+    :vartype id: str
+    :ivar access_policy: The access policy.
+    :vartype access_policy: ~azure.storage.fileshare.models.AccessPolicy
+    """
+
+    _validation = {
+        "id": {"required": True},
+    }
+
+    _attribute_map = {
+        "id": {"key": "Id", "type": "str"},
+        "access_policy": {"key": "AccessPolicy", "type": "AccessPolicy"},
+    }
+
+    def __init__(
+        self,
+        *,
+        id: str,  # pylint: disable=redefined-builtin
+        access_policy: Optional["_models.AccessPolicy"] = None,
+        **kwargs: Any
+    ) -> None:
+        """
+        :keyword id: A unique id. Required.
+        :paramtype id: str
+        :keyword access_policy: The access policy.
+        :paramtype access_policy: ~azure.storage.fileshare.models.AccessPolicy
+        """
+        super().__init__(**kwargs)
+        self.id = id
+        self.access_policy = access_policy
+
+
+class SmbMultichannel(_serialization.Model):
+    """Settings for SMB multichannel.
+
+    :ivar enabled: Whether SMB multichannel is enabled.
+    :vartype enabled: bool
+    """
+
+    _attribute_map = {
+        "enabled": {"key": "Enabled", "type": "bool"},
+    }
+    _xml_map = {"name": "Multichannel"}
+
+    def __init__(self, *, enabled: Optional[bool] = None, **kwargs: Any) -> None:
+        """
+        :keyword enabled: Whether SMB multichannel is enabled.
+        :paramtype enabled: bool
+        """
+        super().__init__(**kwargs)
+        self.enabled = enabled
+
+
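+# --- Editor's illustrative sketch (not generated code): ShareProtocolSettings,
+# ShareSmbSettings, and SmbMultichannel nest to form the
+# <ProtocolSettings><SMB><Multichannel> hierarchy declared in their _xml_map
+# entries.
+def _example_protocol_settings() -> "ShareProtocolSettings":
+    return ShareProtocolSettings(
+        smb=ShareSmbSettings(multichannel=SmbMultichannel(enabled=True))
+    )
+
+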
+class SourceLeaseAccessConditions(_serialization.Model):
+    """Parameter group.
+
+    :ivar source_lease_id: Required if the source file has an active infinite lease.
+    :vartype source_lease_id: str
+    """
+
+    _attribute_map = {
+        "source_lease_id": {"key": "sourceLeaseId", "type": "str"},
+    }
+
+    def __init__(self, *, source_lease_id: Optional[str] = None, **kwargs: Any) -> None:
+        """
+        :keyword source_lease_id: Required if the source file has an active infinite lease.
+        :paramtype source_lease_id: str
+        """
+        super().__init__(**kwargs)
+        self.source_lease_id = source_lease_id
+
+
+class SourceModifiedAccessConditions(_serialization.Model):
+    """Parameter group.
+
+    :ivar source_if_match_crc64: Specify the crc64 value to operate only on a range with
+     a matching crc64 checksum.
+    :vartype source_if_match_crc64: bytes
+    :ivar source_if_none_match_crc64: Specify the crc64 value to operate only on a range
+     without a matching crc64 checksum.
+    :vartype source_if_none_match_crc64: bytes
+    """
+
+    _attribute_map = {
+        "source_if_match_crc64": {"key": "sourceIfMatchCrc64", "type": "bytearray"},
+        "source_if_none_match_crc64": {"key": "sourceIfNoneMatchCrc64", "type": "bytearray"},
+    }
+
+    def __init__(
+        self,
+        *,
+        source_if_match_crc64: Optional[bytes] = None,
+        source_if_none_match_crc64: Optional[bytes] = None,
+        **kwargs: Any
+    ) -> None:
+        """
+        :keyword source_if_match_crc64: Specify the crc64 value to operate only on a
+         range with a matching crc64 checksum.
+        :paramtype source_if_match_crc64: bytes
+        :keyword source_if_none_match_crc64: Specify the crc64 value to operate only on
+         a range without a matching crc64 checksum.
+        :paramtype source_if_none_match_crc64: bytes
+        """
+        super().__init__(**kwargs)
+        self.source_if_match_crc64 = source_if_match_crc64
+        self.source_if_none_match_crc64 = source_if_none_match_crc64
+
+
+class StorageError(_serialization.Model):
+    """StorageError.
+
+    :ivar message:
+    :vartype message: str
+    :ivar authentication_error_detail:
+    :vartype authentication_error_detail: str
+    """
+
+    _attribute_map = {
+        "message": {"key": "Message", "type": "str"},
+        "authentication_error_detail": {"key": "AuthenticationErrorDetail", "type": "str"},
+    }
+
+    def __init__(
+        self, *, message: Optional[str] = None, authentication_error_detail: Optional[str] = None, **kwargs: Any
+    ) -> None:
+        """
+        :keyword message:
+        :paramtype message: str
+        :keyword authentication_error_detail:
+        :paramtype authentication_error_detail: str
+        """
+        super().__init__(**kwargs)
+        self.message = message
+        self.authentication_error_detail = authentication_error_detail
+
+
+class StorageServiceProperties(_serialization.Model):
+    """Storage service properties.
+
+    :ivar hour_metrics: A summary of request statistics grouped by API in hourly aggregates for
+     files.
+    :vartype hour_metrics: ~azure.storage.fileshare.models.Metrics
+    :ivar minute_metrics: A summary of request statistics grouped by API in minute aggregates for
+     files.
+    :vartype minute_metrics: ~azure.storage.fileshare.models.Metrics
+    :ivar cors: The set of CORS rules.
+    :vartype cors: list[~azure.storage.fileshare.models.CorsRule]
+    :ivar protocol: Protocol settings.
+    :vartype protocol: ~azure.storage.fileshare.models.ShareProtocolSettings
+    """
+
+    _attribute_map = {
+        "hour_metrics": {"key": "HourMetrics", "type": "Metrics"},
+        "minute_metrics": {"key": "MinuteMetrics", "type": "Metrics"},
+        "cors": {"key": "Cors", "type": "[CorsRule]", "xml": {"wrapped": True}},
+        "protocol": {"key": "Protocol", "type": "ShareProtocolSettings"},
+    }
+
+    def __init__(
+        self,
+        *,
+        hour_metrics: Optional["_models.Metrics"] = None,
+        minute_metrics: Optional["_models.Metrics"] = None,
+        cors: Optional[List["_models.CorsRule"]] = None,
+        protocol: Optional["_models.ShareProtocolSettings"] = None,
+        **kwargs: Any
+    ) -> None:
+        """
+        :keyword hour_metrics: A summary of request statistics grouped by API in hourly aggregates for
+         files.
+        :paramtype hour_metrics: ~azure.storage.fileshare.models.Metrics
+        :keyword minute_metrics: A summary of request statistics grouped by API in minute aggregates
+         for files.
+        :paramtype minute_metrics: ~azure.storage.fileshare.models.Metrics
+        :keyword cors: The set of CORS rules.
+        :paramtype cors: list[~azure.storage.fileshare.models.CorsRule]
+        :keyword protocol: Protocol settings.
+        :paramtype protocol: ~azure.storage.fileshare.models.ShareProtocolSettings
+        """
+        super().__init__(**kwargs)
+        self.hour_metrics = hour_metrics
+        self.minute_metrics = minute_metrics
+        self.cors = cors
+        self.protocol = protocol
+
+
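+# --- Editor's illustrative sketch (not generated code): a full service
+# properties payload is assembled from the models above. Values are
+# hypothetical; `cors` and `protocol` are left unset here.
+def _example_service_properties() -> "StorageServiceProperties":
+    return StorageServiceProperties(
+        hour_metrics=Metrics(
+            version="1.0",
+            enabled=True,
+            retention_policy=RetentionPolicy(enabled=False),
+        ),
+        minute_metrics=Metrics(version="1.0", enabled=False),
+    )
+
+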
+class StringEncoded(_serialization.Model):
+    """StringEncoded.
+
+    :ivar encoded:
+    :vartype encoded: bool
+    :ivar content:
+    :vartype content: str
+    """
+
+    _attribute_map = {
+        "encoded": {"key": "Encoded", "type": "bool", "xml": {"name": "Encoded", "attr": True}},
+        "content": {"key": "content", "type": "str", "xml": {"text": True}},
+    }
+
+    def __init__(self, *, encoded: Optional[bool] = None, content: Optional[str] = None, **kwargs: Any) -> None:
+        """
+        :keyword encoded:
+        :paramtype encoded: bool
+        :keyword content:
+        :paramtype content: str
+        """
+        super().__init__(**kwargs)
+        self.encoded = encoded
+        self.content = content
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/fileshare/_generated/models/_patch.py b/.venv/lib/python3.12/site-packages/azure/storage/fileshare/_generated/models/_patch.py
new file mode 100644
index 00000000..f7dd3251
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/fileshare/_generated/models/_patch.py
@@ -0,0 +1,20 @@
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------
+"""Customize generated code here.
+
+Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize
+"""
+from typing import List
+
+__all__: List[str] = []  # Add all objects you want publicly available to users at this package level
+
+
+def patch_sdk():
+    """Do not remove from this file.
+
+    `patch_sdk` is a last resort escape hatch that allows you to do customizations
+    you can't accomplish using the techniques described in
+    https://aka.ms/azsdk/python/dpcodegen/python/customize
+    """
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/fileshare/_generated/operations/__init__.py b/.venv/lib/python3.12/site-packages/azure/storage/fileshare/_generated/operations/__init__.py
new file mode 100644
index 00000000..092b7efd
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/fileshare/_generated/operations/__init__.py
@@ -0,0 +1,31 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+# pylint: disable=wrong-import-position
+
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from ._patch import *  # pylint: disable=unused-wildcard-import
+
+from ._service_operations import ServiceOperations  # type: ignore
+from ._share_operations import ShareOperations  # type: ignore
+from ._directory_operations import DirectoryOperations  # type: ignore
+from ._file_operations import FileOperations  # type: ignore
+
+from ._patch import __all__ as _patch_all
+from ._patch import *
+from ._patch import patch_sdk as _patch_sdk
+
+__all__ = [
+    "ServiceOperations",
+    "ShareOperations",
+    "DirectoryOperations",
+    "FileOperations",
+]
+__all__.extend([p for p in _patch_all if p not in __all__])  # pyright: ignore
+_patch_sdk()
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/fileshare/_generated/operations/_directory_operations.py b/.venv/lib/python3.12/site-packages/azure/storage/fileshare/_generated/operations/_directory_operations.py
new file mode 100644
index 00000000..18e4eabd
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/fileshare/_generated/operations/_directory_operations.py
@@ -0,0 +1,1570 @@
+# pylint: disable=too-many-lines
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+import sys
+from typing import Any, Callable, Dict, List, Literal, Optional, TypeVar, Union
+
+from azure.core import PipelineClient
+from azure.core.exceptions import (
+    ClientAuthenticationError,
+    HttpResponseError,
+    ResourceExistsError,
+    ResourceNotFoundError,
+    ResourceNotModifiedError,
+    map_error,
+)
+from azure.core.pipeline import PipelineResponse
+from azure.core.rest import HttpRequest, HttpResponse
+from azure.core.tracing.decorator import distributed_trace
+from azure.core.utils import case_insensitive_dict
+
+from .. import models as _models
+from .._configuration import AzureFileStorageConfiguration
+from .._serialization import Deserializer, Serializer
+
+if sys.version_info >= (3, 9):
+    from collections.abc import MutableMapping
+else:
+    from typing import MutableMapping  # type: ignore
+T = TypeVar("T")
+ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
+
+_SERIALIZER = Serializer()
+_SERIALIZER.client_side_validation = False
+
+
+def build_create_request(
+    url: str,
+    *,
+    timeout: Optional[int] = None,
+    metadata: Optional[Dict[str, str]] = None,
+    file_permission: str = "inherit",
+    file_permission_format: Optional[Union[str, _models.FilePermissionFormat]] = None,
+    file_permission_key: Optional[str] = None,
+    file_attributes: str = "none",
+    file_creation_time: str = "now",
+    file_last_write_time: str = "now",
+    file_change_time: Optional[str] = None,
+    owner: Optional[str] = None,
+    group: Optional[str] = None,
+    file_mode: Optional[str] = None,
+    allow_trailing_dot: Optional[bool] = None,
+    file_request_intent: Optional[Union[str, _models.ShareTokenIntent]] = None,
+    **kwargs: Any
+) -> HttpRequest:
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    restype: Literal["directory"] = kwargs.pop("restype", _params.pop("restype", "directory"))
+    version: Literal["2025-05-05"] = kwargs.pop("version", _headers.pop("x-ms-version", "2025-05-05"))
+    accept = _headers.pop("Accept", "application/xml")
+
+    # Construct URL
+    _url = kwargs.pop("template_url", "{url}")
+    path_format_arguments = {
+        "url": _SERIALIZER.url("url", url, "str", skip_quote=True),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    _params["restype"] = _SERIALIZER.query("restype", restype, "str")
+    if timeout is not None:
+        _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0)
+
+    # Construct headers
+    if allow_trailing_dot is not None:
+        _headers["x-ms-allow-trailing-dot"] = _SERIALIZER.header("allow_trailing_dot", allow_trailing_dot, "bool")
+    if metadata is not None:
+        _headers["x-ms-meta"] = _SERIALIZER.header("metadata", metadata, "{str}")
+    _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str")
+    if file_permission is not None:
+        _headers["x-ms-file-permission"] = _SERIALIZER.header("file_permission", file_permission, "str")
+    if file_permission_format is not None:
+        _headers["x-ms-file-permission-format"] = _SERIALIZER.header(
+            "file_permission_format", file_permission_format, "str"
+        )
+    if file_permission_key is not None:
+        _headers["x-ms-file-permission-key"] = _SERIALIZER.header("file_permission_key", file_permission_key, "str")
+    if file_attributes is not None:
+        _headers["x-ms-file-attributes"] = _SERIALIZER.header("file_attributes", file_attributes, "str")
+    if file_creation_time is not None:
+        _headers["x-ms-file-creation-time"] = _SERIALIZER.header("file_creation_time", file_creation_time, "str")
+    if file_last_write_time is not None:
+        _headers["x-ms-file-last-write-time"] = _SERIALIZER.header("file_last_write_time", file_last_write_time, "str")
+    if file_change_time is not None:
+        _headers["x-ms-file-change-time"] = _SERIALIZER.header("file_change_time", file_change_time, "str")
+    if file_request_intent is not None:
+        _headers["x-ms-file-request-intent"] = _SERIALIZER.header("file_request_intent", file_request_intent, "str")
+    if owner is not None:
+        _headers["x-ms-owner"] = _SERIALIZER.header("owner", owner, "str")
+    if group is not None:
+        _headers["x-ms-group"] = _SERIALIZER.header("group", group, "str")
+    if file_mode is not None:
+        _headers["x-ms-mode"] = _SERIALIZER.header("file_mode", file_mode, "str")
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs)
+
+
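+# --- Editor's illustrative sketch (not generated code): the builders in this
+# module only assemble an HttpRequest; DirectoryOperations sends it through the
+# pipeline. The account URL below is hypothetical.
+def _example_build_create_request() -> HttpRequest:
+    return build_create_request(
+        url="https://myaccount.file.core.windows.net/myshare/mydir",
+        timeout=30,
+        metadata={"project": "demo"},
+    )
+
+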
+def build_get_properties_request(
+    url: str,
+    *,
+    sharesnapshot: Optional[str] = None,
+    timeout: Optional[int] = None,
+    allow_trailing_dot: Optional[bool] = None,
+    file_request_intent: Optional[Union[str, _models.ShareTokenIntent]] = None,
+    **kwargs: Any
+) -> HttpRequest:
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    restype: Literal["directory"] = kwargs.pop("restype", _params.pop("restype", "directory"))
+    version: Literal["2025-05-05"] = kwargs.pop("version", _headers.pop("x-ms-version", "2025-05-05"))
+    accept = _headers.pop("Accept", "application/xml")
+
+    # Construct URL
+    _url = kwargs.pop("template_url", "{url}")
+    path_format_arguments = {
+        "url": _SERIALIZER.url("url", url, "str", skip_quote=True),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    _params["restype"] = _SERIALIZER.query("restype", restype, "str")
+    if sharesnapshot is not None:
+        _params["sharesnapshot"] = _SERIALIZER.query("sharesnapshot", sharesnapshot, "str")
+    if timeout is not None:
+        _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0)
+
+    # Construct headers
+    if allow_trailing_dot is not None:
+        _headers["x-ms-allow-trailing-dot"] = _SERIALIZER.header("allow_trailing_dot", allow_trailing_dot, "bool")
+    _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str")
+    if file_request_intent is not None:
+        _headers["x-ms-file-request-intent"] = _SERIALIZER.header("file_request_intent", file_request_intent, "str")
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_delete_request(
+    url: str,
+    *,
+    timeout: Optional[int] = None,
+    allow_trailing_dot: Optional[bool] = None,
+    file_request_intent: Optional[Union[str, _models.ShareTokenIntent]] = None,
+    **kwargs: Any
+) -> HttpRequest:
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    restype: Literal["directory"] = kwargs.pop("restype", _params.pop("restype", "directory"))
+    version: Literal["2025-05-05"] = kwargs.pop("version", _headers.pop("x-ms-version", "2025-05-05"))
+    accept = _headers.pop("Accept", "application/xml")
+
+    # Construct URL
+    _url = kwargs.pop("template_url", "{url}")
+    path_format_arguments = {
+        "url": _SERIALIZER.url("url", url, "str", skip_quote=True),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    _params["restype"] = _SERIALIZER.query("restype", restype, "str")
+    if timeout is not None:
+        _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0)
+
+    # Construct headers
+    if allow_trailing_dot is not None:
+        _headers["x-ms-allow-trailing-dot"] = _SERIALIZER.header("allow_trailing_dot", allow_trailing_dot, "bool")
+    _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str")
+    if file_request_intent is not None:
+        _headers["x-ms-file-request-intent"] = _SERIALIZER.header("file_request_intent", file_request_intent, "str")
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="DELETE", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_set_properties_request(
+    url: str,
+    *,
+    timeout: Optional[int] = None,
+    file_permission: str = "inherit",
+    file_permission_format: Optional[Union[str, _models.FilePermissionFormat]] = None,
+    file_permission_key: Optional[str] = None,
+    file_attributes: str = "none",
+    file_creation_time: str = "now",
+    file_last_write_time: str = "now",
+    file_change_time: Optional[str] = None,
+    owner: Optional[str] = None,
+    group: Optional[str] = None,
+    file_mode: Optional[str] = None,
+    allow_trailing_dot: Optional[bool] = None,
+    file_request_intent: Optional[Union[str, _models.ShareTokenIntent]] = None,
+    **kwargs: Any
+) -> HttpRequest:
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    restype: Literal["directory"] = kwargs.pop("restype", _params.pop("restype", "directory"))
+    comp: Literal["properties"] = kwargs.pop("comp", _params.pop("comp", "properties"))
+    version: Literal["2025-05-05"] = kwargs.pop("version", _headers.pop("x-ms-version", "2025-05-05"))
+    accept = _headers.pop("Accept", "application/xml")
+
+    # Construct URL
+    _url = kwargs.pop("template_url", "{url}")
+    path_format_arguments = {
+        "url": _SERIALIZER.url("url", url, "str", skip_quote=True),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    _params["restype"] = _SERIALIZER.query("restype", restype, "str")
+    _params["comp"] = _SERIALIZER.query("comp", comp, "str")
+    if timeout is not None:
+        _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0)
+
+    # Construct headers
+    _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str")
+    if file_permission is not None:
+        _headers["x-ms-file-permission"] = _SERIALIZER.header("file_permission", file_permission, "str")
+    if file_permission_format is not None:
+        _headers["x-ms-file-permission-format"] = _SERIALIZER.header(
+            "file_permission_format", file_permission_format, "str"
+        )
+    if file_permission_key is not None:
+        _headers["x-ms-file-permission-key"] = _SERIALIZER.header("file_permission_key", file_permission_key, "str")
+    if file_attributes is not None:
+        _headers["x-ms-file-attributes"] = _SERIALIZER.header("file_attributes", file_attributes, "str")
+    if file_creation_time is not None:
+        _headers["x-ms-file-creation-time"] = _SERIALIZER.header("file_creation_time", file_creation_time, "str")
+    if file_last_write_time is not None:
+        _headers["x-ms-file-last-write-time"] = _SERIALIZER.header("file_last_write_time", file_last_write_time, "str")
+    if file_change_time is not None:
+        _headers["x-ms-file-change-time"] = _SERIALIZER.header("file_change_time", file_change_time, "str")
+    if allow_trailing_dot is not None:
+        _headers["x-ms-allow-trailing-dot"] = _SERIALIZER.header("allow_trailing_dot", allow_trailing_dot, "bool")
+    if file_request_intent is not None:
+        _headers["x-ms-file-request-intent"] = _SERIALIZER.header("file_request_intent", file_request_intent, "str")
+    if owner is not None:
+        _headers["x-ms-owner"] = _SERIALIZER.header("owner", owner, "str")
+    if group is not None:
+        _headers["x-ms-group"] = _SERIALIZER.header("group", group, "str")
+    if file_mode is not None:
+        _headers["x-ms-mode"] = _SERIALIZER.header("file_mode", file_mode, "str")
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_set_metadata_request(
+    url: str,
+    *,
+    timeout: Optional[int] = None,
+    metadata: Optional[Dict[str, str]] = None,
+    allow_trailing_dot: Optional[bool] = None,
+    file_request_intent: Optional[Union[str, _models.ShareTokenIntent]] = None,
+    **kwargs: Any
+) -> HttpRequest:
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    restype: Literal["directory"] = kwargs.pop("restype", _params.pop("restype", "directory"))
+    comp: Literal["metadata"] = kwargs.pop("comp", _params.pop("comp", "metadata"))
+    version: Literal["2025-05-05"] = kwargs.pop("version", _headers.pop("x-ms-version", "2025-05-05"))
+    accept = _headers.pop("Accept", "application/xml")
+
+    # Construct URL
+    _url = kwargs.pop("template_url", "{url}")
+    path_format_arguments = {
+        "url": _SERIALIZER.url("url", url, "str", skip_quote=True),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    _params["restype"] = _SERIALIZER.query("restype", restype, "str")
+    _params["comp"] = _SERIALIZER.query("comp", comp, "str")
+    if timeout is not None:
+        _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0)
+
+    # Construct headers
+    if metadata is not None:
+        _headers["x-ms-meta"] = _SERIALIZER.header("metadata", metadata, "{str}")
+    _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str")
+    if allow_trailing_dot is not None:
+        _headers["x-ms-allow-trailing-dot"] = _SERIALIZER.header("allow_trailing_dot", allow_trailing_dot, "bool")
+    if file_request_intent is not None:
+        _headers["x-ms-file-request-intent"] = _SERIALIZER.header("file_request_intent", file_request_intent, "str")
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs)
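+
+
+# Editor's note: a hedged sketch, not generated code. Assuming a hypothetical
+# directory URL, the metadata builder maps the dict onto x-ms-meta headers and
+# pins restype=directory&comp=metadata in the query string:
+#
+#     request = build_set_metadata_request(
+#         url="https://myaccount.file.core.windows.net/myshare/mydir",
+#         metadata={"owner": "team-a"},
+#     )
+#     # request.method == "PUT"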
+
+
+def build_list_files_and_directories_segment_request(  # pylint: disable=name-too-long
+    url: str,
+    *,
+    prefix: Optional[str] = None,
+    sharesnapshot: Optional[str] = None,
+    marker: Optional[str] = None,
+    maxresults: Optional[int] = None,
+    timeout: Optional[int] = None,
+    include: Optional[List[Union[str, _models.ListFilesIncludeType]]] = None,
+    include_extended_info: Optional[bool] = None,
+    allow_trailing_dot: Optional[bool] = None,
+    file_request_intent: Optional[Union[str, _models.ShareTokenIntent]] = None,
+    **kwargs: Any
+) -> HttpRequest:
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    restype: Literal["directory"] = kwargs.pop("restype", _params.pop("restype", "directory"))
+    comp: Literal["list"] = kwargs.pop("comp", _params.pop("comp", "list"))
+    version: Literal["2025-05-05"] = kwargs.pop("version", _headers.pop("x-ms-version", "2025-05-05"))
+    accept = _headers.pop("Accept", "application/xml")
+
+    # Construct URL
+    _url = kwargs.pop("template_url", "{url}")
+    path_format_arguments = {
+        "url": _SERIALIZER.url("url", url, "str", skip_quote=True),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    _params["restype"] = _SERIALIZER.query("restype", restype, "str")
+    _params["comp"] = _SERIALIZER.query("comp", comp, "str")
+    if prefix is not None:
+        _params["prefix"] = _SERIALIZER.query("prefix", prefix, "str")
+    if sharesnapshot is not None:
+        _params["sharesnapshot"] = _SERIALIZER.query("sharesnapshot", sharesnapshot, "str")
+    if marker is not None:
+        _params["marker"] = _SERIALIZER.query("marker", marker, "str")
+    if maxresults is not None:
+        _params["maxresults"] = _SERIALIZER.query("maxresults", maxresults, "int", minimum=1)
+    if timeout is not None:
+        _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0)
+    if include is not None:
+        _params["include"] = _SERIALIZER.query("include", include, "[str]", div=",")
+
+    # Construct headers
+    _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str")
+    if include_extended_info is not None:
+        _headers["x-ms-file-extended-info"] = _SERIALIZER.header("include_extended_info", include_extended_info, "bool")
+    if allow_trailing_dot is not None:
+        _headers["x-ms-allow-trailing-dot"] = _SERIALIZER.header("allow_trailing_dot", allow_trailing_dot, "bool")
+    if file_request_intent is not None:
+        _headers["x-ms-file-request-intent"] = _SERIALIZER.header("file_request_intent", file_request_intent, "str")
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
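+
+
+# Editor's note: a hedged sketch, not generated code. The listing builder turns
+# its keyword arguments into query parameters; assuming a hypothetical
+# directory URL, the first page of at most 100 entries could be requested as:
+#
+#     request = build_list_files_and_directories_segment_request(
+#         url="https://myaccount.file.core.windows.net/myshare/mydir",
+#         prefix="logs-",
+#         maxresults=100,
+#     )
+#     # GET with restype=directory&comp=list plus prefix and maxresults params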
+
+
+def build_list_handles_request(
+    url: str,
+    *,
+    marker: Optional[str] = None,
+    maxresults: Optional[int] = None,
+    timeout: Optional[int] = None,
+    sharesnapshot: Optional[str] = None,
+    recursive: Optional[bool] = None,
+    allow_trailing_dot: Optional[bool] = None,
+    file_request_intent: Optional[Union[str, _models.ShareTokenIntent]] = None,
+    **kwargs: Any
+) -> HttpRequest:
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    comp: Literal["listhandles"] = kwargs.pop("comp", _params.pop("comp", "listhandles"))
+    version: Literal["2025-05-05"] = kwargs.pop("version", _headers.pop("x-ms-version", "2025-05-05"))
+    accept = _headers.pop("Accept", "application/xml")
+
+    # Construct URL
+    _url = kwargs.pop("template_url", "{url}")
+    path_format_arguments = {
+        "url": _SERIALIZER.url("url", url, "str", skip_quote=True),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    _params["comp"] = _SERIALIZER.query("comp", comp, "str")
+    if marker is not None:
+        _params["marker"] = _SERIALIZER.query("marker", marker, "str")
+    if maxresults is not None:
+        _params["maxresults"] = _SERIALIZER.query("maxresults", maxresults, "int", minimum=1)
+    if timeout is not None:
+        _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0)
+    if sharesnapshot is not None:
+        _params["sharesnapshot"] = _SERIALIZER.query("sharesnapshot", sharesnapshot, "str")
+
+    # Construct headers
+    if recursive is not None:
+        _headers["x-ms-recursive"] = _SERIALIZER.header("recursive", recursive, "bool")
+    _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str")
+    if allow_trailing_dot is not None:
+        _headers["x-ms-allow-trailing-dot"] = _SERIALIZER.header("allow_trailing_dot", allow_trailing_dot, "bool")
+    if file_request_intent is not None:
+        _headers["x-ms-file-request-intent"] = _SERIALIZER.header("file_request_intent", file_request_intent, "str")
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
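+
+
+# Editor's note: a hedged sketch, not generated code. Assuming a hypothetical
+# directory URL, handles across the whole directory tree can be listed with:
+#
+#     request = build_list_handles_request(
+#         url="https://myaccount.file.core.windows.net/myshare/mydir",
+#         recursive=True,
+#         maxresults=50,
+#     )
+#     # GET with comp=listhandles; recursive travels as the x-ms-recursive header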
+
+
+def build_force_close_handles_request(
+    url: str,
+    *,
+    handle_id: str,
+    timeout: Optional[int] = None,
+    marker: Optional[str] = None,
+    sharesnapshot: Optional[str] = None,
+    recursive: Optional[bool] = None,
+    allow_trailing_dot: Optional[bool] = None,
+    file_request_intent: Optional[Union[str, _models.ShareTokenIntent]] = None,
+    **kwargs: Any
+) -> HttpRequest:
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    comp: Literal["forceclosehandles"] = kwargs.pop("comp", _params.pop("comp", "forceclosehandles"))
+    version: Literal["2025-05-05"] = kwargs.pop("version", _headers.pop("x-ms-version", "2025-05-05"))
+    accept = _headers.pop("Accept", "application/xml")
+
+    # Construct URL
+    _url = kwargs.pop("template_url", "{url}")
+    path_format_arguments = {
+        "url": _SERIALIZER.url("url", url, "str", skip_quote=True),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    _params["comp"] = _SERIALIZER.query("comp", comp, "str")
+    if timeout is not None:
+        _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0)
+    if marker is not None:
+        _params["marker"] = _SERIALIZER.query("marker", marker, "str")
+    if sharesnapshot is not None:
+        _params["sharesnapshot"] = _SERIALIZER.query("sharesnapshot", sharesnapshot, "str")
+
+    # Construct headers
+    _headers["x-ms-handle-id"] = _SERIALIZER.header("handle_id", handle_id, "str")
+    if recursive is not None:
+        _headers["x-ms-recursive"] = _SERIALIZER.header("recursive", recursive, "bool")
+    _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str")
+    if allow_trailing_dot is not None:
+        _headers["x-ms-allow-trailing-dot"] = _SERIALIZER.header("allow_trailing_dot", allow_trailing_dot, "bool")
+    if file_request_intent is not None:
+        _headers["x-ms-file-request-intent"] = _SERIALIZER.header("file_request_intent", file_request_intent, "str")
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs)
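+
+
+# Editor's note: a hedged sketch, not generated code. handle_id is required and
+# is sent as the x-ms-handle-id header; per the handle_id docstring later in
+# this file, "*" is a wildcard for all handles. Assuming a hypothetical URL:
+#
+#     request = build_force_close_handles_request(
+#         url="https://myaccount.file.core.windows.net/myshare/mydir",
+#         handle_id="*",
+#         recursive=True,
+#     )
+#     # PUT with comp=forceclosehandles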
+
+
+def build_rename_request(
+    url: str,
+    *,
+    rename_source: str,
+    timeout: Optional[int] = None,
+    replace_if_exists: Optional[bool] = None,
+    ignore_read_only: Optional[bool] = None,
+    source_lease_id: Optional[str] = None,
+    destination_lease_id: Optional[str] = None,
+    file_attributes: Optional[str] = None,
+    file_creation_time: Optional[str] = None,
+    file_last_write_time: Optional[str] = None,
+    file_change_time: Optional[str] = None,
+    file_permission: str = "inherit",
+    file_permission_format: Optional[Union[str, _models.FilePermissionFormat]] = None,
+    file_permission_key: Optional[str] = None,
+    metadata: Optional[Dict[str, str]] = None,
+    allow_trailing_dot: Optional[bool] = None,
+    allow_source_trailing_dot: Optional[bool] = None,
+    file_request_intent: Optional[Union[str, _models.ShareTokenIntent]] = None,
+    **kwargs: Any
+) -> HttpRequest:
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    restype: Literal["directory"] = kwargs.pop("restype", _params.pop("restype", "directory"))
+    comp: Literal["rename"] = kwargs.pop("comp", _params.pop("comp", "rename"))
+    version: Literal["2025-05-05"] = kwargs.pop("version", _headers.pop("x-ms-version", "2025-05-05"))
+    accept = _headers.pop("Accept", "application/xml")
+
+    # Construct URL
+    _url = kwargs.pop("template_url", "{url}")
+    path_format_arguments = {
+        "url": _SERIALIZER.url("url", url, "str", skip_quote=True),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    _params["restype"] = _SERIALIZER.query("restype", restype, "str")
+    _params["comp"] = _SERIALIZER.query("comp", comp, "str")
+    if timeout is not None:
+        _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0)
+
+    # Construct headers
+    _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str")
+    _headers["x-ms-file-rename-source"] = _SERIALIZER.header("rename_source", rename_source, "str")
+    if replace_if_exists is not None:
+        _headers["x-ms-file-rename-replace-if-exists"] = _SERIALIZER.header(
+            "replace_if_exists", replace_if_exists, "bool"
+        )
+    if ignore_read_only is not None:
+        _headers["x-ms-file-rename-ignore-readonly"] = _SERIALIZER.header("ignore_read_only", ignore_read_only, "bool")
+    if source_lease_id is not None:
+        _headers["x-ms-source-lease-id"] = _SERIALIZER.header("source_lease_id", source_lease_id, "str")
+    if destination_lease_id is not None:
+        _headers["x-ms-destination-lease-id"] = _SERIALIZER.header("destination_lease_id", destination_lease_id, "str")
+    if file_attributes is not None:
+        _headers["x-ms-file-attributes"] = _SERIALIZER.header("file_attributes", file_attributes, "str")
+    if file_creation_time is not None:
+        _headers["x-ms-file-creation-time"] = _SERIALIZER.header("file_creation_time", file_creation_time, "str")
+    if file_last_write_time is not None:
+        _headers["x-ms-file-last-write-time"] = _SERIALIZER.header("file_last_write_time", file_last_write_time, "str")
+    if file_change_time is not None:
+        _headers["x-ms-file-change-time"] = _SERIALIZER.header("file_change_time", file_change_time, "str")
+    if file_permission is not None:
+        _headers["x-ms-file-permission"] = _SERIALIZER.header("file_permission", file_permission, "str")
+    if file_permission_format is not None:
+        _headers["x-ms-file-permission-format"] = _SERIALIZER.header(
+            "file_permission_format", file_permission_format, "str"
+        )
+    if file_permission_key is not None:
+        _headers["x-ms-file-permission-key"] = _SERIALIZER.header("file_permission_key", file_permission_key, "str")
+    if metadata is not None:
+        _headers["x-ms-meta"] = _SERIALIZER.header("metadata", metadata, "{str}")
+    if allow_trailing_dot is not None:
+        _headers["x-ms-allow-trailing-dot"] = _SERIALIZER.header("allow_trailing_dot", allow_trailing_dot, "bool")
+    if allow_source_trailing_dot is not None:
+        _headers["x-ms-source-allow-trailing-dot"] = _SERIALIZER.header(
+            "allow_source_trailing_dot", allow_source_trailing_dot, "bool"
+        )
+    if file_request_intent is not None:
+        _headers["x-ms-file-request-intent"] = _SERIALIZER.header("file_request_intent", file_request_intent, "str")
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs)
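+
+
+# Editor's note: a hedged sketch, not generated code. rename_source is required
+# and travels as the x-ms-file-rename-source header, while the destination is
+# the request URL itself. Assuming hypothetical URLs:
+#
+#     request = build_rename_request(
+#         url="https://myaccount.file.core.windows.net/myshare/new-dir",
+#         rename_source="https://myaccount.file.core.windows.net/myshare/old-dir",
+#         replace_if_exists=False,
+#     )
+#     # PUT with restype=directory&comp=rename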
+
+
+class DirectoryOperations:
+    """
+    .. warning::
+        **DO NOT** instantiate this class directly.
+
+        Instead, you should access the following operations through
+        :class:`~azure.storage.fileshare.AzureFileStorage`'s
+        :attr:`directory` attribute.
+    """
+
+    models = _models
+
+    def __init__(self, *args, **kwargs):
+        input_args = list(args)
+        self._client: PipelineClient = input_args.pop(0) if input_args else kwargs.pop("client")
+        self._config: AzureFileStorageConfiguration = input_args.pop(0) if input_args else kwargs.pop("config")
+        self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer")
+        self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer")
+
+    @distributed_trace
+    def create(  # pylint: disable=inconsistent-return-statements
+        self,
+        timeout: Optional[int] = None,
+        metadata: Optional[Dict[str, str]] = None,
+        file_permission: str = "inherit",
+        file_permission_format: Optional[Union[str, _models.FilePermissionFormat]] = None,
+        file_permission_key: Optional[str] = None,
+        file_attributes: str = "none",
+        file_creation_time: str = "now",
+        file_last_write_time: str = "now",
+        file_change_time: Optional[str] = None,
+        owner: Optional[str] = None,
+        group: Optional[str] = None,
+        file_mode: Optional[str] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """Creates a new directory under the specified share or parent directory.
+
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
+         Timeouts for File Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param metadata: Name-value pairs to associate with a file storage object. Default value is
+         None.
+        :type metadata: dict[str, str]
+        :param file_permission: If specified, the permission (security descriptor) shall be set for
+         the directory/file. This header can be used if the permission size is 8KB or less;
+         otherwise the x-ms-file-permission-key header shall be used. Default value: Inherit. If
+         SDDL is specified as input, it must have owner, group and dacl. Note: Only one of the
+         x-ms-file-permission or x-ms-file-permission-key should be specified. Default value is
+         "inherit".
+        :type file_permission: str
+        :param file_permission_format: Optional. Available for version 2023-06-01 and later. Specifies
+         the format in which the permission is provided. Acceptable values are SDDL or binary. If
+         x-ms-file-permission-format is unspecified or explicitly set to SDDL, the permission must be
+         provided in SDDL format. If x-ms-file-permission-format is explicitly set to binary, the
+         permission must be provided as a base64 string representing the binary encoding of the
+         permission. Known values are: "Sddl" and "Binary". Default value is None.
+        :type file_permission_format: str or ~azure.storage.fileshare.models.FilePermissionFormat
+        :param file_permission_key: Key of the permission to be set for the directory/file. Note: Only
+         one of the x-ms-file-permission or x-ms-file-permission-key should be specified. Default value
+         is None.
+        :type file_permission_key: str
+        :param file_attributes: If specified, the provided file attributes shall be set. Default value:
+         ‘Archive’ for file and ‘Directory’ for directory. ‘None’ can also be specified as default.
+         Default value is "none".
+        :type file_attributes: str
+        :param file_creation_time: Creation time for the file/directory. Default value: Now. Default
+         value is "now".
+        :type file_creation_time: str
+        :param file_last_write_time: Last write time for the file/directory. Default value: Now.
+         Default value is "now".
+        :type file_last_write_time: str
+        :param file_change_time: Change time for the file/directory. If not specified, the service
+         defaults this to Now. Default value is None.
+        :type file_change_time: str
+        :param owner: Optional, NFS only. The owner of the file or directory. Default value is None.
+        :type owner: str
+        :param group: Optional, NFS only. The owning group of the file or directory. Default value is
+         None.
+        :type group: str
+        :param file_mode: Optional, NFS only. The file mode of the file or directory. Default value is
+         None.
+        :type file_mode: str
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        restype: Literal["directory"] = kwargs.pop("restype", _params.pop("restype", "directory"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _request = build_create_request(
+            url=self._config.url,
+            timeout=timeout,
+            metadata=metadata,
+            file_permission=file_permission,
+            file_permission_format=file_permission_format,
+            file_permission_key=file_permission_key,
+            file_attributes=file_attributes,
+            file_creation_time=file_creation_time,
+            file_last_write_time=file_last_write_time,
+            file_change_time=file_change_time,
+            owner=owner,
+            group=group,
+            file_mode=file_mode,
+            allow_trailing_dot=self._config.allow_trailing_dot,
+            file_request_intent=self._config.file_request_intent,
+            restype=restype,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [201]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+        response_headers["x-ms-request-server-encrypted"] = self._deserialize(
+            "bool", response.headers.get("x-ms-request-server-encrypted")
+        )
+        response_headers["x-ms-file-permission-key"] = self._deserialize(
+            "str", response.headers.get("x-ms-file-permission-key")
+        )
+        response_headers["x-ms-file-attributes"] = self._deserialize(
+            "str", response.headers.get("x-ms-file-attributes")
+        )
+        response_headers["x-ms-file-creation-time"] = self._deserialize(
+            "str", response.headers.get("x-ms-file-creation-time")
+        )
+        response_headers["x-ms-file-last-write-time"] = self._deserialize(
+            "str", response.headers.get("x-ms-file-last-write-time")
+        )
+        response_headers["x-ms-file-change-time"] = self._deserialize(
+            "str", response.headers.get("x-ms-file-change-time")
+        )
+        response_headers["x-ms-file-id"] = self._deserialize("str", response.headers.get("x-ms-file-id"))
+        response_headers["x-ms-file-parent-id"] = self._deserialize("str", response.headers.get("x-ms-file-parent-id"))
+        response_headers["x-ms-mode"] = self._deserialize("str", response.headers.get("x-ms-mode"))
+        response_headers["x-ms-owner"] = self._deserialize("str", response.headers.get("x-ms-owner"))
+        response_headers["x-ms-group"] = self._deserialize("str", response.headers.get("x-ms-group"))
+        response_headers["x-ms-file-file-type"] = self._deserialize("str", response.headers.get("x-ms-file-file-type"))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
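+
+    # Editor's note: a hedged usage sketch, not generated code. Per the class
+    # docstring, these operations are reached through AzureFileStorage's
+    # `directory` attribute; `client` below stands for an assumed,
+    # already-configured AzureFileStorage instance:
+    #
+    #     client.directory.create(timeout=30, metadata={"env": "test"})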
+
+    @distributed_trace
+    def get_properties(  # pylint: disable=inconsistent-return-statements
+        self, sharesnapshot: Optional[str] = None, timeout: Optional[int] = None, **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """Returns all system properties for the specified directory, and can also be used to check the
+        existence of a directory. The data returned does not include the files in the directory or any
+        subdirectories.
+
+        :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present,
+         specifies the share snapshot to query. Default value is None.
+        :type sharesnapshot: str
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
+         Timeouts for File Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        restype: Literal["directory"] = kwargs.pop("restype", _params.pop("restype", "directory"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _request = build_get_properties_request(
+            url=self._config.url,
+            sharesnapshot=sharesnapshot,
+            timeout=timeout,
+            allow_trailing_dot=self._config.allow_trailing_dot,
+            file_request_intent=self._config.file_request_intent,
+            restype=restype,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["x-ms-meta"] = self._deserialize("{str}", response.headers.get("x-ms-meta"))
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+        response_headers["x-ms-server-encrypted"] = self._deserialize(
+            "bool", response.headers.get("x-ms-server-encrypted")
+        )
+        response_headers["x-ms-file-attributes"] = self._deserialize(
+            "str", response.headers.get("x-ms-file-attributes")
+        )
+        response_headers["x-ms-file-creation-time"] = self._deserialize(
+            "str", response.headers.get("x-ms-file-creation-time")
+        )
+        response_headers["x-ms-file-last-write-time"] = self._deserialize(
+            "str", response.headers.get("x-ms-file-last-write-time")
+        )
+        response_headers["x-ms-file-change-time"] = self._deserialize(
+            "str", response.headers.get("x-ms-file-change-time")
+        )
+        response_headers["x-ms-file-permission-key"] = self._deserialize(
+            "str", response.headers.get("x-ms-file-permission-key")
+        )
+        response_headers["x-ms-file-id"] = self._deserialize("str", response.headers.get("x-ms-file-id"))
+        response_headers["x-ms-file-parent-id"] = self._deserialize("str", response.headers.get("x-ms-file-parent-id"))
+        response_headers["x-ms-mode"] = self._deserialize("str", response.headers.get("x-ms-mode"))
+        response_headers["x-ms-owner"] = self._deserialize("str", response.headers.get("x-ms-owner"))
+        response_headers["x-ms-group"] = self._deserialize("str", response.headers.get("x-ms-group"))
+        response_headers["x-ms-file-file-type"] = self._deserialize("str", response.headers.get("x-ms-file-file-type"))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
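+
+    # Editor's note: hedged sketch, not generated code. get_properties returns
+    # None; the useful output is the response headers, which the optional `cls`
+    # callback (called with (pipeline_response, deserialized, headers)) can
+    # surface. `client` is an assumed AzureFileStorage instance:
+    #
+    #     hdrs = client.directory.get_properties(
+    #         cls=lambda response, body, headers: headers,
+    #     )
+    #     # hdrs["ETag"], hdrs["x-ms-file-attributes"], ...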
+
+    @distributed_trace
+    def delete(  # pylint: disable=inconsistent-return-statements
+        self, timeout: Optional[int] = None, **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """Removes the specified empty directory. Note that the directory must be empty before it can be
+        deleted.
+
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
+         Timeouts for File Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        restype: Literal["directory"] = kwargs.pop("restype", _params.pop("restype", "directory"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _request = build_delete_request(
+            url=self._config.url,
+            timeout=timeout,
+            allow_trailing_dot=self._config.allow_trailing_dot,
+            file_request_intent=self._config.file_request_intent,
+            restype=restype,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [202]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
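+
+    # Editor's note: hedged sketch, not generated code. delete returns None on
+    # HTTP 202 and fails with 409 if the directory is not empty. `client` is an
+    # assumed AzureFileStorage instance:
+    #
+    #     client.directory.delete(timeout=30)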
+
+    @distributed_trace
+    def set_properties(  # pylint: disable=inconsistent-return-statements
+        self,
+        timeout: Optional[int] = None,
+        file_permission: str = "inherit",
+        file_permission_format: Optional[Union[str, _models.FilePermissionFormat]] = None,
+        file_permission_key: Optional[str] = None,
+        file_attributes: str = "none",
+        file_creation_time: str = "now",
+        file_last_write_time: str = "now",
+        file_change_time: Optional[str] = None,
+        owner: Optional[str] = None,
+        group: Optional[str] = None,
+        file_mode: Optional[str] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """Sets properties on the directory.
+
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
+         Timeouts for File Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param file_permission: If specified, the permission (security descriptor) shall be set for
+         the directory/file. This header can be used if the permission size is 8KB or less;
+         otherwise the x-ms-file-permission-key header shall be used. Default value: Inherit. If
+         SDDL is specified as input, it must have owner, group and dacl. Note: Only one of the
+         x-ms-file-permission or x-ms-file-permission-key should be specified. Default value is
+         "inherit".
+        :type file_permission: str
+        :param file_permission_format: Optional. Available for version 2023-06-01 and later. Specifies
+         the format in which the permission is provided. Acceptable values are SDDL or binary. If
+         x-ms-file-permission-format is unspecified or explicitly set to SDDL, the permission must be
+         provided in SDDL format. If x-ms-file-permission-format is explicitly set to binary, the
+         permission must be provided as a base64 string representing the binary encoding of the
+         permission. Known values are: "Sddl" and "Binary". Default value is None.
+        :type file_permission_format: str or ~azure.storage.fileshare.models.FilePermissionFormat
+        :param file_permission_key: Key of the permission to be set for the directory/file. Note: Only
+         one of the x-ms-file-permission or x-ms-file-permission-key should be specified. Default value
+         is None.
+        :type file_permission_key: str
+        :param file_attributes: If specified, the provided file attributes shall be set. Default value:
+         ‘Archive’ for file and ‘Directory’ for directory. ‘None’ can also be specified as default.
+         Default value is "none".
+        :type file_attributes: str
+        :param file_creation_time: Creation time for the file/directory. Default value: Now. Default
+         value is "now".
+        :type file_creation_time: str
+        :param file_last_write_time: Last write time for the file/directory. Default value: Now.
+         Default value is "now".
+        :type file_last_write_time: str
+        :param file_change_time: Change time for the file/directory. If not specified, the service
+         defaults this to Now. Default value is None.
+        :type file_change_time: str
+        :param owner: Optional, NFS only. The owner of the file or directory. Default value is None.
+        :type owner: str
+        :param group: Optional, NFS only. The owning group of the file or directory. Default value is
+         None.
+        :type group: str
+        :param file_mode: Optional, NFS only. The file mode of the file or directory. Default value is
+         None.
+        :type file_mode: str
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        restype: Literal["directory"] = kwargs.pop("restype", _params.pop("restype", "directory"))
+        comp: Literal["properties"] = kwargs.pop("comp", _params.pop("comp", "properties"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _request = build_set_properties_request(
+            url=self._config.url,
+            timeout=timeout,
+            file_permission=file_permission,
+            file_permission_format=file_permission_format,
+            file_permission_key=file_permission_key,
+            file_attributes=file_attributes,
+            file_creation_time=file_creation_time,
+            file_last_write_time=file_last_write_time,
+            file_change_time=file_change_time,
+            owner=owner,
+            group=group,
+            file_mode=file_mode,
+            allow_trailing_dot=self._config.allow_trailing_dot,
+            file_request_intent=self._config.file_request_intent,
+            restype=restype,
+            comp=comp,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+        response_headers["x-ms-request-server-encrypted"] = self._deserialize(
+            "bool", response.headers.get("x-ms-request-server-encrypted")
+        )
+        response_headers["x-ms-file-permission-key"] = self._deserialize(
+            "str", response.headers.get("x-ms-file-permission-key")
+        )
+        response_headers["x-ms-file-attributes"] = self._deserialize(
+            "str", response.headers.get("x-ms-file-attributes")
+        )
+        response_headers["x-ms-file-creation-time"] = self._deserialize(
+            "str", response.headers.get("x-ms-file-creation-time")
+        )
+        response_headers["x-ms-file-last-write-time"] = self._deserialize(
+            "str", response.headers.get("x-ms-file-last-write-time")
+        )
+        response_headers["x-ms-file-change-time"] = self._deserialize(
+            "str", response.headers.get("x-ms-file-change-time")
+        )
+        response_headers["x-ms-file-id"] = self._deserialize("str", response.headers.get("x-ms-file-id"))
+        response_headers["x-ms-file-parent-id"] = self._deserialize("str", response.headers.get("x-ms-file-parent-id"))
+        response_headers["x-ms-mode"] = self._deserialize("str", response.headers.get("x-ms-mode"))
+        response_headers["x-ms-owner"] = self._deserialize("str", response.headers.get("x-ms-owner"))
+        response_headers["x-ms-group"] = self._deserialize("str", response.headers.get("x-ms-group"))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
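+
+    # Editor's note: hedged sketch, not generated code. Only one of
+    # file_permission or file_permission_key should be supplied per call.
+    # `client` is an assumed AzureFileStorage instance:
+    #
+    #     client.directory.set_properties(file_attributes="Directory")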
+
+    @distributed_trace
+    def set_metadata(  # pylint: disable=inconsistent-return-statements
+        self, timeout: Optional[int] = None, metadata: Optional[Dict[str, str]] = None, **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """Updates user defined metadata for the specified directory.
+
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
+         Timeouts for File Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param metadata: Name-value pairs to associate with a file storage object. Default value is
+         None.
+        :type metadata: dict[str, str]
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        restype: Literal["directory"] = kwargs.pop("restype", _params.pop("restype", "directory"))
+        comp: Literal["metadata"] = kwargs.pop("comp", _params.pop("comp", "metadata"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _request = build_set_metadata_request(
+            url=self._config.url,
+            timeout=timeout,
+            metadata=metadata,
+            allow_trailing_dot=self._config.allow_trailing_dot,
+            file_request_intent=self._config.file_request_intent,
+            restype=restype,
+            comp=comp,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+        response_headers["x-ms-request-server-encrypted"] = self._deserialize(
+            "bool", response.headers.get("x-ms-request-server-encrypted")
+        )
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
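+
+    # Editor's note: hedged sketch, not generated code. `client` is an assumed
+    # AzureFileStorage instance:
+    #
+    #     client.directory.set_metadata(metadata={"project": "demo"})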
+
+    @distributed_trace
+    def list_files_and_directories_segment(
+        self,
+        prefix: Optional[str] = None,
+        sharesnapshot: Optional[str] = None,
+        marker: Optional[str] = None,
+        maxresults: Optional[int] = None,
+        timeout: Optional[int] = None,
+        include: Optional[List[Union[str, _models.ListFilesIncludeType]]] = None,
+        include_extended_info: Optional[bool] = None,
+        **kwargs: Any
+    ) -> _models.ListFilesAndDirectoriesSegmentResponse:
+        # pylint: disable=line-too-long
+        """Returns a list of files or directories under the specified share or directory. It lists the
+        contents only for a single level of the directory hierarchy.
+
+        :param prefix: Filters the results to return only entries whose name begins with the specified
+         prefix. Default value is None.
+        :type prefix: str
+        :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present,
+         specifies the share snapshot to query. Default value is None.
+        :type sharesnapshot: str
+        :param marker: A string value that identifies the portion of the list to be returned with the
+         next list operation. The operation returns a marker value within the response body if the list
+         returned was not complete. The marker value may then be used in a subsequent call to request
+         the next set of list items. The marker value is opaque to the client. Default value is None.
+        :type marker: str
+        :param maxresults: Specifies the maximum number of entries to return. If the request does not
+         specify maxresults, or specifies a value greater than 5,000, the server will return up to 5,000
+         items. Default value is None.
+        :type maxresults: int
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
+         Timeouts for File Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param include: Include this parameter to specify one or more datasets to include in the
+         response. Default value is None.
+        :type include: list[str or ~azure.storage.fileshare.models.ListFilesIncludeType]
+        :param include_extended_info: Include extended information. Default value is None.
+        :type include_extended_info: bool
+        :return: ListFilesAndDirectoriesSegmentResponse or the result of cls(response)
+        :rtype: ~azure.storage.fileshare.models.ListFilesAndDirectoriesSegmentResponse
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        restype: Literal["directory"] = kwargs.pop("restype", _params.pop("restype", "directory"))
+        comp: Literal["list"] = kwargs.pop("comp", _params.pop("comp", "list"))
+        cls: ClsType[_models.ListFilesAndDirectoriesSegmentResponse] = kwargs.pop("cls", None)
+
+        _request = build_list_files_and_directories_segment_request(
+            url=self._config.url,
+            prefix=prefix,
+            sharesnapshot=sharesnapshot,
+            marker=marker,
+            maxresults=maxresults,
+            timeout=timeout,
+            include=include,
+            include_extended_info=include_extended_info,
+            allow_trailing_dot=self._config.allow_trailing_dot,
+            file_request_intent=self._config.file_request_intent,
+            restype=restype,
+            comp=comp,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["Content-Type"] = self._deserialize("str", response.headers.get("Content-Type"))
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+
+        deserialized = self._deserialize("ListFilesAndDirectoriesSegmentResponse", pipeline_response.http_response)
+
+        if cls:
+            return cls(pipeline_response, deserialized, response_headers)  # type: ignore
+
+        return deserialized  # type: ignore
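+
+    # Editor's note: hedged sketch, not generated code. The segment APIs page
+    # through results via the marker echoed in the response body (attribute
+    # names below are assumed from the generated models). `client` is an
+    # assumed AzureFileStorage instance:
+    #
+    #     marker = None
+    #     while True:
+    #         page = client.directory.list_files_and_directories_segment(
+    #             marker=marker, maxresults=100
+    #         )
+    #         ...  # consume page.segment entries
+    #         marker = page.next_marker
+    #         if not marker:
+    #             break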
+
+    @distributed_trace
+    def list_handles(
+        self,
+        marker: Optional[str] = None,
+        maxresults: Optional[int] = None,
+        timeout: Optional[int] = None,
+        sharesnapshot: Optional[str] = None,
+        recursive: Optional[bool] = None,
+        **kwargs: Any
+    ) -> _models.ListHandlesResponse:
+        # pylint: disable=line-too-long
+        """Lists handles for directory.
+
+        :param marker: A string value that identifies the portion of the list to be returned with the
+         next list operation. The operation returns a marker value within the response body if the list
+         returned was not complete. The marker value may then be used in a subsequent call to request
+         the next set of list items. The marker value is opaque to the client. Default value is None.
+        :type marker: str
+        :param maxresults: Specifies the maximum number of entries to return. If the request does not
+         specify maxresults, or specifies a value greater than 5,000, the server will return up to 5,000
+         items. Default value is None.
+        :type maxresults: int
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
+         Timeouts for File Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present,
+         specifies the share snapshot to query. Default value is None.
+        :type sharesnapshot: str
+        :param recursive: Specifies whether the operation should apply to the directory specified in
+         the URI, its files, its subdirectories, and their files. Default value is None.
+        :type recursive: bool
+        :return: ListHandlesResponse or the result of cls(response)
+        :rtype: ~azure.storage.fileshare.models.ListHandlesResponse
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        comp: Literal["listhandles"] = kwargs.pop("comp", _params.pop("comp", "listhandles"))
+        cls: ClsType[_models.ListHandlesResponse] = kwargs.pop("cls", None)
+
+        _request = build_list_handles_request(
+            url=self._config.url,
+            marker=marker,
+            maxresults=maxresults,
+            timeout=timeout,
+            sharesnapshot=sharesnapshot,
+            recursive=recursive,
+            allow_trailing_dot=self._config.allow_trailing_dot,
+            file_request_intent=self._config.file_request_intent,
+            comp=comp,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["Content-Type"] = self._deserialize("str", response.headers.get("Content-Type"))
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+
+        deserialized = self._deserialize("ListHandlesResponse", pipeline_response.http_response)
+
+        if cls:
+            return cls(pipeline_response, deserialized, response_headers)  # type: ignore
+
+        return deserialized  # type: ignore
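+
+    # Editor's note: hedged sketch, not generated code. recursive=True includes
+    # handles on subdirectories and their files; attribute names below are
+    # assumed from the generated models. `client` is an assumed
+    # AzureFileStorage instance:
+    #
+    #     handles = client.directory.list_handles(recursive=True)
+    #     # handles.handle_list holds the open handles; handles.next_marker
+    #     # pages further results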
+
+    @distributed_trace
+    def force_close_handles(  # pylint: disable=inconsistent-return-statements
+        self,
+        handle_id: str,
+        timeout: Optional[int] = None,
+        marker: Optional[str] = None,
+        sharesnapshot: Optional[str] = None,
+        recursive: Optional[bool] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """Closes all handles open for given directory.
+
+        :param handle_id: Specifies the handle ID opened on the file or directory to be closed.
+         Asterisk (‘*’) is a wildcard that specifies all handles. Required.
+        :type handle_id: str
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
+         Timeouts for File Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param marker: A string value that identifies the portion of the list to be returned with the
+         next list operation. The operation returns a marker value within the response body if the list
+         returned was not complete. The marker value may then be used in a subsequent call to request
+         the next set of list items. The marker value is opaque to the client. Default value is None.
+        :type marker: str
+        :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present,
+         specifies the share snapshot to query. Default value is None.
+        :type sharesnapshot: str
+        :param recursive: Specifies whether the operation should apply to the directory specified in
+         the URI, its files, its subdirectories, and their files. Default value is None.
+        :type recursive: bool
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        comp: Literal["forceclosehandles"] = kwargs.pop("comp", _params.pop("comp", "forceclosehandles"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _request = build_force_close_handles_request(
+            url=self._config.url,
+            handle_id=handle_id,
+            timeout=timeout,
+            marker=marker,
+            sharesnapshot=sharesnapshot,
+            recursive=recursive,
+            allow_trailing_dot=self._config.allow_trailing_dot,
+            file_request_intent=self._config.file_request_intent,
+            comp=comp,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+        response_headers["x-ms-marker"] = self._deserialize("str", response.headers.get("x-ms-marker"))
+        response_headers["x-ms-number-of-handles-closed"] = self._deserialize(
+            "int", response.headers.get("x-ms-number-of-handles-closed")
+        )
+        response_headers["x-ms-number-of-handles-failed"] = self._deserialize(
+            "int", response.headers.get("x-ms-number-of-handles-failed")
+        )
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
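+
+    # Editor's note: a hedged usage sketch, not part of the generated code.
+    # Assuming `directory_ops` is an instance of this operations group, the
+    # wildcard handle ID "*" asks the service to close every open handle on
+    # the directory (and, with recursive=True, on its subtree):
+    #
+    #     directory_ops.force_close_handles(handle_id="*", recursive=True)
+    #
+    # Close counts are reported in the x-ms-number-of-handles-closed and
+    # x-ms-number-of-handles-failed response headers (capture them via `cls`).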
+
+    @distributed_trace
+    def rename(  # pylint: disable=inconsistent-return-statements
+        self,
+        rename_source: str,
+        timeout: Optional[int] = None,
+        replace_if_exists: Optional[bool] = None,
+        ignore_read_only: Optional[bool] = None,
+        file_permission: str = "inherit",
+        file_permission_format: Optional[Union[str, _models.FilePermissionFormat]] = None,
+        file_permission_key: Optional[str] = None,
+        metadata: Optional[Dict[str, str]] = None,
+        source_lease_access_conditions: Optional[_models.SourceLeaseAccessConditions] = None,
+        destination_lease_access_conditions: Optional[_models.DestinationLeaseAccessConditions] = None,
+        copy_file_smb_info: Optional[_models.CopyFileSmbInfo] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """Renames a directory.
+
+        :param rename_source: Specifies the URI-style path of the source file, up to 2 KB in length.
+         Required.
+        :type rename_source: str
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
+         Timeouts for File Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param replace_if_exists: Optional. A boolean value indicating whether, if the destination
+         file already exists, this request will overwrite it. If true, the rename succeeds and
+         overwrites the destination file. If not provided, or if false and the destination file
+         exists, the request does not overwrite the destination file. If provided and the destination
+         file doesn't exist, the rename succeeds. Note: This value does not override the
+         x-ms-file-copy-ignore-read-only header value. Default value is None.
+        :type replace_if_exists: bool
+        :param ignore_read_only: Optional. A boolean value that specifies whether the ReadOnly
+         attribute on a preexisting destination file should be respected. If true, the rename will
+         succeed, otherwise, a previous file at the destination with the ReadOnly attribute set will
+         cause the rename to fail. Default value is None.
+        :type ignore_read_only: bool
+        :param file_permission: If specified, the permission (security descriptor) is set for the
+         directory/file. This header can be used if the permission size is <= 8 KB; otherwise the
+         x-ms-file-permission-key header must be used. If SDDL is specified as input, it must have
+         owner, group, and dacl. Note: Only one of x-ms-file-permission or x-ms-file-permission-key
+         should be specified. Default value is "inherit".
+        :type file_permission: str
+        :param file_permission_format: Optional. Available for version 2023-06-01 and later. Specifies
+         the format in which the permission is provided. Acceptable values are SDDL or binary. If
+         x-ms-file-permission-format is unspecified or explicitly set to SDDL, the permission is
+         interpreted as SDDL. If it is explicitly set to binary, the permission is interpreted as a
+         base64 string representing the binary encoding of the permission. Known values are: "Sddl"
+         and "Binary". Default value is None.
+        :type file_permission_format: str or ~azure.storage.fileshare.models.FilePermissionFormat
+        :param file_permission_key: Key of the permission to be set for the directory/file. Note: Only
+         one of the x-ms-file-permission or x-ms-file-permission-key should be specified. Default value
+         is None.
+        :type file_permission_key: str
+        :param metadata: A name-value pair to associate with a file storage object. Default value is
+         None.
+        :type metadata: dict[str, str]
+        :param source_lease_access_conditions: Parameter group. Default value is None.
+        :type source_lease_access_conditions:
+         ~azure.storage.fileshare.models.SourceLeaseAccessConditions
+        :param destination_lease_access_conditions: Parameter group. Default value is None.
+        :type destination_lease_access_conditions:
+         ~azure.storage.fileshare.models.DestinationLeaseAccessConditions
+        :param copy_file_smb_info: Parameter group. Default value is None.
+        :type copy_file_smb_info: ~azure.storage.fileshare.models.CopyFileSmbInfo
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        restype: Literal["directory"] = kwargs.pop("restype", _params.pop("restype", "directory"))
+        comp: Literal["rename"] = kwargs.pop("comp", _params.pop("comp", "rename"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _source_lease_id = None
+        _destination_lease_id = None
+        _file_attributes = None
+        _file_creation_time = None
+        _file_last_write_time = None
+        _file_change_time = None
+        if source_lease_access_conditions is not None:
+            _source_lease_id = source_lease_access_conditions.source_lease_id
+        if destination_lease_access_conditions is not None:
+            _destination_lease_id = destination_lease_access_conditions.destination_lease_id
+        if copy_file_smb_info is not None:
+            _file_attributes = copy_file_smb_info.file_attributes
+            _file_change_time = copy_file_smb_info.file_change_time
+            _file_creation_time = copy_file_smb_info.file_creation_time
+            _file_last_write_time = copy_file_smb_info.file_last_write_time
+
+        _request = build_rename_request(
+            url=self._config.url,
+            rename_source=rename_source,
+            timeout=timeout,
+            replace_if_exists=replace_if_exists,
+            ignore_read_only=ignore_read_only,
+            source_lease_id=_source_lease_id,
+            destination_lease_id=_destination_lease_id,
+            file_attributes=_file_attributes,
+            file_creation_time=_file_creation_time,
+            file_last_write_time=_file_last_write_time,
+            file_change_time=_file_change_time,
+            file_permission=file_permission,
+            file_permission_format=file_permission_format,
+            file_permission_key=file_permission_key,
+            metadata=metadata,
+            allow_trailing_dot=self._config.allow_trailing_dot,
+            allow_source_trailing_dot=self._config.allow_source_trailing_dot,
+            file_request_intent=self._config.file_request_intent,
+            restype=restype,
+            comp=comp,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+        response_headers["x-ms-request-server-encrypted"] = self._deserialize(
+            "bool", response.headers.get("x-ms-request-server-encrypted")
+        )
+        response_headers["x-ms-file-permission-key"] = self._deserialize(
+            "str", response.headers.get("x-ms-file-permission-key")
+        )
+        response_headers["x-ms-file-attributes"] = self._deserialize(
+            "str", response.headers.get("x-ms-file-attributes")
+        )
+        response_headers["x-ms-file-creation-time"] = self._deserialize(
+            "str", response.headers.get("x-ms-file-creation-time")
+        )
+        response_headers["x-ms-file-last-write-time"] = self._deserialize(
+            "str", response.headers.get("x-ms-file-last-write-time")
+        )
+        response_headers["x-ms-file-change-time"] = self._deserialize(
+            "str", response.headers.get("x-ms-file-change-time")
+        )
+        response_headers["x-ms-file-id"] = self._deserialize("str", response.headers.get("x-ms-file-id"))
+        response_headers["x-ms-file-parent-id"] = self._deserialize("str", response.headers.get("x-ms-file-parent-id"))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
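+
+    # Editor's note: a hedged usage sketch, not part of the generated code.
+    # Assuming `directory_ops` is an instance of this operations group and the
+    # source URI is hypothetical:
+    #
+    #     directory_ops.rename(
+    #         rename_source="https://myaccount.file.core.windows.net/myshare/old-dir",
+    #         replace_if_exists=True,
+    #     )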
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/fileshare/_generated/operations/_file_operations.py b/.venv/lib/python3.12/site-packages/azure/storage/fileshare/_generated/operations/_file_operations.py
new file mode 100644
index 00000000..d67f90d9
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/fileshare/_generated/operations/_file_operations.py
@@ -0,0 +1,3755 @@
+# pylint: disable=too-many-lines
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+import sys
+from typing import Any, Callable, Dict, IO, Iterator, Literal, Optional, TypeVar, Union
+
+from azure.core import PipelineClient
+from azure.core.exceptions import (
+    ClientAuthenticationError,
+    HttpResponseError,
+    ResourceExistsError,
+    ResourceNotFoundError,
+    ResourceNotModifiedError,
+    StreamClosedError,
+    StreamConsumedError,
+    map_error,
+)
+from azure.core.pipeline import PipelineResponse
+from azure.core.rest import HttpRequest, HttpResponse
+from azure.core.tracing.decorator import distributed_trace
+from azure.core.utils import case_insensitive_dict
+
+from .. import models as _models
+from .._configuration import AzureFileStorageConfiguration
+from .._serialization import Deserializer, Serializer
+
+if sys.version_info >= (3, 9):
+    from collections.abc import MutableMapping
+else:
+    from typing import MutableMapping  # type: ignore
+T = TypeVar("T")
+ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
+
+_SERIALIZER = Serializer()
+_SERIALIZER.client_side_validation = False
+
+
+def build_create_request(
+    url: str,
+    *,
+    file_content_length: int,
+    timeout: Optional[int] = None,
+    file_content_type: Optional[str] = None,
+    file_content_encoding: Optional[str] = None,
+    file_content_language: Optional[str] = None,
+    file_cache_control: Optional[str] = None,
+    file_content_md5: Optional[bytes] = None,
+    file_content_disposition: Optional[str] = None,
+    metadata: Optional[Dict[str, str]] = None,
+    file_permission: str = "inherit",
+    file_permission_format: Optional[Union[str, _models.FilePermissionFormat]] = None,
+    file_permission_key: Optional[str] = None,
+    file_attributes: str = "none",
+    file_creation_time: str = "now",
+    file_last_write_time: str = "now",
+    file_change_time: Optional[str] = None,
+    lease_id: Optional[str] = None,
+    owner: Optional[str] = None,
+    group: Optional[str] = None,
+    file_mode: Optional[str] = None,
+    nfs_file_type: Optional[Union[str, _models.NfsFileType]] = None,
+    allow_trailing_dot: Optional[bool] = None,
+    file_request_intent: Optional[Union[str, _models.ShareTokenIntent]] = None,
+    **kwargs: Any
+) -> HttpRequest:
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    file_type_constant: Literal["file"] = kwargs.pop("file_type_constant", _headers.pop("x-ms-type", "file"))
+    version: Literal["2025-05-05"] = kwargs.pop("version", _headers.pop("x-ms-version", "2025-05-05"))
+    accept = _headers.pop("Accept", "application/xml")
+
+    # Construct URL
+    _url = kwargs.pop("template_url", "{url}")
+    path_format_arguments = {
+        "url": _SERIALIZER.url("url", url, "str", skip_quote=True),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    if timeout is not None:
+        _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0)
+
+    # Construct headers
+    if allow_trailing_dot is not None:
+        _headers["x-ms-allow-trailing-dot"] = _SERIALIZER.header("allow_trailing_dot", allow_trailing_dot, "bool")
+    _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str")
+    _headers["x-ms-content-length"] = _SERIALIZER.header("file_content_length", file_content_length, "int")
+    _headers["x-ms-type"] = _SERIALIZER.header("file_type_constant", file_type_constant, "str")
+    if file_content_type is not None:
+        _headers["x-ms-content-type"] = _SERIALIZER.header("file_content_type", file_content_type, "str")
+    if file_content_encoding is not None:
+        _headers["x-ms-content-encoding"] = _SERIALIZER.header("file_content_encoding", file_content_encoding, "str")
+    if file_content_language is not None:
+        _headers["x-ms-content-language"] = _SERIALIZER.header("file_content_language", file_content_language, "str")
+    if file_cache_control is not None:
+        _headers["x-ms-cache-control"] = _SERIALIZER.header("file_cache_control", file_cache_control, "str")
+    if file_content_md5 is not None:
+        _headers["x-ms-content-md5"] = _SERIALIZER.header("file_content_md5", file_content_md5, "bytearray")
+    if file_content_disposition is not None:
+        _headers["x-ms-content-disposition"] = _SERIALIZER.header(
+            "file_content_disposition", file_content_disposition, "str"
+        )
+    if metadata is not None:
+        _headers["x-ms-meta"] = _SERIALIZER.header("metadata", metadata, "{str}")
+    if file_permission is not None:
+        _headers["x-ms-file-permission"] = _SERIALIZER.header("file_permission", file_permission, "str")
+    if file_permission_format is not None:
+        _headers["x-ms-file-permission-format"] = _SERIALIZER.header(
+            "file_permission_format", file_permission_format, "str"
+        )
+    if file_permission_key is not None:
+        _headers["x-ms-file-permission-key"] = _SERIALIZER.header("file_permission_key", file_permission_key, "str")
+    if file_attributes is not None:
+        _headers["x-ms-file-attributes"] = _SERIALIZER.header("file_attributes", file_attributes, "str")
+    if file_creation_time is not None:
+        _headers["x-ms-file-creation-time"] = _SERIALIZER.header("file_creation_time", file_creation_time, "str")
+    if file_last_write_time is not None:
+        _headers["x-ms-file-last-write-time"] = _SERIALIZER.header("file_last_write_time", file_last_write_time, "str")
+    if file_change_time is not None:
+        _headers["x-ms-file-change-time"] = _SERIALIZER.header("file_change_time", file_change_time, "str")
+    if lease_id is not None:
+        _headers["x-ms-lease-id"] = _SERIALIZER.header("lease_id", lease_id, "str")
+    if file_request_intent is not None:
+        _headers["x-ms-file-request-intent"] = _SERIALIZER.header("file_request_intent", file_request_intent, "str")
+    if owner is not None:
+        _headers["x-ms-owner"] = _SERIALIZER.header("owner", owner, "str")
+    if group is not None:
+        _headers["x-ms-group"] = _SERIALIZER.header("group", group, "str")
+    if file_mode is not None:
+        _headers["x-ms-mode"] = _SERIALIZER.header("file_mode", file_mode, "str")
+    if nfs_file_type is not None:
+        _headers["x-ms-file-file-type"] = _SERIALIZER.header("nfs_file_type", nfs_file_type, "str")
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs)
+
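+# Editor's note: an illustrative sketch, not part of the generated module. The
+# builders only assemble an HttpRequest; a pipeline still has to send it. The
+# URL below is a hypothetical file endpoint:
+#
+#     request = build_create_request(
+#         url="https://myaccount.file.core.windows.net/myshare/dir/file.txt",
+#         file_content_length=1024,
+#         file_content_type="text/plain",
+#     )
+#     assert request.method == "PUT"
+#     assert request.headers["x-ms-type"] == "file"  # fixed constant for files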
+
+def build_download_request(
+    url: str,
+    *,
+    timeout: Optional[int] = None,
+    range: Optional[str] = None,
+    range_get_content_md5: Optional[bool] = None,
+    structured_body_type: Optional[str] = None,
+    lease_id: Optional[str] = None,
+    allow_trailing_dot: Optional[bool] = None,
+    file_request_intent: Optional[Union[str, _models.ShareTokenIntent]] = None,
+    **kwargs: Any
+) -> HttpRequest:
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    version: Literal["2025-05-05"] = kwargs.pop("version", _headers.pop("x-ms-version", "2025-05-05"))
+    accept = _headers.pop("Accept", "application/xml")
+
+    # Construct URL
+    _url = kwargs.pop("template_url", "{url}")
+    path_format_arguments = {
+        "url": _SERIALIZER.url("url", url, "str", skip_quote=True),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    if timeout is not None:
+        _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0)
+
+    # Construct headers
+    if allow_trailing_dot is not None:
+        _headers["x-ms-allow-trailing-dot"] = _SERIALIZER.header("allow_trailing_dot", allow_trailing_dot, "bool")
+    _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str")
+    if range is not None:
+        _headers["x-ms-range"] = _SERIALIZER.header("range", range, "str")
+    if range_get_content_md5 is not None:
+        _headers["x-ms-range-get-content-md5"] = _SERIALIZER.header(
+            "range_get_content_md5", range_get_content_md5, "bool"
+        )
+    if structured_body_type is not None:
+        _headers["x-ms-structured-body"] = _SERIALIZER.header("structured_body_type", structured_body_type, "str")
+    if lease_id is not None:
+        _headers["x-ms-lease-id"] = _SERIALIZER.header("lease_id", lease_id, "str")
+    if file_request_intent is not None:
+        _headers["x-ms-file-request-intent"] = _SERIALIZER.header("file_request_intent", file_request_intent, "str")
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
+
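+# Editor's note: a hedged sketch, not part of the generated module; FILE_URL is
+# a hypothetical placeholder. A ranged GET that also requests an MD5 of the
+# returned range:
+#
+#     request = build_download_request(
+#         url=FILE_URL,
+#         range="bytes=0-1023",
+#         range_get_content_md5=True,  # only valid together with a range
+#     )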
+
+def build_get_properties_request(
+    url: str,
+    *,
+    sharesnapshot: Optional[str] = None,
+    timeout: Optional[int] = None,
+    lease_id: Optional[str] = None,
+    allow_trailing_dot: Optional[bool] = None,
+    file_request_intent: Optional[Union[str, _models.ShareTokenIntent]] = None,
+    **kwargs: Any
+) -> HttpRequest:
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    version: Literal["2025-05-05"] = kwargs.pop("version", _headers.pop("x-ms-version", "2025-05-05"))
+    accept = _headers.pop("Accept", "application/xml")
+
+    # Construct URL
+    _url = kwargs.pop("template_url", "{url}")
+    path_format_arguments = {
+        "url": _SERIALIZER.url("url", url, "str", skip_quote=True),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    if sharesnapshot is not None:
+        _params["sharesnapshot"] = _SERIALIZER.query("sharesnapshot", sharesnapshot, "str")
+    if timeout is not None:
+        _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0)
+
+    # Construct headers
+    if allow_trailing_dot is not None:
+        _headers["x-ms-allow-trailing-dot"] = _SERIALIZER.header("allow_trailing_dot", allow_trailing_dot, "bool")
+    _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str")
+    if lease_id is not None:
+        _headers["x-ms-lease-id"] = _SERIALIZER.header("lease_id", lease_id, "str")
+    if file_request_intent is not None:
+        _headers["x-ms-file-request-intent"] = _SERIALIZER.header("file_request_intent", file_request_intent, "str")
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="HEAD", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_delete_request(
+    url: str,
+    *,
+    timeout: Optional[int] = None,
+    lease_id: Optional[str] = None,
+    allow_trailing_dot: Optional[bool] = None,
+    file_request_intent: Optional[Union[str, _models.ShareTokenIntent]] = None,
+    **kwargs: Any
+) -> HttpRequest:
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    version: Literal["2025-05-05"] = kwargs.pop("version", _headers.pop("x-ms-version", "2025-05-05"))
+    accept = _headers.pop("Accept", "application/xml")
+
+    # Construct URL
+    _url = kwargs.pop("template_url", "{url}")
+    path_format_arguments = {
+        "url": _SERIALIZER.url("url", url, "str", skip_quote=True),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    if timeout is not None:
+        _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0)
+
+    # Construct headers
+    if allow_trailing_dot is not None:
+        _headers["x-ms-allow-trailing-dot"] = _SERIALIZER.header("allow_trailing_dot", allow_trailing_dot, "bool")
+    _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str")
+    if lease_id is not None:
+        _headers["x-ms-lease-id"] = _SERIALIZER.header("lease_id", lease_id, "str")
+    if file_request_intent is not None:
+        _headers["x-ms-file-request-intent"] = _SERIALIZER.header("file_request_intent", file_request_intent, "str")
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="DELETE", url=_url, params=_params, headers=_headers, **kwargs)
+
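+# Editor's note: hedged sketches, not part of the generated module; FILE_URL,
+# SNAPSHOT, and LEASE_ID are hypothetical placeholders. build_get_properties_request
+# issues a HEAD (properties arrive as response headers), while build_delete_request
+# issues a DELETE:
+#
+#     head_req = build_get_properties_request(url=FILE_URL, sharesnapshot=SNAPSHOT)
+#     del_req = build_delete_request(url=FILE_URL, lease_id=LEASE_ID)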
+
+def build_set_http_headers_request(
+    url: str,
+    *,
+    timeout: Optional[int] = None,
+    file_content_length: Optional[int] = None,
+    file_content_type: Optional[str] = None,
+    file_content_encoding: Optional[str] = None,
+    file_content_language: Optional[str] = None,
+    file_cache_control: Optional[str] = None,
+    file_content_md5: Optional[bytes] = None,
+    file_content_disposition: Optional[str] = None,
+    file_permission: str = "inherit",
+    file_permission_format: Optional[Union[str, _models.FilePermissionFormat]] = None,
+    file_permission_key: Optional[str] = None,
+    file_attributes: str = "none",
+    file_creation_time: str = "now",
+    file_last_write_time: str = "now",
+    file_change_time: Optional[str] = None,
+    lease_id: Optional[str] = None,
+    owner: Optional[str] = None,
+    group: Optional[str] = None,
+    file_mode: Optional[str] = None,
+    allow_trailing_dot: Optional[bool] = None,
+    file_request_intent: Optional[Union[str, _models.ShareTokenIntent]] = None,
+    **kwargs: Any
+) -> HttpRequest:
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    comp: Literal["properties"] = kwargs.pop("comp", _params.pop("comp", "properties"))
+    version: Literal["2025-05-05"] = kwargs.pop("version", _headers.pop("x-ms-version", "2025-05-05"))
+    accept = _headers.pop("Accept", "application/xml")
+
+    # Construct URL
+    _url = kwargs.pop("template_url", "{url}")
+    path_format_arguments = {
+        "url": _SERIALIZER.url("url", url, "str", skip_quote=True),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    _params["comp"] = _SERIALIZER.query("comp", comp, "str")
+    if timeout is not None:
+        _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0)
+
+    # Construct headers
+    _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str")
+    if file_content_length is not None:
+        _headers["x-ms-content-length"] = _SERIALIZER.header("file_content_length", file_content_length, "int")
+    if file_content_type is not None:
+        _headers["x-ms-content-type"] = _SERIALIZER.header("file_content_type", file_content_type, "str")
+    if file_content_encoding is not None:
+        _headers["x-ms-content-encoding"] = _SERIALIZER.header("file_content_encoding", file_content_encoding, "str")
+    if file_content_language is not None:
+        _headers["x-ms-content-language"] = _SERIALIZER.header("file_content_language", file_content_language, "str")
+    if file_cache_control is not None:
+        _headers["x-ms-cache-control"] = _SERIALIZER.header("file_cache_control", file_cache_control, "str")
+    if file_content_md5 is not None:
+        _headers["x-ms-content-md5"] = _SERIALIZER.header("file_content_md5", file_content_md5, "bytearray")
+    if file_content_disposition is not None:
+        _headers["x-ms-content-disposition"] = _SERIALIZER.header(
+            "file_content_disposition", file_content_disposition, "str"
+        )
+    if file_permission is not None:
+        _headers["x-ms-file-permission"] = _SERIALIZER.header("file_permission", file_permission, "str")
+    if file_permission_format is not None:
+        _headers["x-ms-file-permission-format"] = _SERIALIZER.header(
+            "file_permission_format", file_permission_format, "str"
+        )
+    if file_permission_key is not None:
+        _headers["x-ms-file-permission-key"] = _SERIALIZER.header("file_permission_key", file_permission_key, "str")
+    if file_attributes is not None:
+        _headers["x-ms-file-attributes"] = _SERIALIZER.header("file_attributes", file_attributes, "str")
+    if file_creation_time is not None:
+        _headers["x-ms-file-creation-time"] = _SERIALIZER.header("file_creation_time", file_creation_time, "str")
+    if file_last_write_time is not None:
+        _headers["x-ms-file-last-write-time"] = _SERIALIZER.header("file_last_write_time", file_last_write_time, "str")
+    if file_change_time is not None:
+        _headers["x-ms-file-change-time"] = _SERIALIZER.header("file_change_time", file_change_time, "str")
+    if lease_id is not None:
+        _headers["x-ms-lease-id"] = _SERIALIZER.header("lease_id", lease_id, "str")
+    if allow_trailing_dot is not None:
+        _headers["x-ms-allow-trailing-dot"] = _SERIALIZER.header("allow_trailing_dot", allow_trailing_dot, "bool")
+    if file_request_intent is not None:
+        _headers["x-ms-file-request-intent"] = _SERIALIZER.header("file_request_intent", file_request_intent, "str")
+    if owner is not None:
+        _headers["x-ms-owner"] = _SERIALIZER.header("owner", owner, "str")
+    if group is not None:
+        _headers["x-ms-group"] = _SERIALIZER.header("group", group, "str")
+    if file_mode is not None:
+        _headers["x-ms-mode"] = _SERIALIZER.header("file_mode", file_mode, "str")
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs)
+
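+# Editor's note: a hedged sketch, not part of the generated module; FILE_URL is
+# a hypothetical placeholder. The comp=properties query parameter is injected
+# automatically, and passing file_content_length resizes the file:
+#
+#     request = build_set_http_headers_request(
+#         url=FILE_URL,
+#         file_content_type="application/json",
+#         file_cache_control="no-cache",
+#     )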
+
+def build_set_metadata_request(
+    url: str,
+    *,
+    timeout: Optional[int] = None,
+    metadata: Optional[Dict[str, str]] = None,
+    lease_id: Optional[str] = None,
+    allow_trailing_dot: Optional[bool] = None,
+    file_request_intent: Optional[Union[str, _models.ShareTokenIntent]] = None,
+    **kwargs: Any
+) -> HttpRequest:
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    comp: Literal["metadata"] = kwargs.pop("comp", _params.pop("comp", "metadata"))
+    version: Literal["2025-05-05"] = kwargs.pop("version", _headers.pop("x-ms-version", "2025-05-05"))
+    accept = _headers.pop("Accept", "application/xml")
+
+    # Construct URL
+    _url = kwargs.pop("template_url", "{url}")
+    path_format_arguments = {
+        "url": _SERIALIZER.url("url", url, "str", skip_quote=True),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    _params["comp"] = _SERIALIZER.query("comp", comp, "str")
+    if timeout is not None:
+        _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0)
+
+    # Construct headers
+    if metadata is not None:
+        _headers["x-ms-meta"] = _SERIALIZER.header("metadata", metadata, "{str}")
+    _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str")
+    if lease_id is not None:
+        _headers["x-ms-lease-id"] = _SERIALIZER.header("lease_id", lease_id, "str")
+    if allow_trailing_dot is not None:
+        _headers["x-ms-allow-trailing-dot"] = _SERIALIZER.header("allow_trailing_dot", allow_trailing_dot, "bool")
+    if file_request_intent is not None:
+        _headers["x-ms-file-request-intent"] = _SERIALIZER.header("file_request_intent", file_request_intent, "str")
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs)
+
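+# Editor's note: a hedged sketch, not part of the generated module; FILE_URL is
+# a hypothetical placeholder. Name-value pairs travel to the service as x-ms-meta
+# headers on a PUT with comp=metadata:
+#
+#     request = build_set_metadata_request(url=FILE_URL, metadata={"project": "demo"})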
+
+def build_acquire_lease_request(
+    url: str,
+    *,
+    timeout: Optional[int] = None,
+    duration: Optional[int] = None,
+    proposed_lease_id: Optional[str] = None,
+    request_id_parameter: Optional[str] = None,
+    allow_trailing_dot: Optional[bool] = None,
+    file_request_intent: Optional[Union[str, _models.ShareTokenIntent]] = None,
+    **kwargs: Any
+) -> HttpRequest:
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    comp: Literal["lease"] = kwargs.pop("comp", _params.pop("comp", "lease"))
+    action: Literal["acquire"] = kwargs.pop("action", _headers.pop("x-ms-lease-action", "acquire"))
+    version: Literal["2025-05-05"] = kwargs.pop("version", _headers.pop("x-ms-version", "2025-05-05"))
+    accept = _headers.pop("Accept", "application/xml")
+
+    # Construct URL
+    _url = kwargs.pop("template_url", "{url}")
+    path_format_arguments = {
+        "url": _SERIALIZER.url("url", url, "str", skip_quote=True),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    _params["comp"] = _SERIALIZER.query("comp", comp, "str")
+    if timeout is not None:
+        _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0)
+
+    # Construct headers
+    _headers["x-ms-lease-action"] = _SERIALIZER.header("action", action, "str")
+    if duration is not None:
+        _headers["x-ms-lease-duration"] = _SERIALIZER.header("duration", duration, "int")
+    if proposed_lease_id is not None:
+        _headers["x-ms-proposed-lease-id"] = _SERIALIZER.header("proposed_lease_id", proposed_lease_id, "str")
+    _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str")
+    if request_id_parameter is not None:
+        _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str")
+    if allow_trailing_dot is not None:
+        _headers["x-ms-allow-trailing-dot"] = _SERIALIZER.header("allow_trailing_dot", allow_trailing_dot, "bool")
+    if file_request_intent is not None:
+        _headers["x-ms-file-request-intent"] = _SERIALIZER.header("file_request_intent", file_request_intent, "str")
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_release_lease_request(
+    url: str,
+    *,
+    lease_id: str,
+    timeout: Optional[int] = None,
+    request_id_parameter: Optional[str] = None,
+    allow_trailing_dot: Optional[bool] = None,
+    file_request_intent: Optional[Union[str, _models.ShareTokenIntent]] = None,
+    **kwargs: Any
+) -> HttpRequest:
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    comp: Literal["lease"] = kwargs.pop("comp", _params.pop("comp", "lease"))
+    action: Literal["release"] = kwargs.pop("action", _headers.pop("x-ms-lease-action", "release"))
+    version: Literal["2025-05-05"] = kwargs.pop("version", _headers.pop("x-ms-version", "2025-05-05"))
+    accept = _headers.pop("Accept", "application/xml")
+
+    # Construct URL
+    _url = kwargs.pop("template_url", "{url}")
+    path_format_arguments = {
+        "url": _SERIALIZER.url("url", url, "str", skip_quote=True),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    _params["comp"] = _SERIALIZER.query("comp", comp, "str")
+    if timeout is not None:
+        _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0)
+
+    # Construct headers
+    _headers["x-ms-lease-action"] = _SERIALIZER.header("action", action, "str")
+    _headers["x-ms-lease-id"] = _SERIALIZER.header("lease_id", lease_id, "str")
+    _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str")
+    if request_id_parameter is not None:
+        _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str")
+    if allow_trailing_dot is not None:
+        _headers["x-ms-allow-trailing-dot"] = _SERIALIZER.header("allow_trailing_dot", allow_trailing_dot, "bool")
+    if file_request_intent is not None:
+        _headers["x-ms-file-request-intent"] = _SERIALIZER.header("file_request_intent", file_request_intent, "str")
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_change_lease_request(
+    url: str,
+    *,
+    lease_id: str,
+    timeout: Optional[int] = None,
+    proposed_lease_id: Optional[str] = None,
+    request_id_parameter: Optional[str] = None,
+    allow_trailing_dot: Optional[bool] = None,
+    file_request_intent: Optional[Union[str, _models.ShareTokenIntent]] = None,
+    **kwargs: Any
+) -> HttpRequest:
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    comp: Literal["lease"] = kwargs.pop("comp", _params.pop("comp", "lease"))
+    action: Literal["change"] = kwargs.pop("action", _headers.pop("x-ms-lease-action", "change"))
+    version: Literal["2025-05-05"] = kwargs.pop("version", _headers.pop("x-ms-version", "2025-05-05"))
+    accept = _headers.pop("Accept", "application/xml")
+
+    # Construct URL
+    _url = kwargs.pop("template_url", "{url}")
+    path_format_arguments = {
+        "url": _SERIALIZER.url("url", url, "str", skip_quote=True),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    _params["comp"] = _SERIALIZER.query("comp", comp, "str")
+    if timeout is not None:
+        _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0)
+
+    # Construct headers
+    _headers["x-ms-lease-action"] = _SERIALIZER.header("action", action, "str")
+    _headers["x-ms-lease-id"] = _SERIALIZER.header("lease_id", lease_id, "str")
+    if proposed_lease_id is not None:
+        _headers["x-ms-proposed-lease-id"] = _SERIALIZER.header("proposed_lease_id", proposed_lease_id, "str")
+    _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str")
+    if request_id_parameter is not None:
+        _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str")
+    if allow_trailing_dot is not None:
+        _headers["x-ms-allow-trailing-dot"] = _SERIALIZER.header("allow_trailing_dot", allow_trailing_dot, "bool")
+    if file_request_intent is not None:
+        _headers["x-ms-file-request-intent"] = _SERIALIZER.header("file_request_intent", file_request_intent, "str")
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_break_lease_request(
+    url: str,
+    *,
+    timeout: Optional[int] = None,
+    lease_id: Optional[str] = None,
+    request_id_parameter: Optional[str] = None,
+    allow_trailing_dot: Optional[bool] = None,
+    file_request_intent: Optional[Union[str, _models.ShareTokenIntent]] = None,
+    **kwargs: Any
+) -> HttpRequest:
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    comp: Literal["lease"] = kwargs.pop("comp", _params.pop("comp", "lease"))
+    action: Literal["break"] = kwargs.pop("action", _headers.pop("x-ms-lease-action", "break"))
+    version: Literal["2025-05-05"] = kwargs.pop("version", _headers.pop("x-ms-version", "2025-05-05"))
+    accept = _headers.pop("Accept", "application/xml")
+
+    # Construct URL
+    _url = kwargs.pop("template_url", "{url}")
+    path_format_arguments = {
+        "url": _SERIALIZER.url("url", url, "str", skip_quote=True),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    _params["comp"] = _SERIALIZER.query("comp", comp, "str")
+    if timeout is not None:
+        _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0)
+
+    # Construct headers
+    _headers["x-ms-lease-action"] = _SERIALIZER.header("action", action, "str")
+    if lease_id is not None:
+        _headers["x-ms-lease-id"] = _SERIALIZER.header("lease_id", lease_id, "str")
+    _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str")
+    if request_id_parameter is not None:
+        _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str")
+    if allow_trailing_dot is not None:
+        _headers["x-ms-allow-trailing-dot"] = _SERIALIZER.header("allow_trailing_dot", allow_trailing_dot, "bool")
+    if file_request_intent is not None:
+        _headers["x-ms-file-request-intent"] = _SERIALIZER.header("file_request_intent", file_request_intent, "str")
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs)
+
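+# Editor's note: a hedged sketch, not part of the generated module; FILE_URL and
+# the lease IDs are hypothetical. All four lease builders target the same
+# comp=lease endpoint and differ only in the x-ms-lease-action header. File
+# leases are infinite, so acquire uses duration=-1:
+#
+#     acquire = build_acquire_lease_request(url=FILE_URL, duration=-1)
+#     change = build_change_lease_request(url=FILE_URL, lease_id=OLD_ID,
+#                                         proposed_lease_id=NEW_ID)
+#     release = build_release_lease_request(url=FILE_URL, lease_id=NEW_ID)
+#     brk = build_break_lease_request(url=FILE_URL)  # no lease_id required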
+
+def build_upload_range_request(
+    url: str,
+    *,
+    range: str,
+    content_length: int,
+    timeout: Optional[int] = None,
+    file_range_write: Union[str, _models.FileRangeWriteType] = "update",
+    content_md5: Optional[bytes] = None,
+    lease_id: Optional[str] = None,
+    file_last_written_mode: Optional[Union[str, _models.FileLastWrittenMode]] = None,
+    structured_body_type: Optional[str] = None,
+    structured_content_length: Optional[int] = None,
+    content: Optional[IO[bytes]] = None,
+    allow_trailing_dot: Optional[bool] = None,
+    file_request_intent: Optional[Union[str, _models.ShareTokenIntent]] = None,
+    **kwargs: Any
+) -> HttpRequest:
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    comp: Literal["range"] = kwargs.pop("comp", _params.pop("comp", "range"))
+    content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+    version: Literal["2025-05-05"] = kwargs.pop("version", _headers.pop("x-ms-version", "2025-05-05"))
+    accept = _headers.pop("Accept", "application/xml")
+
+    # Construct URL
+    _url = kwargs.pop("template_url", "{url}")
+    path_format_arguments = {
+        "url": _SERIALIZER.url("url", url, "str", skip_quote=True),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    _params["comp"] = _SERIALIZER.query("comp", comp, "str")
+    if timeout is not None:
+        _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0)
+
+    # Construct headers
+    _headers["x-ms-range"] = _SERIALIZER.header("range", range, "str")
+    _headers["x-ms-write"] = _SERIALIZER.header("file_range_write", file_range_write, "str")
+    _headers["Content-Length"] = _SERIALIZER.header("content_length", content_length, "int")
+    if content_md5 is not None:
+        _headers["Content-MD5"] = _SERIALIZER.header("content_md5", content_md5, "bytearray")
+    _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str")
+    if lease_id is not None:
+        _headers["x-ms-lease-id"] = _SERIALIZER.header("lease_id", lease_id, "str")
+    if file_last_written_mode is not None:
+        _headers["x-ms-file-last-write-time"] = _SERIALIZER.header(
+            "file_last_written_mode", file_last_written_mode, "str"
+        )
+    if allow_trailing_dot is not None:
+        _headers["x-ms-allow-trailing-dot"] = _SERIALIZER.header("allow_trailing_dot", allow_trailing_dot, "bool")
+    if file_request_intent is not None:
+        _headers["x-ms-file-request-intent"] = _SERIALIZER.header("file_request_intent", file_request_intent, "str")
+    if structured_body_type is not None:
+        _headers["x-ms-structured-body"] = _SERIALIZER.header("structured_body_type", structured_body_type, "str")
+    if structured_content_length is not None:
+        _headers["x-ms-structured-content-length"] = _SERIALIZER.header(
+            "structured_content_length", structured_content_length, "int"
+        )
+    if content_type is not None:
+        _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, content=content, **kwargs)
+
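+# Editor's note: a hedged sketch, not part of the generated module; FILE_URL is
+# a hypothetical placeholder. x-ms-write defaults to "update"; passing
+# file_range_write="clear" (with content_length=0 and no body) zeroes the range:
+#
+#     with open("chunk.bin", "rb") as data:  # hypothetical 1 KiB payload
+#         request = build_upload_range_request(
+#             url=FILE_URL,
+#             range="bytes=0-1023",
+#             content_length=1024,
+#             content=data,
+#         )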
+
+def build_upload_range_from_url_request(
+    url: str,
+    *,
+    range: str,
+    copy_source: str,
+    content_length: int,
+    timeout: Optional[int] = None,
+    source_range: Optional[str] = None,
+    source_content_crc64: Optional[bytes] = None,
+    source_if_match_crc64: Optional[bytes] = None,
+    source_if_none_match_crc64: Optional[bytes] = None,
+    lease_id: Optional[str] = None,
+    copy_source_authorization: Optional[str] = None,
+    file_last_written_mode: Optional[Union[str, _models.FileLastWrittenMode]] = None,
+    allow_trailing_dot: Optional[bool] = None,
+    allow_source_trailing_dot: Optional[bool] = None,
+    file_request_intent: Optional[Union[str, _models.ShareTokenIntent]] = None,
+    **kwargs: Any
+) -> HttpRequest:
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    comp: Literal["range"] = kwargs.pop("comp", _params.pop("comp", "range"))
+    file_range_write_from_url: Literal["update"] = kwargs.pop(
+        "file_range_write_from_url", _headers.pop("x-ms-write", "update")
+    )
+    version: Literal["2025-05-05"] = kwargs.pop("version", _headers.pop("x-ms-version", "2025-05-05"))
+    accept = _headers.pop("Accept", "application/xml")
+
+    # Construct URL
+    _url = kwargs.pop("template_url", "{url}")
+    path_format_arguments = {
+        "url": _SERIALIZER.url("url", url, "str", skip_quote=True),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    _params["comp"] = _SERIALIZER.query("comp", comp, "str")
+    if timeout is not None:
+        _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0)
+
+    # Construct headers
+    _headers["x-ms-range"] = _SERIALIZER.header("range", range, "str")
+    _headers["x-ms-copy-source"] = _SERIALIZER.header("copy_source", copy_source, "str")
+    if source_range is not None:
+        _headers["x-ms-source-range"] = _SERIALIZER.header("source_range", source_range, "str")
+    _headers["x-ms-write"] = _SERIALIZER.header("file_range_write_from_url", file_range_write_from_url, "str")
+    _headers["Content-Length"] = _SERIALIZER.header("content_length", content_length, "int")
+    if source_content_crc64 is not None:
+        _headers["x-ms-source-content-crc64"] = _SERIALIZER.header(
+            "source_content_crc64", source_content_crc64, "bytearray"
+        )
+    if source_if_match_crc64 is not None:
+        _headers["x-ms-source-if-match-crc64"] = _SERIALIZER.header(
+            "source_if_match_crc64", source_if_match_crc64, "bytearray"
+        )
+    if source_if_none_match_crc64 is not None:
+        _headers["x-ms-source-if-none-match-crc64"] = _SERIALIZER.header(
+            "source_if_none_match_crc64", source_if_none_match_crc64, "bytearray"
+        )
+    _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str")
+    if lease_id is not None:
+        _headers["x-ms-lease-id"] = _SERIALIZER.header("lease_id", lease_id, "str")
+    if copy_source_authorization is not None:
+        _headers["x-ms-copy-source-authorization"] = _SERIALIZER.header(
+            "copy_source_authorization", copy_source_authorization, "str"
+        )
+    if file_last_written_mode is not None:
+        _headers["x-ms-file-last-write-time"] = _SERIALIZER.header(
+            "file_last_written_mode", file_last_written_mode, "str"
+        )
+    if allow_trailing_dot is not None:
+        _headers["x-ms-allow-trailing-dot"] = _SERIALIZER.header("allow_trailing_dot", allow_trailing_dot, "bool")
+    if allow_source_trailing_dot is not None:
+        _headers["x-ms-source-allow-trailing-dot"] = _SERIALIZER.header(
+            "allow_source_trailing_dot", allow_source_trailing_dot, "bool"
+        )
+    if file_request_intent is not None:
+        _headers["x-ms-file-request-intent"] = _SERIALIZER.header("file_request_intent", file_request_intent, "str")
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs)
+
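+# Editor's note: a hedged sketch, not part of the generated module; the URLs are
+# hypothetical, and the source must be independently readable (e.g. via SAS or
+# copy_source_authorization). The request body stays empty, so content_length is 0:
+#
+#     request = build_upload_range_from_url_request(
+#         url=DEST_FILE_URL,
+#         range="bytes=0-1023",
+#         copy_source=SOURCE_FILE_URL_WITH_SAS,
+#         content_length=0,
+#         source_range="bytes=0-1023",
+#     )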
+
+def build_get_range_list_request(
+    url: str,
+    *,
+    sharesnapshot: Optional[str] = None,
+    prevsharesnapshot: Optional[str] = None,
+    timeout: Optional[int] = None,
+    range: Optional[str] = None,
+    lease_id: Optional[str] = None,
+    support_rename: Optional[bool] = None,
+    allow_trailing_dot: Optional[bool] = None,
+    file_request_intent: Optional[Union[str, _models.ShareTokenIntent]] = None,
+    **kwargs: Any
+) -> HttpRequest:
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    comp: Literal["rangelist"] = kwargs.pop("comp", _params.pop("comp", "rangelist"))
+    version: Literal["2025-05-05"] = kwargs.pop("version", _headers.pop("x-ms-version", "2025-05-05"))
+    accept = _headers.pop("Accept", "application/xml")
+
+    # Construct URL
+    _url = kwargs.pop("template_url", "{url}")
+    path_format_arguments = {
+        "url": _SERIALIZER.url("url", url, "str", skip_quote=True),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    _params["comp"] = _SERIALIZER.query("comp", comp, "str")
+    if sharesnapshot is not None:
+        _params["sharesnapshot"] = _SERIALIZER.query("sharesnapshot", sharesnapshot, "str")
+    if prevsharesnapshot is not None:
+        _params["prevsharesnapshot"] = _SERIALIZER.query("prevsharesnapshot", prevsharesnapshot, "str")
+    if timeout is not None:
+        _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0)
+
+    # Construct headers
+    _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str")
+    if range is not None:
+        _headers["x-ms-range"] = _SERIALIZER.header("range", range, "str")
+    if lease_id is not None:
+        _headers["x-ms-lease-id"] = _SERIALIZER.header("lease_id", lease_id, "str")
+    if allow_trailing_dot is not None:
+        _headers["x-ms-allow-trailing-dot"] = _SERIALIZER.header("allow_trailing_dot", allow_trailing_dot, "bool")
+    if file_request_intent is not None:
+        _headers["x-ms-file-request-intent"] = _SERIALIZER.header("file_request_intent", file_request_intent, "str")
+    if support_rename is not None:
+        _headers["x-ms-file-support-rename"] = _SERIALIZER.header("support_rename", support_rename, "bool")
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
+
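+# Editor's note: a hedged sketch, not part of the generated module; FILE_URL and
+# PREV_SNAPSHOT are hypothetical. With prevsharesnapshot set, the service returns
+# only the ranges that changed since that snapshot instead of the full list:
+#
+#     request = build_get_range_list_request(url=FILE_URL, prevsharesnapshot=PREV_SNAPSHOT)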
+
+def build_start_copy_request(
+    url: str,
+    *,
+    copy_source: str,
+    timeout: Optional[int] = None,
+    metadata: Optional[Dict[str, str]] = None,
+    file_permission: str = "inherit",
+    file_permission_format: Optional[Union[str, _models.FilePermissionFormat]] = None,
+    file_permission_key: Optional[str] = None,
+    file_permission_copy_mode: Optional[Union[str, _models.PermissionCopyModeType]] = None,
+    ignore_read_only: Optional[bool] = None,
+    file_attributes: Optional[str] = None,
+    file_creation_time: Optional[str] = None,
+    file_last_write_time: Optional[str] = None,
+    file_change_time: Optional[str] = None,
+    set_archive_attribute: Optional[bool] = None,
+    lease_id: Optional[str] = None,
+    owner: Optional[str] = None,
+    group: Optional[str] = None,
+    file_mode: Optional[str] = None,
+    file_mode_copy_mode: Optional[Union[str, _models.ModeCopyMode]] = None,
+    file_owner_copy_mode: Optional[Union[str, _models.OwnerCopyMode]] = None,
+    allow_trailing_dot: Optional[bool] = None,
+    allow_source_trailing_dot: Optional[bool] = None,
+    file_request_intent: Optional[Union[str, _models.ShareTokenIntent]] = None,
+    **kwargs: Any
+) -> HttpRequest:
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    version: Literal["2025-05-05"] = kwargs.pop("version", _headers.pop("x-ms-version", "2025-05-05"))
+    accept = _headers.pop("Accept", "application/xml")
+
+    # Construct URL
+    _url = kwargs.pop("template_url", "{url}")
+    path_format_arguments = {
+        "url": _SERIALIZER.url("url", url, "str", skip_quote=True),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    if timeout is not None:
+        _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0)
+
+    # Construct headers
+    _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str")
+    if metadata is not None:
+        _headers["x-ms-meta"] = _SERIALIZER.header("metadata", metadata, "{str}")
+    _headers["x-ms-copy-source"] = _SERIALIZER.header("copy_source", copy_source, "str")
+    if file_permission is not None:
+        _headers["x-ms-file-permission"] = _SERIALIZER.header("file_permission", file_permission, "str")
+    if file_permission_format is not None:
+        _headers["x-ms-file-permission-format"] = _SERIALIZER.header(
+            "file_permission_format", file_permission_format, "str"
+        )
+    if file_permission_key is not None:
+        _headers["x-ms-file-permission-key"] = _SERIALIZER.header("file_permission_key", file_permission_key, "str")
+    if file_permission_copy_mode is not None:
+        _headers["x-ms-file-permission-copy-mode"] = _SERIALIZER.header(
+            "file_permission_copy_mode", file_permission_copy_mode, "str"
+        )
+    if ignore_read_only is not None:
+        _headers["x-ms-file-copy-ignore-readonly"] = _SERIALIZER.header("ignore_read_only", ignore_read_only, "bool")
+    if file_attributes is not None:
+        _headers["x-ms-file-attributes"] = _SERIALIZER.header("file_attributes", file_attributes, "str")
+    if file_creation_time is not None:
+        _headers["x-ms-file-creation-time"] = _SERIALIZER.header("file_creation_time", file_creation_time, "str")
+    if file_last_write_time is not None:
+        _headers["x-ms-file-last-write-time"] = _SERIALIZER.header("file_last_write_time", file_last_write_time, "str")
+    if file_change_time is not None:
+        _headers["x-ms-file-change-time"] = _SERIALIZER.header("file_change_time", file_change_time, "str")
+    if set_archive_attribute is not None:
+        _headers["x-ms-file-copy-set-archive"] = _SERIALIZER.header(
+            "set_archive_attribute", set_archive_attribute, "bool"
+        )
+    if lease_id is not None:
+        _headers["x-ms-lease-id"] = _SERIALIZER.header("lease_id", lease_id, "str")
+    if allow_trailing_dot is not None:
+        _headers["x-ms-allow-trailing-dot"] = _SERIALIZER.header("allow_trailing_dot", allow_trailing_dot, "bool")
+    if allow_source_trailing_dot is not None:
+        _headers["x-ms-source-allow-trailing-dot"] = _SERIALIZER.header(
+            "allow_source_trailing_dot", allow_source_trailing_dot, "bool"
+        )
+    if file_request_intent is not None:
+        _headers["x-ms-file-request-intent"] = _SERIALIZER.header("file_request_intent", file_request_intent, "str")
+    if owner is not None:
+        _headers["x-ms-owner"] = _SERIALIZER.header("owner", owner, "str")
+    if group is not None:
+        _headers["x-ms-group"] = _SERIALIZER.header("group", group, "str")
+    if file_mode is not None:
+        _headers["x-ms-mode"] = _SERIALIZER.header("file_mode", file_mode, "str")
+    if file_mode_copy_mode is not None:
+        _headers["x-ms-file-mode-copy-mode"] = _SERIALIZER.header("file_mode_copy_mode", file_mode_copy_mode, "str")
+    if file_owner_copy_mode is not None:
+        _headers["x-ms-file-owner-copy-mode"] = _SERIALIZER.header("file_owner_copy_mode", file_owner_copy_mode, "str")
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_abort_copy_request(
+    url: str,
+    *,
+    copy_id: str,
+    timeout: Optional[int] = None,
+    lease_id: Optional[str] = None,
+    allow_trailing_dot: Optional[bool] = None,
+    file_request_intent: Optional[Union[str, _models.ShareTokenIntent]] = None,
+    **kwargs: Any
+) -> HttpRequest:
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    comp: Literal["copy"] = kwargs.pop("comp", _params.pop("comp", "copy"))
+    copy_action_abort_constant: Literal["abort"] = kwargs.pop(
+        "copy_action_abort_constant", _headers.pop("x-ms-copy-action", "abort")
+    )
+    version: Literal["2025-05-05"] = kwargs.pop("version", _headers.pop("x-ms-version", "2025-05-05"))
+    accept = _headers.pop("Accept", "application/xml")
+
+    # Construct URL
+    _url = kwargs.pop("template_url", "{url}")
+    path_format_arguments = {
+        "url": _SERIALIZER.url("url", url, "str", skip_quote=True),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    _params["comp"] = _SERIALIZER.query("comp", comp, "str")
+    _params["copyid"] = _SERIALIZER.query("copy_id", copy_id, "str")
+    if timeout is not None:
+        _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0)
+
+    # Construct headers
+    _headers["x-ms-copy-action"] = _SERIALIZER.header("copy_action_abort_constant", copy_action_abort_constant, "str")
+    _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str")
+    if lease_id is not None:
+        _headers["x-ms-lease-id"] = _SERIALIZER.header("lease_id", lease_id, "str")
+    if allow_trailing_dot is not None:
+        _headers["x-ms-allow-trailing-dot"] = _SERIALIZER.header("allow_trailing_dot", allow_trailing_dot, "bool")
+    if file_request_intent is not None:
+        _headers["x-ms-file-request-intent"] = _SERIALIZER.header("file_request_intent", file_request_intent, "str")
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs)
+
+
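+# Illustrative sketch only, not part of the generated module: invoking the
+# builder above directly. The account URL and copy id are placeholder values.
+#
+#     request = build_abort_copy_request(
+#         url="https://myaccount.file.core.windows.net/myshare/dir/file.txt",
+#         copy_id="<pending-copy-id>",
+#         timeout=30,
+#     )
+#     # -> an HttpRequest with method "PUT" and x-ms-copy-action: abort set.
+
+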
+def build_list_handles_request(
+    url: str,
+    *,
+    marker: Optional[str] = None,
+    maxresults: Optional[int] = None,
+    timeout: Optional[int] = None,
+    sharesnapshot: Optional[str] = None,
+    allow_trailing_dot: Optional[bool] = None,
+    file_request_intent: Optional[Union[str, _models.ShareTokenIntent]] = None,
+    **kwargs: Any
+) -> HttpRequest:
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    comp: Literal["listhandles"] = kwargs.pop("comp", _params.pop("comp", "listhandles"))
+    version: Literal["2025-05-05"] = kwargs.pop("version", _headers.pop("x-ms-version", "2025-05-05"))
+    accept = _headers.pop("Accept", "application/xml")
+
+    # Construct URL
+    _url = kwargs.pop("template_url", "{url}")
+    path_format_arguments = {
+        "url": _SERIALIZER.url("url", url, "str", skip_quote=True),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    _params["comp"] = _SERIALIZER.query("comp", comp, "str")
+    if marker is not None:
+        _params["marker"] = _SERIALIZER.query("marker", marker, "str")
+    if maxresults is not None:
+        _params["maxresults"] = _SERIALIZER.query("maxresults", maxresults, "int", minimum=1)
+    if timeout is not None:
+        _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0)
+    if sharesnapshot is not None:
+        _params["sharesnapshot"] = _SERIALIZER.query("sharesnapshot", sharesnapshot, "str")
+
+    # Construct headers
+    _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str")
+    if allow_trailing_dot is not None:
+        _headers["x-ms-allow-trailing-dot"] = _SERIALIZER.header("allow_trailing_dot", allow_trailing_dot, "bool")
+    if file_request_intent is not None:
+        _headers["x-ms-file-request-intent"] = _SERIALIZER.header("file_request_intent", file_request_intent, "str")
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
+
+
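+# Illustrative sketch only: paging handles with the builder above. maxresults
+# caps each page; pass the NextMarker value from the previous response as
+# marker to fetch the following page. The URL is a placeholder.
+#
+#     request = build_list_handles_request(
+#         url="https://myaccount.file.core.windows.net/myshare/dir/file.txt",
+#         maxresults=100,
+#         marker=None,
+#     )
+
+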
+def build_force_close_handles_request(
+    url: str,
+    *,
+    handle_id: str,
+    timeout: Optional[int] = None,
+    marker: Optional[str] = None,
+    sharesnapshot: Optional[str] = None,
+    allow_trailing_dot: Optional[bool] = None,
+    file_request_intent: Optional[Union[str, _models.ShareTokenIntent]] = None,
+    **kwargs: Any
+) -> HttpRequest:
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    comp: Literal["forceclosehandles"] = kwargs.pop("comp", _params.pop("comp", "forceclosehandles"))
+    version: Literal["2025-05-05"] = kwargs.pop("version", _headers.pop("x-ms-version", "2025-05-05"))
+    accept = _headers.pop("Accept", "application/xml")
+
+    # Construct URL
+    _url = kwargs.pop("template_url", "{url}")
+    path_format_arguments = {
+        "url": _SERIALIZER.url("url", url, "str", skip_quote=True),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    _params["comp"] = _SERIALIZER.query("comp", comp, "str")
+    if timeout is not None:
+        _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0)
+    if marker is not None:
+        _params["marker"] = _SERIALIZER.query("marker", marker, "str")
+    if sharesnapshot is not None:
+        _params["sharesnapshot"] = _SERIALIZER.query("sharesnapshot", sharesnapshot, "str")
+
+    # Construct headers
+    _headers["x-ms-handle-id"] = _SERIALIZER.header("handle_id", handle_id, "str")
+    _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str")
+    if allow_trailing_dot is not None:
+        _headers["x-ms-allow-trailing-dot"] = _SERIALIZER.header("allow_trailing_dot", allow_trailing_dot, "bool")
+    if file_request_intent is not None:
+        _headers["x-ms-file-request-intent"] = _SERIALIZER.header("file_request_intent", file_request_intent, "str")
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs)
+
+
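+# Illustrative sketch only: force-closing handles on a file. The service
+# accepts the wildcard handle id "*" to close every open handle; the URL is a
+# placeholder.
+#
+#     request = build_force_close_handles_request(
+#         url="https://myaccount.file.core.windows.net/myshare/dir/file.txt",
+#         handle_id="*",
+#     )
+
+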
+def build_rename_request(
+    url: str,
+    *,
+    rename_source: str,
+    timeout: Optional[int] = None,
+    replace_if_exists: Optional[bool] = None,
+    ignore_read_only: Optional[bool] = None,
+    source_lease_id: Optional[str] = None,
+    destination_lease_id: Optional[str] = None,
+    file_attributes: Optional[str] = None,
+    file_creation_time: Optional[str] = None,
+    file_last_write_time: Optional[str] = None,
+    file_change_time: Optional[str] = None,
+    file_permission: str = "inherit",
+    file_permission_format: Optional[Union[str, _models.FilePermissionFormat]] = None,
+    file_permission_key: Optional[str] = None,
+    metadata: Optional[Dict[str, str]] = None,
+    file_content_type: Optional[str] = None,
+    allow_trailing_dot: Optional[bool] = None,
+    allow_source_trailing_dot: Optional[bool] = None,
+    file_request_intent: Optional[Union[str, _models.ShareTokenIntent]] = None,
+    **kwargs: Any
+) -> HttpRequest:
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    comp: Literal["rename"] = kwargs.pop("comp", _params.pop("comp", "rename"))
+    version: Literal["2025-05-05"] = kwargs.pop("version", _headers.pop("x-ms-version", "2025-05-05"))
+    accept = _headers.pop("Accept", "application/xml")
+
+    # Construct URL
+    _url = kwargs.pop("template_url", "{url}")
+    path_format_arguments = {
+        "url": _SERIALIZER.url("url", url, "str", skip_quote=True),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    _params["comp"] = _SERIALIZER.query("comp", comp, "str")
+    if timeout is not None:
+        _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0)
+
+    # Construct headers
+    _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str")
+    _headers["x-ms-file-rename-source"] = _SERIALIZER.header("rename_source", rename_source, "str")
+    if replace_if_exists is not None:
+        _headers["x-ms-file-rename-replace-if-exists"] = _SERIALIZER.header(
+            "replace_if_exists", replace_if_exists, "bool"
+        )
+    if ignore_read_only is not None:
+        _headers["x-ms-file-rename-ignore-readonly"] = _SERIALIZER.header("ignore_read_only", ignore_read_only, "bool")
+    if source_lease_id is not None:
+        _headers["x-ms-source-lease-id"] = _SERIALIZER.header("source_lease_id", source_lease_id, "str")
+    if destination_lease_id is not None:
+        _headers["x-ms-destination-lease-id"] = _SERIALIZER.header("destination_lease_id", destination_lease_id, "str")
+    if file_attributes is not None:
+        _headers["x-ms-file-attributes"] = _SERIALIZER.header("file_attributes", file_attributes, "str")
+    if file_creation_time is not None:
+        _headers["x-ms-file-creation-time"] = _SERIALIZER.header("file_creation_time", file_creation_time, "str")
+    if file_last_write_time is not None:
+        _headers["x-ms-file-last-write-time"] = _SERIALIZER.header("file_last_write_time", file_last_write_time, "str")
+    if file_change_time is not None:
+        _headers["x-ms-file-change-time"] = _SERIALIZER.header("file_change_time", file_change_time, "str")
+    if file_permission is not None:
+        _headers["x-ms-file-permission"] = _SERIALIZER.header("file_permission", file_permission, "str")
+    if file_permission_format is not None:
+        _headers["x-ms-file-permission-format"] = _SERIALIZER.header(
+            "file_permission_format", file_permission_format, "str"
+        )
+    if file_permission_key is not None:
+        _headers["x-ms-file-permission-key"] = _SERIALIZER.header("file_permission_key", file_permission_key, "str")
+    if metadata is not None:
+        _headers["x-ms-meta"] = _SERIALIZER.header("metadata", metadata, "{str}")
+    if file_content_type is not None:
+        _headers["x-ms-content-type"] = _SERIALIZER.header("file_content_type", file_content_type, "str")
+    if allow_trailing_dot is not None:
+        _headers["x-ms-allow-trailing-dot"] = _SERIALIZER.header("allow_trailing_dot", allow_trailing_dot, "bool")
+    if allow_source_trailing_dot is not None:
+        _headers["x-ms-source-allow-trailing-dot"] = _SERIALIZER.header(
+            "allow_source_trailing_dot", allow_source_trailing_dot, "bool"
+        )
+    if file_request_intent is not None:
+        _headers["x-ms-file-request-intent"] = _SERIALIZER.header("file_request_intent", file_request_intent, "str")
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs)
+
+
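+# Illustrative sketch only: renaming a file. rename_source is the URI of the
+# existing source file; both values below are placeholders.
+#
+#     request = build_rename_request(
+#         url="https://myaccount.file.core.windows.net/myshare/dir/new-name.txt",
+#         rename_source="https://myaccount.file.core.windows.net/myshare/dir/old-name.txt",
+#         replace_if_exists=True,
+#     )
+
+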
+def build_create_symbolic_link_request(
+    url: str,
+    *,
+    link_text: str,
+    timeout: Optional[int] = None,
+    metadata: Optional[Dict[str, str]] = None,
+    file_creation_time: str = "now",
+    file_last_write_time: str = "now",
+    request_id_parameter: Optional[str] = None,
+    lease_id: Optional[str] = None,
+    owner: Optional[str] = None,
+    group: Optional[str] = None,
+    file_request_intent: Optional[Union[str, _models.ShareTokenIntent]] = None,
+    **kwargs: Any
+) -> HttpRequest:
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    restype: Literal["symboliclink"] = kwargs.pop("restype", _params.pop("restype", "symboliclink"))
+    version: Literal["2025-05-05"] = kwargs.pop("version", _headers.pop("x-ms-version", "2025-05-05"))
+    accept = _headers.pop("Accept", "application/xml")
+
+    # Construct URL
+    _url = kwargs.pop("template_url", "{url}")
+    path_format_arguments = {
+        "url": _SERIALIZER.url("url", url, "str", skip_quote=True),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    _params["restype"] = _SERIALIZER.query("restype", restype, "str")
+    if timeout is not None:
+        _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0)
+
+    # Construct headers
+    _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str")
+    if metadata is not None:
+        _headers["x-ms-meta"] = _SERIALIZER.header("metadata", metadata, "{str}")
+    if file_creation_time is not None:
+        _headers["x-ms-file-creation-time"] = _SERIALIZER.header("file_creation_time", file_creation_time, "str")
+    if file_last_write_time is not None:
+        _headers["x-ms-file-last-write-time"] = _SERIALIZER.header("file_last_write_time", file_last_write_time, "str")
+    if request_id_parameter is not None:
+        _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str")
+    if lease_id is not None:
+        _headers["x-ms-lease-id"] = _SERIALIZER.header("lease_id", lease_id, "str")
+    if owner is not None:
+        _headers["x-ms-owner"] = _SERIALIZER.header("owner", owner, "str")
+    if group is not None:
+        _headers["x-ms-group"] = _SERIALIZER.header("group", group, "str")
+    _headers["x-ms-link-text"] = _SERIALIZER.header("link_text", link_text, "str")
+    if file_request_intent is not None:
+        _headers["x-ms-file-request-intent"] = _SERIALIZER.header("file_request_intent", file_request_intent, "str")
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs)
+
+
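+# Illustrative sketch only (NFS shares): creating a symbolic link. link_text is
+# stored verbatim as the link target; the values below are placeholders.
+#
+#     request = build_create_symbolic_link_request(
+#         url="https://myaccount.file.core.windows.net/myshare/dir/link",
+#         link_text="../target/file.txt",
+#     )
+
+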
+def build_get_symbolic_link_request(
+    url: str,
+    *,
+    timeout: Optional[int] = None,
+    sharesnapshot: Optional[str] = None,
+    request_id_parameter: Optional[str] = None,
+    file_request_intent: Optional[Union[str, _models.ShareTokenIntent]] = None,
+    **kwargs: Any
+) -> HttpRequest:
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    restype: Literal["symboliclink"] = kwargs.pop("restype", _params.pop("restype", "symboliclink"))
+    version: Literal["2025-05-05"] = kwargs.pop("version", _headers.pop("x-ms-version", "2025-05-05"))
+    accept = _headers.pop("Accept", "application/xml")
+
+    # Construct URL
+    _url = kwargs.pop("template_url", "{url}")
+    path_format_arguments = {
+        "url": _SERIALIZER.url("url", url, "str", skip_quote=True),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    _params["restype"] = _SERIALIZER.query("restype", restype, "str")
+    if timeout is not None:
+        _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0)
+    if sharesnapshot is not None:
+        _params["sharesnapshot"] = _SERIALIZER.query("sharesnapshot", sharesnapshot, "str")
+
+    # Construct headers
+    _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str")
+    if request_id_parameter is not None:
+        _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str")
+    if file_request_intent is not None:
+        _headers["x-ms-file-request-intent"] = _SERIALIZER.header("file_request_intent", file_request_intent, "str")
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
+
+
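+# Illustrative sketch only: reading a symbolic link's target. The link text
+# comes back in the response headers rather than the body; the URL is a
+# placeholder.
+#
+#     request = build_get_symbolic_link_request(
+#         url="https://myaccount.file.core.windows.net/myshare/dir/link",
+#     )
+
+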
+def build_create_hard_link_request(
+    url: str,
+    *,
+    target_file: str,
+    timeout: Optional[int] = None,
+    request_id_parameter: Optional[str] = None,
+    lease_id: Optional[str] = None,
+    file_request_intent: Optional[Union[str, _models.ShareTokenIntent]] = None,
+    **kwargs: Any
+) -> HttpRequest:
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    restype: Literal["hardlink"] = kwargs.pop("restype", _params.pop("restype", "hardlink"))
+    file_type_constant: Literal["file"] = kwargs.pop("file_type_constant", _headers.pop("x-ms-type", "file"))
+    version: Literal["2025-05-05"] = kwargs.pop("version", _headers.pop("x-ms-version", "2025-05-05"))
+    accept = _headers.pop("Accept", "application/xml")
+
+    # Construct URL
+    _url = kwargs.pop("template_url", "{url}")
+    path_format_arguments = {
+        "url": _SERIALIZER.url("url", url, "str", skip_quote=True),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    _params["restype"] = _SERIALIZER.query("restype", restype, "str")
+    if timeout is not None:
+        _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0)
+
+    # Construct headers
+    _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str")
+    _headers["x-ms-type"] = _SERIALIZER.header("file_type_constant", file_type_constant, "str")
+    if request_id_parameter is not None:
+        _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str")
+    if lease_id is not None:
+        _headers["x-ms-lease-id"] = _SERIALIZER.header("lease_id", lease_id, "str")
+    _headers["x-ms-file-target-file"] = _SERIALIZER.header("target_file", target_file, "str")
+    if file_request_intent is not None:
+        _headers["x-ms-file-request-intent"] = _SERIALIZER.header("file_request_intent", file_request_intent, "str")
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs)
+
+
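+# Illustrative sketch only (NFS shares): hard-linking to an existing file.
+# target_file identifies the file to link to; the values are placeholders.
+#
+#     request = build_create_hard_link_request(
+#         url="https://myaccount.file.core.windows.net/myshare/dir/link",
+#         target_file="dir/file.txt",
+#     )
+
+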
+class FileOperations:  # pylint: disable=too-many-public-methods
+    """
+    .. warning::
+        **DO NOT** instantiate this class directly.
+
+        Instead, you should access the following operations through
+        :class:`~azure.storage.fileshare.AzureFileStorage`'s
+        :attr:`file` attribute.
+    """
+
+    models = _models
+
+    def __init__(self, *args, **kwargs):
+        input_args = list(args)
+        self._client: PipelineClient = input_args.pop(0) if input_args else kwargs.pop("client")
+        self._config: AzureFileStorageConfiguration = input_args.pop(0) if input_args else kwargs.pop("config")
+        self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer")
+        self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer")
+
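+    # Illustrative access pattern (sketch only): rather than instantiating this
+    # class, reach its operations through an AzureFileStorage client built
+    # elsewhere, e.g.
+    #
+    #     client.file.get_properties()
+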
+    @distributed_trace
+    def create(  # pylint: disable=inconsistent-return-statements
+        self,
+        file_content_length: int,
+        timeout: Optional[int] = None,
+        metadata: Optional[Dict[str, str]] = None,
+        file_permission: str = "inherit",
+        file_permission_format: Optional[Union[str, _models.FilePermissionFormat]] = None,
+        file_permission_key: Optional[str] = None,
+        file_attributes: str = "none",
+        file_creation_time: str = "now",
+        file_last_write_time: str = "now",
+        file_change_time: Optional[str] = None,
+        owner: Optional[str] = None,
+        group: Optional[str] = None,
+        file_mode: Optional[str] = None,
+        nfs_file_type: Optional[Union[str, _models.NfsFileType]] = None,
+        file_http_headers: Optional[_models.FileHTTPHeaders] = None,
+        lease_access_conditions: Optional[_models.LeaseAccessConditions] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """Creates a new file or replaces a file. Note it only initializes the file with no content.
+
+        :param file_content_length: Specifies the maximum size for the file, up to 4 TB. Required.
+        :type file_content_length: int
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
+         Timeouts for File Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param metadata: A name-value pair to associate with a file storage object. Default value is
+         None.
+        :type metadata: dict[str, str]
+        :param file_permission: If specified, the permission (security descriptor) shall be set for
+         the directory/file. This header can be used if the permission size is <= 8KB; otherwise the
+         x-ms-file-permission-key header shall be used. Default value: Inherit. If SDDL is specified
+         as input, it must have an owner, group, and dacl. Note: Only one of x-ms-file-permission or
+         x-ms-file-permission-key should be specified. Default value is "inherit".
+        :type file_permission: str
+        :param file_permission_format: Optional. Available for version 2023-06-01 and later. Specifies
+         the format in which the permission is provided. Acceptable values are SDDL or binary. If
+         x-ms-file-permission-format is unspecified or explicitly set to SDDL, the permission is
+         interpreted as SDDL; if it is explicitly set to binary, the permission is expected as a
+         base64 string representing the binary encoding of the permission. Known values are: "Sddl"
+         and "Binary". Default value is None.
+        :type file_permission_format: str or ~azure.storage.fileshare.models.FilePermissionFormat
+        :param file_permission_key: Key of the permission to be set for the directory/file. Note: Only
+         one of the x-ms-file-permission or x-ms-file-permission-key should be specified. Default value
+         is None.
+        :type file_permission_key: str
+        :param file_attributes: If specified, the provided file attributes shall be set. Default
+         value: ‘Archive’ for a file and ‘Directory’ for a directory. ‘None’ can also be specified
+         to apply the default. Default value is "none".
+        :type file_attributes: str
+        :param file_creation_time: Creation time for the file/directory. Default value: Now. Default
+         value is "now".
+        :type file_creation_time: str
+        :param file_last_write_time: Last write time for the file/directory. Default value: Now.
+         Default value is "now".
+        :type file_last_write_time: str
+        :param file_change_time: Change time for the file/directory. Default value: Now. Default value
+         is None.
+        :type file_change_time: str
+        :param owner: Optional, NFS only. The owner of the file or directory. Default value is None.
+        :type owner: str
+        :param group: Optional, NFS only. The owning group of the file or directory. Default value is
+         None.
+        :type group: str
+        :param file_mode: Optional, NFS only. The file mode of the file or directory. Default value is
+         None.
+        :type file_mode: str
+        :param nfs_file_type: Optional, NFS only. Type of the file or directory. Known values are:
+         "Regular", "Directory", and "SymLink". Default value is None.
+        :type nfs_file_type: str or ~azure.storage.fileshare.models.NfsFileType
+        :param file_http_headers: Parameter group. Default value is None.
+        :type file_http_headers: ~azure.storage.fileshare.models.FileHTTPHeaders
+        :param lease_access_conditions: Parameter group. Default value is None.
+        :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+        _params = kwargs.pop("params", {}) or {}
+
+        file_type_constant: Literal["file"] = kwargs.pop("file_type_constant", _headers.pop("x-ms-type", "file"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _file_content_type = None
+        _file_content_encoding = None
+        _file_content_language = None
+        _file_cache_control = None
+        _file_content_md5 = None
+        _file_content_disposition = None
+        _lease_id = None
+        if file_http_headers is not None:
+            _file_cache_control = file_http_headers.file_cache_control
+            _file_content_disposition = file_http_headers.file_content_disposition
+            _file_content_encoding = file_http_headers.file_content_encoding
+            _file_content_language = file_http_headers.file_content_language
+            _file_content_md5 = file_http_headers.file_content_md5
+            _file_content_type = file_http_headers.file_content_type
+        if lease_access_conditions is not None:
+            _lease_id = lease_access_conditions.lease_id
+
+        _request = build_create_request(
+            url=self._config.url,
+            file_content_length=file_content_length,
+            timeout=timeout,
+            file_content_type=_file_content_type,
+            file_content_encoding=_file_content_encoding,
+            file_content_language=_file_content_language,
+            file_cache_control=_file_cache_control,
+            file_content_md5=_file_content_md5,
+            file_content_disposition=_file_content_disposition,
+            metadata=metadata,
+            file_permission=file_permission,
+            file_permission_format=file_permission_format,
+            file_permission_key=file_permission_key,
+            file_attributes=file_attributes,
+            file_creation_time=file_creation_time,
+            file_last_write_time=file_last_write_time,
+            file_change_time=file_change_time,
+            lease_id=_lease_id,
+            owner=owner,
+            group=group,
+            file_mode=file_mode,
+            nfs_file_type=nfs_file_type,
+            allow_trailing_dot=self._config.allow_trailing_dot,
+            file_request_intent=self._config.file_request_intent,
+            file_type_constant=file_type_constant,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [201]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+        response_headers["x-ms-request-server-encrypted"] = self._deserialize(
+            "bool", response.headers.get("x-ms-request-server-encrypted")
+        )
+        response_headers["x-ms-file-permission-key"] = self._deserialize(
+            "str", response.headers.get("x-ms-file-permission-key")
+        )
+        response_headers["x-ms-file-attributes"] = self._deserialize(
+            "str", response.headers.get("x-ms-file-attributes")
+        )
+        response_headers["x-ms-file-creation-time"] = self._deserialize(
+            "str", response.headers.get("x-ms-file-creation-time")
+        )
+        response_headers["x-ms-file-last-write-time"] = self._deserialize(
+            "str", response.headers.get("x-ms-file-last-write-time")
+        )
+        response_headers["x-ms-file-change-time"] = self._deserialize(
+            "str", response.headers.get("x-ms-file-change-time")
+        )
+        response_headers["x-ms-file-id"] = self._deserialize("str", response.headers.get("x-ms-file-id"))
+        response_headers["x-ms-file-parent-id"] = self._deserialize("str", response.headers.get("x-ms-file-parent-id"))
+        response_headers["x-ms-mode"] = self._deserialize("str", response.headers.get("x-ms-mode"))
+        response_headers["x-ms-owner"] = self._deserialize("str", response.headers.get("x-ms-owner"))
+        response_headers["x-ms-group"] = self._deserialize("str", response.headers.get("x-ms-group"))
+        response_headers["x-ms-file-file-type"] = self._deserialize("str", response.headers.get("x-ms-file-file-type"))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
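+    # Sketch only: allocating (but not writing) a 1 MiB file through the client
+    # wrapper, assuming `client` is an AzureFileStorage instance whose URL
+    # points at the target file.
+    #
+    #     client.file.create(file_content_length=1024 * 1024)
+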
+    @distributed_trace
+    def download(
+        self,
+        timeout: Optional[int] = None,
+        range: Optional[str] = None,
+        range_get_content_md5: Optional[bool] = None,
+        structured_body_type: Optional[str] = None,
+        lease_access_conditions: Optional[_models.LeaseAccessConditions] = None,
+        **kwargs: Any
+    ) -> Iterator[bytes]:
+        # pylint: disable=line-too-long
+        """Reads or downloads a file from the system, including its metadata and properties.
+
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
+         Timeouts for File Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param range: Return file data only from the specified byte range. Default value is None.
+        :type range: str
+        :param range_get_content_md5: When this header is set to true and specified together with the
+         Range header, the service returns the MD5 hash for the range, as long as the range is less than
+         or equal to 4 MB in size. Default value is None.
+        :type range_get_content_md5: bool
+        :param structured_body_type: Specifies that the response content should be returned as a
+         structured message, and specifies the message schema version and properties. Default value
+         is None.
+        :type structured_body_type: str
+        :param lease_access_conditions: Parameter group. Default value is None.
+        :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions
+        :return: Iterator[bytes] or the result of cls(response)
+        :rtype: Iterator[bytes]
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = kwargs.pop("params", {}) or {}
+
+        cls: ClsType[Iterator[bytes]] = kwargs.pop("cls", None)
+
+        _lease_id = None
+        if lease_access_conditions is not None:
+            _lease_id = lease_access_conditions.lease_id
+
+        _request = build_download_request(
+            url=self._config.url,
+            timeout=timeout,
+            range=range,
+            range_get_content_md5=range_get_content_md5,
+            structured_body_type=structured_body_type,
+            lease_id=_lease_id,
+            allow_trailing_dot=self._config.allow_trailing_dot,
+            file_request_intent=self._config.file_request_intent,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _decompress = kwargs.pop("decompress", True)
+        _stream = True
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200, 206]:
+            try:
+                response.read()  # Load the body in memory and close the socket
+            except (StreamConsumedError, StreamClosedError):
+                pass
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["x-ms-meta"] = self._deserialize("{str}", response.headers.get("x-ms-meta"))
+        response_headers["Content-Length"] = self._deserialize("int", response.headers.get("Content-Length"))
+        response_headers["Content-Type"] = self._deserialize("str", response.headers.get("Content-Type"))
+        response_headers["Content-Range"] = self._deserialize("str", response.headers.get("Content-Range"))
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Content-MD5"] = self._deserialize("bytearray", response.headers.get("Content-MD5"))
+        response_headers["Content-Encoding"] = self._deserialize("str", response.headers.get("Content-Encoding"))
+        response_headers["Cache-Control"] = self._deserialize("str", response.headers.get("Cache-Control"))
+        response_headers["Content-Disposition"] = self._deserialize("str", response.headers.get("Content-Disposition"))
+        response_headers["Content-Language"] = self._deserialize("str", response.headers.get("Content-Language"))
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Accept-Ranges"] = self._deserialize("str", response.headers.get("Accept-Ranges"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+        response_headers["x-ms-copy-completion-time"] = self._deserialize(
+            "rfc-1123", response.headers.get("x-ms-copy-completion-time")
+        )
+        response_headers["x-ms-copy-status-description"] = self._deserialize(
+            "str", response.headers.get("x-ms-copy-status-description")
+        )
+        response_headers["x-ms-copy-id"] = self._deserialize("str", response.headers.get("x-ms-copy-id"))
+        response_headers["x-ms-copy-progress"] = self._deserialize("str", response.headers.get("x-ms-copy-progress"))
+        response_headers["x-ms-copy-source"] = self._deserialize("str", response.headers.get("x-ms-copy-source"))
+        response_headers["x-ms-copy-status"] = self._deserialize("str", response.headers.get("x-ms-copy-status"))
+        response_headers["x-ms-content-md5"] = self._deserialize("bytearray", response.headers.get("x-ms-content-md5"))
+        response_headers["x-ms-server-encrypted"] = self._deserialize(
+            "bool", response.headers.get("x-ms-server-encrypted")
+        )
+        response_headers["x-ms-file-attributes"] = self._deserialize(
+            "str", response.headers.get("x-ms-file-attributes")
+        )
+        response_headers["x-ms-file-creation-time"] = self._deserialize(
+            "str", response.headers.get("x-ms-file-creation-time")
+        )
+        response_headers["x-ms-file-last-write-time"] = self._deserialize(
+            "str", response.headers.get("x-ms-file-last-write-time")
+        )
+        response_headers["x-ms-file-change-time"] = self._deserialize(
+            "str", response.headers.get("x-ms-file-change-time")
+        )
+        response_headers["x-ms-file-permission-key"] = self._deserialize(
+            "str", response.headers.get("x-ms-file-permission-key")
+        )
+        response_headers["x-ms-file-id"] = self._deserialize("str", response.headers.get("x-ms-file-id"))
+        response_headers["x-ms-file-parent-id"] = self._deserialize("str", response.headers.get("x-ms-file-parent-id"))
+        response_headers["x-ms-lease-duration"] = self._deserialize("str", response.headers.get("x-ms-lease-duration"))
+        response_headers["x-ms-lease-state"] = self._deserialize("str", response.headers.get("x-ms-lease-state"))
+        response_headers["x-ms-lease-status"] = self._deserialize("str", response.headers.get("x-ms-lease-status"))
+        response_headers["x-ms-structured-body"] = self._deserialize(
+            "str", response.headers.get("x-ms-structured-body")
+        )
+        response_headers["x-ms-structured-content-length"] = self._deserialize(
+            "int", response.headers.get("x-ms-structured-content-length")
+        )
+        response_headers["x-ms-mode"] = self._deserialize("str", response.headers.get("x-ms-mode"))
+        response_headers["x-ms-owner"] = self._deserialize("str", response.headers.get("x-ms-owner"))
+        response_headers["x-ms-group"] = self._deserialize("str", response.headers.get("x-ms-group"))
+        response_headers["x-ms-link-count"] = self._deserialize("int", response.headers.get("x-ms-link-count"))
+
+        deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
+
+        if cls:
+            return cls(pipeline_response, deserialized, response_headers)  # type: ignore
+
+        return deserialized  # type: ignore
+
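+    # Sketch only: streaming the first kilobyte of the file. The return value
+    # is an iterator of byte chunks, so it can be consumed incrementally.
+    #
+    #     chunks = client.file.download(range="bytes=0-1023")
+    #     data = b"".join(chunks)
+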
+    @distributed_trace
+    def get_properties(  # pylint: disable=inconsistent-return-statements
+        self,
+        sharesnapshot: Optional[str] = None,
+        timeout: Optional[int] = None,
+        lease_access_conditions: Optional[_models.LeaseAccessConditions] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """Returns all user-defined metadata, standard HTTP properties, and system properties for the
+        file. It does not return the content of the file.
+
+        :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present,
+         specifies the share snapshot to query. Default value is None.
+        :type sharesnapshot: str
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
+         Timeouts for File Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param lease_access_conditions: Parameter group. Default value is None.
+        :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = kwargs.pop("params", {}) or {}
+
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _lease_id = None
+        if lease_access_conditions is not None:
+            _lease_id = lease_access_conditions.lease_id
+
+        _request = build_get_properties_request(
+            url=self._config.url,
+            sharesnapshot=sharesnapshot,
+            timeout=timeout,
+            lease_id=_lease_id,
+            allow_trailing_dot=self._config.allow_trailing_dot,
+            file_request_intent=self._config.file_request_intent,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["x-ms-meta"] = self._deserialize("{str}", response.headers.get("x-ms-meta"))
+        response_headers["x-ms-type"] = self._deserialize("str", response.headers.get("x-ms-type"))
+        response_headers["Content-Length"] = self._deserialize("int", response.headers.get("Content-Length"))
+        response_headers["Content-Type"] = self._deserialize("str", response.headers.get("Content-Type"))
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Content-MD5"] = self._deserialize("bytearray", response.headers.get("Content-MD5"))
+        response_headers["Content-Encoding"] = self._deserialize("str", response.headers.get("Content-Encoding"))
+        response_headers["Cache-Control"] = self._deserialize("str", response.headers.get("Cache-Control"))
+        response_headers["Content-Disposition"] = self._deserialize("str", response.headers.get("Content-Disposition"))
+        response_headers["Content-Language"] = self._deserialize("str", response.headers.get("Content-Language"))
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+        response_headers["x-ms-copy-completion-time"] = self._deserialize(
+            "rfc-1123", response.headers.get("x-ms-copy-completion-time")
+        )
+        response_headers["x-ms-copy-status-description"] = self._deserialize(
+            "str", response.headers.get("x-ms-copy-status-description")
+        )
+        response_headers["x-ms-copy-id"] = self._deserialize("str", response.headers.get("x-ms-copy-id"))
+        response_headers["x-ms-copy-progress"] = self._deserialize("str", response.headers.get("x-ms-copy-progress"))
+        response_headers["x-ms-copy-source"] = self._deserialize("str", response.headers.get("x-ms-copy-source"))
+        response_headers["x-ms-copy-status"] = self._deserialize("str", response.headers.get("x-ms-copy-status"))
+        response_headers["x-ms-server-encrypted"] = self._deserialize(
+            "bool", response.headers.get("x-ms-server-encrypted")
+        )
+        response_headers["x-ms-file-attributes"] = self._deserialize(
+            "str", response.headers.get("x-ms-file-attributes")
+        )
+        response_headers["x-ms-file-creation-time"] = self._deserialize(
+            "str", response.headers.get("x-ms-file-creation-time")
+        )
+        response_headers["x-ms-file-last-write-time"] = self._deserialize(
+            "str", response.headers.get("x-ms-file-last-write-time")
+        )
+        response_headers["x-ms-file-change-time"] = self._deserialize(
+            "str", response.headers.get("x-ms-file-change-time")
+        )
+        response_headers["x-ms-file-permission-key"] = self._deserialize(
+            "str", response.headers.get("x-ms-file-permission-key")
+        )
+        response_headers["x-ms-file-id"] = self._deserialize("str", response.headers.get("x-ms-file-id"))
+        response_headers["x-ms-file-parent-id"] = self._deserialize("str", response.headers.get("x-ms-file-parent-id"))
+        response_headers["x-ms-lease-duration"] = self._deserialize("str", response.headers.get("x-ms-lease-duration"))
+        response_headers["x-ms-lease-state"] = self._deserialize("str", response.headers.get("x-ms-lease-state"))
+        response_headers["x-ms-lease-status"] = self._deserialize("str", response.headers.get("x-ms-lease-status"))
+        response_headers["x-ms-mode"] = self._deserialize("str", response.headers.get("x-ms-mode"))
+        response_headers["x-ms-owner"] = self._deserialize("str", response.headers.get("x-ms-owner"))
+        response_headers["x-ms-group"] = self._deserialize("str", response.headers.get("x-ms-group"))
+        response_headers["x-ms-link-count"] = self._deserialize("int", response.headers.get("x-ms-link-count"))
+        response_headers["x-ms-file-file-type"] = self._deserialize("str", response.headers.get("x-ms-file-file-type"))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
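+    # Sketch only: get_properties returns None by default; the deserialized
+    # response headers can be captured via the optional `cls` callback.
+    #
+    #     headers = client.file.get_properties(
+    #         cls=lambda response, body, headers: headers,
+    #     )
+    #     size = headers["Content-Length"]
+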
+    @distributed_trace
+    def delete(  # pylint: disable=inconsistent-return-statements
+        self,
+        timeout: Optional[int] = None,
+        lease_access_conditions: Optional[_models.LeaseAccessConditions] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """removes the file from the storage account.
+
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
+         Timeouts for File Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param lease_access_conditions: Parameter group. Default value is None.
+        :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = kwargs.pop("params", {}) or {}
+
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _lease_id = None
+        if lease_access_conditions is not None:
+            _lease_id = lease_access_conditions.lease_id
+
+        _request = build_delete_request(
+            url=self._config.url,
+            timeout=timeout,
+            lease_id=_lease_id,
+            allow_trailing_dot=self._config.allow_trailing_dot,
+            file_request_intent=self._config.file_request_intent,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [202]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+        response_headers["x-ms-link-count"] = self._deserialize("int", response.headers.get("x-ms-link-count"))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
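+    # Sketch only: deleting the file. Any status other than 202 raises
+    # HttpResponseError with the deserialized StorageError attached.
+    #
+    #     client.file.delete()
+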
+    @distributed_trace
+    def set_http_headers(  # pylint: disable=inconsistent-return-statements
+        self,
+        timeout: Optional[int] = None,
+        file_content_length: Optional[int] = None,
+        file_permission: str = "inherit",
+        file_permission_format: Optional[Union[str, _models.FilePermissionFormat]] = None,
+        file_permission_key: Optional[str] = None,
+        file_attributes: str = "none",
+        file_creation_time: str = "now",
+        file_last_write_time: str = "now",
+        file_change_time: Optional[str] = None,
+        owner: Optional[str] = None,
+        group: Optional[str] = None,
+        file_mode: Optional[str] = None,
+        file_http_headers: Optional[_models.FileHTTPHeaders] = None,
+        lease_access_conditions: Optional[_models.LeaseAccessConditions] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """Sets HTTP headers on the file.
+
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
+         Timeouts for File Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param file_content_length: Resizes a file to the specified size. If the specified byte value
+         is less than the current size of the file, then all ranges above the specified byte value are
+         cleared. Default value is None.
+        :type file_content_length: int
+        :param file_permission: If specified, the permission (security descriptor) shall be set for
+         the directory/file. This header can be used if the permission size is <= 8KB; otherwise the
+         x-ms-file-permission-key header shall be used. Default value: Inherit. If SDDL is specified
+         as input, it must have an owner, group, and dacl. Note: Only one of x-ms-file-permission or
+         x-ms-file-permission-key should be specified. Default value is "inherit".
+        :type file_permission: str
+        :param file_permission_format: Optional. Available for version 2023-06-01 and later. Specifies
+         the format in which the permission is provided. Acceptable values are SDDL or binary. If
+         x-ms-file-permission-format is unspecified or explicitly set to SDDL, the permission is
+         interpreted as SDDL; if it is explicitly set to binary, the permission is expected as a
+         base64 string representing the binary encoding of the permission. Known values are: "Sddl"
+         and "Binary". Default value is None.
+        :type file_permission_format: str or ~azure.storage.fileshare.models.FilePermissionFormat
+        :param file_permission_key: Key of the permission to be set for the directory/file. Note: Only
+         one of the x-ms-file-permission or x-ms-file-permission-key should be specified. Default value
+         is None.
+        :type file_permission_key: str
+        :param file_attributes: If specified, the provided file attributes shall be set. Default
+         value: ‘Archive’ for a file and ‘Directory’ for a directory. ‘None’ can also be specified
+         to apply the default. Default value is "none".
+        :type file_attributes: str
+        :param file_creation_time: Creation time for the file/directory. Default value: Now. Default
+         value is "now".
+        :type file_creation_time: str
+        :param file_last_write_time: Last write time for the file/directory. Default value: Now.
+         Default value is "now".
+        :type file_last_write_time: str
+        :param file_change_time: Change time for the file/directory. Default value: Now. Default value
+         is None.
+        :type file_change_time: str
+        :param owner: Optional, NFS only. The owner of the file or directory. Default value is None.
+        :type owner: str
+        :param group: Optional, NFS only. The owning group of the file or directory. Default value is
+         None.
+        :type group: str
+        :param file_mode: Optional, NFS only. The file mode of the file or directory. Default value is
+         None.
+        :type file_mode: str
+        :param file_http_headers: Parameter group. Default value is None.
+        :type file_http_headers: ~azure.storage.fileshare.models.FileHTTPHeaders
+        :param lease_access_conditions: Parameter group. Default value is None.
+        :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        comp: Literal["properties"] = kwargs.pop("comp", _params.pop("comp", "properties"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _file_content_type = None
+        _file_content_encoding = None
+        _file_content_language = None
+        _file_cache_control = None
+        _file_content_md5 = None
+        _file_content_disposition = None
+        _lease_id = None
+        if file_http_headers is not None:
+            _file_cache_control = file_http_headers.file_cache_control
+            _file_content_disposition = file_http_headers.file_content_disposition
+            _file_content_encoding = file_http_headers.file_content_encoding
+            _file_content_language = file_http_headers.file_content_language
+            _file_content_md5 = file_http_headers.file_content_md5
+            _file_content_type = file_http_headers.file_content_type
+        if lease_access_conditions is not None:
+            _lease_id = lease_access_conditions.lease_id
+
+        _request = build_set_http_headers_request(
+            url=self._config.url,
+            timeout=timeout,
+            file_content_length=file_content_length,
+            file_content_type=_file_content_type,
+            file_content_encoding=_file_content_encoding,
+            file_content_language=_file_content_language,
+            file_cache_control=_file_cache_control,
+            file_content_md5=_file_content_md5,
+            file_content_disposition=_file_content_disposition,
+            file_permission=file_permission,
+            file_permission_format=file_permission_format,
+            file_permission_key=file_permission_key,
+            file_attributes=file_attributes,
+            file_creation_time=file_creation_time,
+            file_last_write_time=file_last_write_time,
+            file_change_time=file_change_time,
+            lease_id=_lease_id,
+            owner=owner,
+            group=group,
+            file_mode=file_mode,
+            allow_trailing_dot=self._config.allow_trailing_dot,
+            file_request_intent=self._config.file_request_intent,
+            comp=comp,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+        response_headers["x-ms-request-server-encrypted"] = self._deserialize(
+            "bool", response.headers.get("x-ms-request-server-encrypted")
+        )
+        response_headers["x-ms-file-permission-key"] = self._deserialize(
+            "str", response.headers.get("x-ms-file-permission-key")
+        )
+        response_headers["x-ms-file-attributes"] = self._deserialize(
+            "str", response.headers.get("x-ms-file-attributes")
+        )
+        response_headers["x-ms-file-creation-time"] = self._deserialize(
+            "str", response.headers.get("x-ms-file-creation-time")
+        )
+        response_headers["x-ms-file-last-write-time"] = self._deserialize(
+            "str", response.headers.get("x-ms-file-last-write-time")
+        )
+        response_headers["x-ms-file-change-time"] = self._deserialize(
+            "str", response.headers.get("x-ms-file-change-time")
+        )
+        response_headers["x-ms-file-id"] = self._deserialize("str", response.headers.get("x-ms-file-id"))
+        response_headers["x-ms-file-parent-id"] = self._deserialize("str", response.headers.get("x-ms-file-parent-id"))
+        response_headers["x-ms-mode"] = self._deserialize("str", response.headers.get("x-ms-mode"))
+        response_headers["x-ms-owner"] = self._deserialize("str", response.headers.get("x-ms-owner"))
+        response_headers["x-ms-group"] = self._deserialize("str", response.headers.get("x-ms-group"))
+        response_headers["x-ms-link-count"] = self._deserialize("int", response.headers.get("x-ms-link-count"))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
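+    # Usage sketch (illustrative, not part of the generated client): invoking
+    # the set_http_headers operation that ends above. `file_ops` and the
+    # FileHTTPHeaders model name are assumptions about how this operations
+    # class is typically obtained and parameterized; the keyword names mirror
+    # those passed to build_set_http_headers_request:
+    #
+    #     headers = _models.FileHTTPHeaders(file_content_type="text/plain")
+    #     file_ops.set_http_headers(
+    #         file_content_length=1024,  # assumed to be the new file size
+    #         file_http_headers=headers,
+    #     )
+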
+    @distributed_trace
+    def set_metadata(  # pylint: disable=inconsistent-return-statements
+        self,
+        timeout: Optional[int] = None,
+        metadata: Optional[Dict[str, str]] = None,
+        lease_access_conditions: Optional[_models.LeaseAccessConditions] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """Updates user-defined metadata for the specified file.
+
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
+         Timeouts for File Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param metadata: Name-value pairs to associate with a file storage object. Default value is
+         None.
+        :type metadata: dict[str, str]
+        :param lease_access_conditions: Parameter group. Default value is None.
+        :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        comp: Literal["metadata"] = kwargs.pop("comp", _params.pop("comp", "metadata"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _lease_id = None
+        if lease_access_conditions is not None:
+            _lease_id = lease_access_conditions.lease_id
+
+        _request = build_set_metadata_request(
+            url=self._config.url,
+            timeout=timeout,
+            metadata=metadata,
+            lease_id=_lease_id,
+            allow_trailing_dot=self._config.allow_trailing_dot,
+            file_request_intent=self._config.file_request_intent,
+            comp=comp,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+        response_headers["x-ms-request-server-encrypted"] = self._deserialize(
+            "bool", response.headers.get("x-ms-request-server-encrypted")
+        )
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
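+    # Usage sketch for set_metadata above (`file_ops` is an assumed instance
+    # of this operations class taken from the generated client):
+    #
+    #     file_ops.set_metadata(metadata={"project": "demo"}, timeout=30)
+    #
+    # To also read response headers such as ETag, pass a `cls` callback; as
+    # the tail of the method shows, it is invoked as
+    # cls(pipeline_response, None, response_headers).
+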
+    @distributed_trace
+    def acquire_lease(  # pylint: disable=inconsistent-return-statements
+        self,
+        timeout: Optional[int] = None,
+        duration: Optional[int] = None,
+        proposed_lease_id: Optional[str] = None,
+        request_id_parameter: Optional[str] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """[Update] The Lease File operation establishes and manages a lock on a file for write and delete
+        operations.
+
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
+         Timeouts for File Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param duration: Specifies the duration of the lease, in seconds, or negative one (-1) for a
+         lease that never expires. A non-infinite lease can be between 15 and 60 seconds. A lease
+         duration cannot be changed using renew or change. Default value is None.
+        :type duration: int
+        :param proposed_lease_id: Proposed lease ID, in a GUID string format. The File service returns
+         400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid
+         Constructor (String) for a list of valid GUID string formats. Default value is None.
+        :type proposed_lease_id: str
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        comp: Literal["lease"] = kwargs.pop("comp", _params.pop("comp", "lease"))
+        action: Literal["acquire"] = kwargs.pop("action", _headers.pop("x-ms-lease-action", "acquire"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _request = build_acquire_lease_request(
+            url=self._config.url,
+            timeout=timeout,
+            duration=duration,
+            proposed_lease_id=proposed_lease_id,
+            request_id_parameter=request_id_parameter,
+            allow_trailing_dot=self._config.allow_trailing_dot,
+            file_request_intent=self._config.file_request_intent,
+            comp=comp,
+            action=action,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [201]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["x-ms-lease-id"] = self._deserialize("str", response.headers.get("x-ms-lease-id"))
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
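+    # Usage sketch for acquire_lease above: the operation itself returns None,
+    # but the new lease id is surfaced in the x-ms-lease-id response header,
+    # which a `cls` callback can capture (`file_ops` is an assumed instance of
+    # this operations class):
+    #
+    #     lease_id = file_ops.acquire_lease(
+    #         duration=-1,  # never expires; finite leases are 15-60 seconds
+    #         cls=lambda resp, deserialized, hdrs: hdrs["x-ms-lease-id"],
+    #     )
+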
+    @distributed_trace
+    def release_lease(  # pylint: disable=inconsistent-return-statements
+        self, lease_id: str, timeout: Optional[int] = None, request_id_parameter: Optional[str] = None, **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """[Update] The Lease File operation establishes and manages a lock on a file for write and delete
+        operations.
+
+        :param lease_id: Specifies the current lease ID on the resource. Required.
+        :type lease_id: str
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
+         Timeouts for File Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        comp: Literal["lease"] = kwargs.pop("comp", _params.pop("comp", "lease"))
+        action: Literal["release"] = kwargs.pop("action", _headers.pop("x-ms-lease-action", "release"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _request = build_release_lease_request(
+            url=self._config.url,
+            lease_id=lease_id,
+            timeout=timeout,
+            request_id_parameter=request_id_parameter,
+            allow_trailing_dot=self._config.allow_trailing_dot,
+            file_request_intent=self._config.file_request_intent,
+            comp=comp,
+            action=action,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
+    @distributed_trace
+    def change_lease(  # pylint: disable=inconsistent-return-statements
+        self,
+        lease_id: str,
+        timeout: Optional[int] = None,
+        proposed_lease_id: Optional[str] = None,
+        request_id_parameter: Optional[str] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """[Update] The Lease File operation establishes and manages a lock on a file for write and delete
+        operations.
+
+        :param lease_id: Specifies the current lease ID on the resource. Required.
+        :type lease_id: str
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
+         Timeouts for File Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param proposed_lease_id: Proposed lease ID, in a GUID string format. The File service returns
+         400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid
+         Constructor (String) for a list of valid GUID string formats. Default value is None.
+        :type proposed_lease_id: str
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        comp: Literal["lease"] = kwargs.pop("comp", _params.pop("comp", "lease"))
+        action: Literal["change"] = kwargs.pop("action", _headers.pop("x-ms-lease-action", "change"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _request = build_change_lease_request(
+            url=self._config.url,
+            lease_id=lease_id,
+            timeout=timeout,
+            proposed_lease_id=proposed_lease_id,
+            request_id_parameter=request_id_parameter,
+            allow_trailing_dot=self._config.allow_trailing_dot,
+            file_request_intent=self._config.file_request_intent,
+            comp=comp,
+            action=action,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["x-ms-lease-id"] = self._deserialize("str", response.headers.get("x-ms-lease-id"))
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
+    @distributed_trace
+    def break_lease(  # pylint: disable=inconsistent-return-statements
+        self,
+        timeout: Optional[int] = None,
+        request_id_parameter: Optional[str] = None,
+        lease_access_conditions: Optional[_models.LeaseAccessConditions] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """[Update] The Lease File operation establishes and manages a lock on a file for write and delete
+        operations.
+
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
+         Timeouts for File Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param lease_access_conditions: Parameter group. Default value is None.
+        :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        comp: Literal["lease"] = kwargs.pop("comp", _params.pop("comp", "lease"))
+        action: Literal["break"] = kwargs.pop("action", _headers.pop("x-ms-lease-action", "break"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _lease_id = None
+        if lease_access_conditions is not None:
+            _lease_id = lease_access_conditions.lease_id
+
+        _request = build_break_lease_request(
+            url=self._config.url,
+            timeout=timeout,
+            lease_id=_lease_id,
+            request_id_parameter=request_id_parameter,
+            allow_trailing_dot=self._config.allow_trailing_dot,
+            file_request_intent=self._config.file_request_intent,
+            comp=comp,
+            action=action,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [202]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["x-ms-lease-id"] = self._deserialize("str", response.headers.get("x-ms-lease-id"))
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
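+    # Usage sketch tying the lease operations above together, continuing from
+    # the acquire_lease sketch (`file_ops` and `lease_id` are assumed names):
+    #
+    #     import uuid
+    #     new_id = str(uuid.uuid4())  # proposed ids must be GUID strings
+    #     file_ops.change_lease(lease_id=lease_id, proposed_lease_id=new_id)
+    #     file_ops.release_lease(lease_id=new_id)
+    #     # or force-break the active lease without knowing its id:
+    #     file_ops.break_lease()
+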
+    @distributed_trace
+    def upload_range(  # pylint: disable=inconsistent-return-statements
+        self,
+        range: str,
+        content_length: int,
+        timeout: Optional[int] = None,
+        file_range_write: Union[str, _models.FileRangeWriteType] = "update",
+        content_md5: Optional[bytes] = None,
+        file_last_written_mode: Optional[Union[str, _models.FileLastWrittenMode]] = None,
+        structured_body_type: Optional[str] = None,
+        structured_content_length: Optional[int] = None,
+        lease_access_conditions: Optional[_models.LeaseAccessConditions] = None,
+        optionalbody: Optional[IO[bytes]] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """Upload a range of bytes to a file.
+
+        :param range: Specifies the range of bytes to be written. Both the start and end of the range
+         must be specified. For an update operation, the range can be up to 4 MB in size. For a clear
+         operation, the range can be up to the value of the file's full size. The File service accepts
+         only a single byte range for the Range and 'x-ms-range' headers, and the byte range must be
+         specified in the following format: bytes=startByte-endByte. Required.
+        :type range: str
+        :param content_length: Specifies the number of bytes being transmitted in the request body.
+         When the x-ms-write header is set to clear, the value of this header must be set to zero.
+         Required.
+        :type content_length: int
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
+         Timeouts for File Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param file_range_write: Specify one of the following options: - Update: Writes the bytes
+         specified by the request body into the specified range. The Range and Content-Length headers
+         must match to perform the update. - Clear: Clears the specified range and releases the space
+         used in storage for that range. To clear a range, set the Content-Length header to zero, and
+         set the Range header to a value that indicates the range to clear, up to maximum file size.
+         Known values are: "update" and "clear". Default value is "update".
+        :type file_range_write: str or ~azure.storage.fileshare.models.FileRangeWriteType
+        :param content_md5: An MD5 hash of the content. This hash is used to verify the integrity of
+         the data during transport. When the Content-MD5 header is specified, the File service compares
+         the hash of the content that has arrived with the header value that was sent. If the two hashes
+         do not match, the operation will fail with error code 400 (Bad Request). Default value is None.
+        :type content_md5: bytes
+        :param file_last_written_mode: If the file last write time should be preserved or overwritten.
+         Known values are: "Now" and "Preserve". Default value is None.
+        :type file_last_written_mode: str or ~azure.storage.fileshare.models.FileLastWrittenMode
+        :param structured_body_type: Required if the request body is a structured message. Specifies
+         the message schema version and properties. Default value is None.
+        :type structured_body_type: str
+        :param structured_content_length: Required if the request body is a structured message.
+         Specifies the length of the blob/file content inside the message body. Will always be smaller
+         than Content-Length. Default value is None.
+        :type structured_content_length: int
+        :param lease_access_conditions: Parameter group. Default value is None.
+        :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions
+        :param optionalbody: The data for the range being written. Default value is None.
+        :type optionalbody: IO[bytes]
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        comp: Literal["range"] = kwargs.pop("comp", _params.pop("comp", "range"))
+        content_type: str = kwargs.pop("content_type", _headers.pop("Content-Type", "application/octet-stream"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _lease_id = None
+        if lease_access_conditions is not None:
+            _lease_id = lease_access_conditions.lease_id
+        _content = optionalbody
+
+        _request = build_upload_range_request(
+            url=self._config.url,
+            range=range,
+            content_length=content_length,
+            timeout=timeout,
+            file_range_write=file_range_write,
+            content_md5=content_md5,
+            lease_id=_lease_id,
+            file_last_written_mode=file_last_written_mode,
+            structured_body_type=structured_body_type,
+            structured_content_length=structured_content_length,
+            allow_trailing_dot=self._config.allow_trailing_dot,
+            file_request_intent=self._config.file_request_intent,
+            comp=comp,
+            content_type=content_type,
+            version=self._config.version,
+            content=_content,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [201]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["Content-MD5"] = self._deserialize("bytearray", response.headers.get("Content-MD5"))
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+        response_headers["x-ms-request-server-encrypted"] = self._deserialize(
+            "bool", response.headers.get("x-ms-request-server-encrypted")
+        )
+        response_headers["x-ms-file-last-write-time"] = self._deserialize(
+            "str", response.headers.get("x-ms-file-last-write-time")
+        )
+        response_headers["x-ms-structured-body"] = self._deserialize(
+            "str", response.headers.get("x-ms-structured-body")
+        )
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
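+    # Usage sketch for upload_range above: write 1 KiB at the start of the
+    # file. The range format and the content_length/body pairing follow the
+    # docstring (`file_ops` is an assumed instance of this operations class):
+    #
+    #     import io
+    #     data = b"x" * 1024
+    #     file_ops.upload_range(
+    #         range="bytes=0-1023",  # inclusive start and end
+    #         content_length=len(data),
+    #         optionalbody=io.BytesIO(data),
+    #     )
+    #
+    # Clearing a range instead uses file_range_write="clear" with
+    # content_length=0 and no body, per the parameter documentation above.
+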
+    @distributed_trace
+    def upload_range_from_url(  # pylint: disable=inconsistent-return-statements
+        self,
+        range: str,
+        copy_source: str,
+        content_length: int,
+        timeout: Optional[int] = None,
+        source_range: Optional[str] = None,
+        source_content_crc64: Optional[bytes] = None,
+        copy_source_authorization: Optional[str] = None,
+        file_last_written_mode: Optional[Union[str, _models.FileLastWrittenMode]] = None,
+        source_modified_access_conditions: Optional[_models.SourceModifiedAccessConditions] = None,
+        lease_access_conditions: Optional[_models.LeaseAccessConditions] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """Upload a range of bytes to a file where the contents are read from a URL.
+
+        :param range: Writes data to the specified byte range in the file. Required.
+        :type range: str
+        :param copy_source: Specifies the URL of the source file or blob, up to 2 KB in length. To copy
+         a file to another file within the same storage account, you may use Shared Key to authenticate
+         the source file. If you are copying a file from another storage account, or if you are copying
+         a blob from the same storage account or another storage account, then you must authenticate the
+         source file or blob using a shared access signature. If the source is a public blob, no
+         authentication is required to perform the copy operation. A file in a share snapshot can also
+         be specified as a copy source. Required.
+        :type copy_source: str
+        :param content_length: Specifies the number of bytes being transmitted in the request body.
+         When the x-ms-write header is set to clear, the value of this header must be set to zero.
+         Required.
+        :type content_length: int
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
+         Timeouts for File Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param source_range: Bytes of source data in the specified range. Default value is None.
+        :type source_range: str
+        :param source_content_crc64: Specify the crc64 calculated for the range of bytes that must be
+         read from the copy source. Default value is None.
+        :type source_content_crc64: bytes
+        :param copy_source_authorization: Only Bearer type is supported. Credentials should be a valid
+         OAuth access token to copy source. Default value is None.
+        :type copy_source_authorization: str
+        :param file_last_written_mode: If the file last write time should be preserved or overwritten.
+         Known values are: "Now" and "Preserve". Default value is None.
+        :type file_last_written_mode: str or ~azure.storage.fileshare.models.FileLastWrittenMode
+        :param source_modified_access_conditions: Parameter group. Default value is None.
+        :type source_modified_access_conditions:
+         ~azure.storage.fileshare.models.SourceModifiedAccessConditions
+        :param lease_access_conditions: Parameter group. Default value is None.
+        :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        comp: Literal["range"] = kwargs.pop("comp", _params.pop("comp", "range"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _source_if_match_crc64 = None
+        _source_if_none_match_crc64 = None
+        _lease_id = None
+        if source_modified_access_conditions is not None:
+            _source_if_match_crc64 = source_modified_access_conditions.source_if_match_crc64
+            _source_if_none_match_crc64 = source_modified_access_conditions.source_if_none_match_crc64
+        if lease_access_conditions is not None:
+            _lease_id = lease_access_conditions.lease_id
+
+        _request = build_upload_range_from_url_request(
+            url=self._config.url,
+            range=range,
+            copy_source=copy_source,
+            content_length=content_length,
+            timeout=timeout,
+            source_range=source_range,
+            source_content_crc64=source_content_crc64,
+            source_if_match_crc64=_source_if_match_crc64,
+            source_if_none_match_crc64=_source_if_none_match_crc64,
+            lease_id=_lease_id,
+            copy_source_authorization=copy_source_authorization,
+            file_last_written_mode=file_last_written_mode,
+            allow_trailing_dot=self._config.allow_trailing_dot,
+            allow_source_trailing_dot=self._config.allow_source_trailing_dot,
+            file_request_intent=self._config.file_request_intent,
+            comp=comp,
+            file_range_write_from_url=self._config.file_range_write_from_url,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [201]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["x-ms-content-crc64"] = self._deserialize(
+            "bytearray", response.headers.get("x-ms-content-crc64")
+        )
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+        response_headers["x-ms-request-server-encrypted"] = self._deserialize(
+            "bool", response.headers.get("x-ms-request-server-encrypted")
+        )
+        response_headers["x-ms-file-last-write-time"] = self._deserialize(
+            "str", response.headers.get("x-ms-file-last-write-time")
+        )
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
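+    # Usage sketch for upload_range_from_url above: a server-side copy of
+    # 1 KiB from a source blob or file into this file. The SAS URL is a
+    # placeholder; per the docstring, cross-account sources must be authorized
+    # with a shared access signature. content_length=0 reflects that no
+    # request body is transmitted for a URL-sourced write:
+    #
+    #     file_ops.upload_range_from_url(
+    #         range="bytes=0-1023",
+    #         copy_source="https://src.file.core.windows.net/share/f.txt?<sas>",
+    #         content_length=0,
+    #         source_range="bytes=0-1023",
+    #     )
+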
+    @distributed_trace
+    def get_range_list(
+        self,
+        sharesnapshot: Optional[str] = None,
+        prevsharesnapshot: Optional[str] = None,
+        timeout: Optional[int] = None,
+        range: Optional[str] = None,
+        support_rename: Optional[bool] = None,
+        lease_access_conditions: Optional[_models.LeaseAccessConditions] = None,
+        **kwargs: Any
+    ) -> _models.ShareFileRangeList:
+        # pylint: disable=line-too-long
+        """Returns the list of valid ranges for a file.
+
+        :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present,
+         specifies the share snapshot to query. Default value is None.
+        :type sharesnapshot: str
+        :param prevsharesnapshot: The previous snapshot parameter is an opaque DateTime value that,
+         when present, specifies an earlier share snapshot to diff against: only ranges that changed
+         between that snapshot and the target (sharesnapshot, or the live file) are returned. Default
+         value is None.
+        :type prevsharesnapshot: str
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
+         Timeouts for File Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param range: Specifies the range of bytes over which to list ranges, inclusively. Default
+         value is None.
+        :type range: str
+        :param support_rename: This header is allowed only when the prevsharesnapshot query parameter
+         is set. Determines whether the changed ranges should be listed for a file that was renamed or
+         moved between the target snapshot (or the live file) and the previous snapshot. If true, the
+         valid changed ranges for the file are returned; if false, the operation fails with a 409
+         (Conflict) response. The service default is false. Default value is None.
+        :type support_rename: bool
+        :param lease_access_conditions: Parameter group. Default value is None.
+        :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions
+        :return: ShareFileRangeList or the result of cls(response)
+        :rtype: ~azure.storage.fileshare.models.ShareFileRangeList
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        comp: Literal["rangelist"] = kwargs.pop("comp", _params.pop("comp", "rangelist"))
+        cls: ClsType[_models.ShareFileRangeList] = kwargs.pop("cls", None)
+
+        _lease_id = None
+        if lease_access_conditions is not None:
+            _lease_id = lease_access_conditions.lease_id
+
+        _request = build_get_range_list_request(
+            url=self._config.url,
+            sharesnapshot=sharesnapshot,
+            prevsharesnapshot=prevsharesnapshot,
+            timeout=timeout,
+            range=range,
+            lease_id=_lease_id,
+            support_rename=support_rename,
+            allow_trailing_dot=self._config.allow_trailing_dot,
+            file_request_intent=self._config.file_request_intent,
+            comp=comp,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["x-ms-content-length"] = self._deserialize("int", response.headers.get("x-ms-content-length"))
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+
+        deserialized = self._deserialize("ShareFileRangeList", pipeline_response.http_response)
+
+        if cls:
+            return cls(pipeline_response, deserialized, response_headers)  # type: ignore
+
+        return deserialized  # type: ignore
+
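+    # Usage sketch for get_range_list above. The attribute names on the
+    # returned ShareFileRangeList model are assumptions (they are not visible
+    # in this file), but the call itself matches the signature above:
+    #
+    #     range_list = file_ops.get_range_list(range="bytes=0-4194303")
+    #     for r in range_list.ranges:  # assumed attribute
+    #         print(r.start, r.end)
+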
+    @distributed_trace
+    def start_copy(  # pylint: disable=inconsistent-return-statements
+        self,
+        copy_source: str,
+        timeout: Optional[int] = None,
+        metadata: Optional[Dict[str, str]] = None,
+        file_permission: str = "inherit",
+        file_permission_format: Optional[Union[str, _models.FilePermissionFormat]] = None,
+        file_permission_key: Optional[str] = None,
+        owner: Optional[str] = None,
+        group: Optional[str] = None,
+        file_mode: Optional[str] = None,
+        file_mode_copy_mode: Optional[Union[str, _models.ModeCopyMode]] = None,
+        file_owner_copy_mode: Optional[Union[str, _models.OwnerCopyMode]] = None,
+        copy_file_smb_info: Optional[_models.CopyFileSmbInfo] = None,
+        lease_access_conditions: Optional[_models.LeaseAccessConditions] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """Copies a blob or file to a destination file within the storage account.
+
+        :param copy_source: Specifies the URL of the source file or blob, up to 2 KB in length. To copy
+         a file to another file within the same storage account, you may use Shared Key to authenticate
+         the source file. If you are copying a file from another storage account, or if you are copying
+         a blob from the same storage account or another storage account, then you must authenticate the
+         source file or blob using a shared access signature. If the source is a public blob, no
+         authentication is required to perform the copy operation. A file in a share snapshot can also
+         be specified as a copy source. Required.
+        :type copy_source: str
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
+         Timeouts for File Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param metadata: Name-value pairs to associate with a file storage object. Default value is
+         None.
+        :type metadata: dict[str, str]
+        :param file_permission: If specified, the permission (security descriptor) is set for the
+         directory/file. This header can be used if the permission size is <= 8KB; otherwise the
+         x-ms-file-permission-key header must be used. If SDDL is specified as input, it must contain
+         owner, group and dacl. Note: only one of x-ms-file-permission or x-ms-file-permission-key
+         should be specified. Default value is "inherit".
+        :type file_permission: str
+        :param file_permission_format: Optional. Available for version 2023-06-01 and later. Specifies
+         the format in which the permission is returned. Acceptable values are SDDL or binary. If
+         x-ms-file-permission-format is unspecified or explicitly set to SDDL, the permission is
+         returned in SDDL format. If x-ms-file-permission-format is explicitly set to binary, the
+         permission is returned as a base64 string representing the binary encoding of the permission.
+         Known values are: "Sddl" and "Binary". Default value is None.
+        :type file_permission_format: str or ~azure.storage.fileshare.models.FilePermissionFormat
+        :param file_permission_key: Key of the permission to be set for the directory/file. Note: Only
+         one of the x-ms-file-permission or x-ms-file-permission-key should be specified. Default value
+         is None.
+        :type file_permission_key: str
+        :param owner: Optional, NFS only. The owner of the file or directory. Default value is None.
+        :type owner: str
+        :param group: Optional, NFS only. The owning group of the file or directory. Default value is
+         None.
+        :type group: str
+        :param file_mode: Optional, NFS only. The file mode of the file or directory. Default value is
+         None.
+        :type file_mode: str
+        :param file_mode_copy_mode: NFS only. Applicable only when the copy source is a File.
+         Determines the copy behavior of the mode bits of the file. source: The mode on the destination
+         file is copied from the source file. override: The mode on the destination file is determined
+         via the x-ms-mode header. Known values are: "source" and "override". Default value is None.
+        :type file_mode_copy_mode: str or ~azure.storage.fileshare.models.ModeCopyMode
+        :param file_owner_copy_mode: NFS only. Determines the copy behavior of the owner user
+         identifier (UID) and group identifier (GID) of the file. source: The owner user identifier
+         (UID) and group identifier (GID) on the destination file is copied from the source file.
+         override: The owner user identifier (UID) and group identifier (GID) on the destination file is
+         determined via the x-ms-owner and x-ms-group headers. Known values are: "source" and
+         "override". Default value is None.
+        :type file_owner_copy_mode: str or ~azure.storage.fileshare.models.OwnerCopyMode
+        :param copy_file_smb_info: Parameter group. Default value is None.
+        :type copy_file_smb_info: ~azure.storage.fileshare.models.CopyFileSmbInfo
+        :param lease_access_conditions: Parameter group. Default value is None.
+        :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = kwargs.pop("params", {}) or {}
+
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _file_permission_copy_mode = None
+        _ignore_read_only = None
+        _file_attributes = None
+        _file_creation_time = None
+        _file_last_write_time = None
+        _file_change_time = None
+        _set_archive_attribute = None
+        _lease_id = None
+        if copy_file_smb_info is not None:
+            _file_attributes = copy_file_smb_info.file_attributes
+            _file_change_time = copy_file_smb_info.file_change_time
+            _file_creation_time = copy_file_smb_info.file_creation_time
+            _file_last_write_time = copy_file_smb_info.file_last_write_time
+            _file_permission_copy_mode = copy_file_smb_info.file_permission_copy_mode
+            _ignore_read_only = copy_file_smb_info.ignore_read_only
+            _set_archive_attribute = copy_file_smb_info.set_archive_attribute
+        if lease_access_conditions is not None:
+            _lease_id = lease_access_conditions.lease_id
+
+        _request = build_start_copy_request(
+            url=self._config.url,
+            copy_source=copy_source,
+            timeout=timeout,
+            metadata=metadata,
+            file_permission=file_permission,
+            file_permission_format=file_permission_format,
+            file_permission_key=file_permission_key,
+            file_permission_copy_mode=_file_permission_copy_mode,
+            ignore_read_only=_ignore_read_only,
+            file_attributes=_file_attributes,
+            file_creation_time=_file_creation_time,
+            file_last_write_time=_file_last_write_time,
+            file_change_time=_file_change_time,
+            set_archive_attribute=_set_archive_attribute,
+            lease_id=_lease_id,
+            owner=owner,
+            group=group,
+            file_mode=file_mode,
+            file_mode_copy_mode=file_mode_copy_mode,
+            file_owner_copy_mode=file_owner_copy_mode,
+            allow_trailing_dot=self._config.allow_trailing_dot,
+            allow_source_trailing_dot=self._config.allow_source_trailing_dot,
+            file_request_intent=self._config.file_request_intent,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [202]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+        response_headers["x-ms-copy-id"] = self._deserialize("str", response.headers.get("x-ms-copy-id"))
+        response_headers["x-ms-copy-status"] = self._deserialize("str", response.headers.get("x-ms-copy-status"))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
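+    # Usage sketch for start_copy above: the id needed to abort a pending copy
+    # is returned in the x-ms-copy-id response header, which a `cls` callback
+    # can capture (`file_ops` is an assumed instance of this operations class;
+    # the SAS URL is a placeholder):
+    #
+    #     copy_id = file_ops.start_copy(
+    #         copy_source="https://src.file.core.windows.net/share/f.txt?<sas>",
+    #         cls=lambda resp, deserialized, hdrs: hdrs["x-ms-copy-id"],
+    #     )
+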
+    @distributed_trace
+    def abort_copy(  # pylint: disable=inconsistent-return-statements
+        self,
+        copy_id: str,
+        timeout: Optional[int] = None,
+        lease_access_conditions: Optional[_models.LeaseAccessConditions] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """Aborts a pending Copy File operation, and leaves a destination file with zero length and full
+        metadata.
+
+        :param copy_id: The copy identifier provided in the x-ms-copy-id header of the original Copy
+         File operation. Required.
+        :type copy_id: str
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
+         Timeouts for File Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param lease_access_conditions: Parameter group. Default value is None.
+        :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        comp: Literal["copy"] = kwargs.pop("comp", _params.pop("comp", "copy"))
+        copy_action_abort_constant: Literal["abort"] = kwargs.pop(
+            "copy_action_abort_constant", _headers.pop("x-ms-copy-action", "abort")
+        )
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _lease_id = None
+        if lease_access_conditions is not None:
+            _lease_id = lease_access_conditions.lease_id
+
+        _request = build_abort_copy_request(
+            url=self._config.url,
+            copy_id=copy_id,
+            timeout=timeout,
+            lease_id=_lease_id,
+            allow_trailing_dot=self._config.allow_trailing_dot,
+            file_request_intent=self._config.file_request_intent,
+            comp=comp,
+            copy_action_abort_constant=copy_action_abort_constant,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [204]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
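+    # Usage sketch for abort_copy above, using the id captured in the
+    # start_copy sketch; per the docstring this leaves a zero-length
+    # destination file with full metadata:
+    #
+    #     file_ops.abort_copy(copy_id=copy_id)
+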
+    @distributed_trace
+    def list_handles(
+        self,
+        marker: Optional[str] = None,
+        maxresults: Optional[int] = None,
+        timeout: Optional[int] = None,
+        sharesnapshot: Optional[str] = None,
+        **kwargs: Any
+    ) -> _models.ListHandlesResponse:
+        # pylint: disable=line-too-long
+        """Lists handles for a file.
+
+        :param marker: A string value that identifies the portion of the list to be returned with the
+         next list operation. The operation returns a marker value within the response body if the list
+         returned was not complete. The marker value may then be used in a subsequent call to request
+         the next set of list items. The marker value is opaque to the client. Default value is None.
+        :type marker: str
+        :param maxresults: Specifies the maximum number of entries to return. If the request does not
+         specify maxresults, or specifies a value greater than 5,000, the server will return up to 5,000
+         items. Default value is None.
+        :type maxresults: int
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
+         Timeouts for File Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present,
+         specifies the share snapshot to query. Default value is None.
+        :type sharesnapshot: str
+        :return: ListHandlesResponse or the result of cls(response)
+        :rtype: ~azure.storage.fileshare.models.ListHandlesResponse
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        comp: Literal["listhandles"] = kwargs.pop("comp", _params.pop("comp", "listhandles"))
+        cls: ClsType[_models.ListHandlesResponse] = kwargs.pop("cls", None)
+
+        _request = build_list_handles_request(
+            url=self._config.url,
+            marker=marker,
+            maxresults=maxresults,
+            timeout=timeout,
+            sharesnapshot=sharesnapshot,
+            allow_trailing_dot=self._config.allow_trailing_dot,
+            file_request_intent=self._config.file_request_intent,
+            comp=comp,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["Content-Type"] = self._deserialize("str", response.headers.get("Content-Type"))
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+
+        deserialized = self._deserialize("ListHandlesResponse", pipeline_response.http_response)
+
+        if cls:
+            return cls(pipeline_response, deserialized, response_headers)  # type: ignore
+
+        return deserialized  # type: ignore
+
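+    # A paging sketch for list_handles, driven by the marker/maxresults parameters
+    # documented above. The `handle_list` and `next_marker` field names are
+    # assumptions about the ListHandlesResponse model, not confirmed in this file:
+    #
+    #     marker = None
+    #     while True:
+    #         page = ops.list_handles(marker=marker, maxresults=100)
+    #         for handle in page.handle_list or []:
+    #             print(handle)
+    #         marker = page.next_marker
+    #         if not marker:
+    #             break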
+    @distributed_trace
+    def force_close_handles(  # pylint: disable=inconsistent-return-statements
+        self,
+        handle_id: str,
+        timeout: Optional[int] = None,
+        marker: Optional[str] = None,
+        sharesnapshot: Optional[str] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """Closes all handles open for a given file.
+
+        :param handle_id: Specifies the handle ID opened on the file or directory to be closed.
+         Asterisk (‘*’) is a wildcard that specifies all handles. Required.
+        :type handle_id: str
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
+         Timeouts for File Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param marker: A string value that identifies the portion of the list to be returned with the
+         next list operation. The operation returns a marker value within the response body if the list
+         returned was not complete. The marker value may then be used in a subsequent call to request
+         the next set of list items. The marker value is opaque to the client. Default value is None.
+        :type marker: str
+        :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present,
+         specifies the share snapshot to query. Default value is None.
+        :type sharesnapshot: str
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        comp: Literal["forceclosehandles"] = kwargs.pop("comp", _params.pop("comp", "forceclosehandles"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _request = build_force_close_handles_request(
+            url=self._config.url,
+            handle_id=handle_id,
+            timeout=timeout,
+            marker=marker,
+            sharesnapshot=sharesnapshot,
+            allow_trailing_dot=self._config.allow_trailing_dot,
+            file_request_intent=self._config.file_request_intent,
+            comp=comp,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+        response_headers["x-ms-marker"] = self._deserialize("str", response.headers.get("x-ms-marker"))
+        response_headers["x-ms-number-of-handles-closed"] = self._deserialize(
+            "int", response.headers.get("x-ms-number-of-handles-closed")
+        )
+        response_headers["x-ms-number-of-handles-failed"] = self._deserialize(
+            "int", response.headers.get("x-ms-number-of-handles-failed")
+        )
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
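+    # Sketch: closing every handle on a file via the '*' wildcard. Because this
+    # method returns None, the continuation token in the x-ms-marker header is
+    # surfaced through a `cls` callback, whose shape follows the ClsType alias
+    # used in this module:
+    #
+    #     def keep_headers(_response, _body, headers):
+    #         return headers
+    #
+    #     marker = None
+    #     while True:
+    #         headers = ops.force_close_handles("*", marker=marker, cls=keep_headers)
+    #         marker = headers.get("x-ms-marker")
+    #         if not marker:
+    #             break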
+    @distributed_trace
+    def rename(  # pylint: disable=inconsistent-return-statements
+        self,
+        rename_source: str,
+        timeout: Optional[int] = None,
+        replace_if_exists: Optional[bool] = None,
+        ignore_read_only: Optional[bool] = None,
+        file_permission: str = "inherit",
+        file_permission_format: Optional[Union[str, _models.FilePermissionFormat]] = None,
+        file_permission_key: Optional[str] = None,
+        metadata: Optional[Dict[str, str]] = None,
+        source_lease_access_conditions: Optional[_models.SourceLeaseAccessConditions] = None,
+        destination_lease_access_conditions: Optional[_models.DestinationLeaseAccessConditions] = None,
+        copy_file_smb_info: Optional[_models.CopyFileSmbInfo] = None,
+        file_http_headers: Optional[_models.FileHTTPHeaders] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """Renames a file.
+
+        :param rename_source: Specifies the URI-style path of the source file, up to 2 KB in
+         length. Required.
+        :type rename_source: str
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
+         Timeouts for File Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param replace_if_exists: Optional. A boolean value specifying whether, if the destination
+         file already exists, this request will overwrite it. If true, the rename will succeed and
+         will overwrite the destination file. If not provided, or if false and the destination
+         file does exist, the request will not overwrite the destination file. If provided and the
+         destination file doesn’t exist, the rename will succeed. Note: This value does not
+         override the x-ms-file-copy-ignore-read-only header value. Default value is None.
+        :type replace_if_exists: bool
+        :param ignore_read_only: Optional. A boolean value that specifies whether the ReadOnly
+         attribute on a preexisting destination file should be respected. If true, the rename will
+         succeed; otherwise, a preexisting file at the destination with the ReadOnly attribute set
+         will cause the rename to fail. Default value is None.
+        :type ignore_read_only: bool
+        :param file_permission: If specified, the permission (security descriptor) shall be set
+         for the directory/file. This header can be used if the permission size is <= 8 KB;
+         otherwise the x-ms-file-permission-key header shall be used. Default value: Inherit. If
+         SDDL is specified as input, it must have an owner, group, and dacl. Note: only one of
+         x-ms-file-permission or x-ms-file-permission-key should be specified. Default value is
+         "inherit".
+        :type file_permission: str
+        :param file_permission_format: Optional. Available for version 2023-06-01 and later. Specifies
+         the format in which the permission is returned. Acceptable values are SDDL or binary. If
+         x-ms-file-permission-format is unspecified or explicitly set to SDDL, the permission is
+         returned in SDDL format. If x-ms-file-permission-format is explicitly set to binary, the
+         permission is returned as a base64 string representing the binary encoding of the permission.
+         Known values are: "Sddl" and "Binary". Default value is None.
+        :type file_permission_format: str or ~azure.storage.fileshare.models.FilePermissionFormat
+        :param file_permission_key: Key of the permission to be set for the directory/file. Note: Only
+         one of the x-ms-file-permission or x-ms-file-permission-key should be specified. Default value
+         is None.
+        :type file_permission_key: str
+        :param metadata: A name-value pair to associate with a file storage object. Default value is
+         None.
+        :type metadata: dict[str, str]
+        :param source_lease_access_conditions: Parameter group. Default value is None.
+        :type source_lease_access_conditions:
+         ~azure.storage.fileshare.models.SourceLeaseAccessConditions
+        :param destination_lease_access_conditions: Parameter group. Default value is None.
+        :type destination_lease_access_conditions:
+         ~azure.storage.fileshare.models.DestinationLeaseAccessConditions
+        :param copy_file_smb_info: Parameter group. Default value is None.
+        :type copy_file_smb_info: ~azure.storage.fileshare.models.CopyFileSmbInfo
+        :param file_http_headers: Parameter group. Default value is None.
+        :type file_http_headers: ~azure.storage.fileshare.models.FileHTTPHeaders
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        comp: Literal["rename"] = kwargs.pop("comp", _params.pop("comp", "rename"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _source_lease_id = None
+        _destination_lease_id = None
+        _file_attributes = None
+        _file_creation_time = None
+        _file_last_write_time = None
+        _file_change_time = None
+        _file_content_type = None
+        if source_lease_access_conditions is not None:
+            _source_lease_id = source_lease_access_conditions.source_lease_id
+        if destination_lease_access_conditions is not None:
+            _destination_lease_id = destination_lease_access_conditions.destination_lease_id
+        if copy_file_smb_info is not None:
+            _file_attributes = copy_file_smb_info.file_attributes
+            _file_change_time = copy_file_smb_info.file_change_time
+            _file_creation_time = copy_file_smb_info.file_creation_time
+            _file_last_write_time = copy_file_smb_info.file_last_write_time
+        if file_http_headers is not None:
+            _file_content_type = file_http_headers.file_content_type
+
+        _request = build_rename_request(
+            url=self._config.url,
+            rename_source=rename_source,
+            timeout=timeout,
+            replace_if_exists=replace_if_exists,
+            ignore_read_only=ignore_read_only,
+            source_lease_id=_source_lease_id,
+            destination_lease_id=_destination_lease_id,
+            file_attributes=_file_attributes,
+            file_creation_time=_file_creation_time,
+            file_last_write_time=_file_last_write_time,
+            file_change_time=_file_change_time,
+            file_permission=file_permission,
+            file_permission_format=file_permission_format,
+            file_permission_key=file_permission_key,
+            metadata=metadata,
+            file_content_type=_file_content_type,
+            allow_trailing_dot=self._config.allow_trailing_dot,
+            allow_source_trailing_dot=self._config.allow_source_trailing_dot,
+            file_request_intent=self._config.file_request_intent,
+            comp=comp,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+        response_headers["x-ms-request-server-encrypted"] = self._deserialize(
+            "bool", response.headers.get("x-ms-request-server-encrypted")
+        )
+        response_headers["x-ms-file-permission-key"] = self._deserialize(
+            "str", response.headers.get("x-ms-file-permission-key")
+        )
+        response_headers["x-ms-file-attributes"] = self._deserialize(
+            "str", response.headers.get("x-ms-file-attributes")
+        )
+        response_headers["x-ms-file-creation-time"] = self._deserialize(
+            "str", response.headers.get("x-ms-file-creation-time")
+        )
+        response_headers["x-ms-file-last-write-time"] = self._deserialize(
+            "str", response.headers.get("x-ms-file-last-write-time")
+        )
+        response_headers["x-ms-file-change-time"] = self._deserialize(
+            "str", response.headers.get("x-ms-file-change-time")
+        )
+        response_headers["x-ms-file-id"] = self._deserialize("str", response.headers.get("x-ms-file-id"))
+        response_headers["x-ms-file-parent-id"] = self._deserialize("str", response.headers.get("x-ms-file-parent-id"))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
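+    # Sketch: renaming with overwrite enabled while honouring a lease on the
+    # destination. The import path follows the docstring cross-reference
+    # (~azure.storage.fileshare.models); treat it as an assumption:
+    #
+    #     from azure.storage.fileshare.models import DestinationLeaseAccessConditions
+    #
+    #     ops.rename(
+    #         rename_source="https://account.file.core.windows.net/share/old.txt",
+    #         replace_if_exists=True,
+    #         destination_lease_access_conditions=DestinationLeaseAccessConditions(
+    #             destination_lease_id="<lease-id>",
+    #         ),
+    #     )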
+    @distributed_trace
+    def create_symbolic_link(  # pylint: disable=inconsistent-return-statements
+        self,
+        link_text: str,
+        timeout: Optional[int] = None,
+        metadata: Optional[Dict[str, str]] = None,
+        file_creation_time: str = "now",
+        file_last_write_time: str = "now",
+        request_id_parameter: Optional[str] = None,
+        owner: Optional[str] = None,
+        group: Optional[str] = None,
+        lease_access_conditions: Optional[_models.LeaseAccessConditions] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """Creates a symbolic link.
+
+        :param link_text: NFS only. The path to the original file that the symbolic link points
+         to. The path is a string that is not resolved and is stored as is. The path can be
+         absolute or relative, depending on the content stored in the symbolic link file.
+         Required.
+        :type link_text: str
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
+         Timeouts for File Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param metadata: A name-value pair to associate with a file storage object. Default value is
+         None.
+        :type metadata: dict[str, str]
+        :param file_creation_time: Creation time for the file/directory. Default value: Now. Default
+         value is "now".
+        :type file_creation_time: str
+        :param file_last_write_time: Last write time for the file/directory. Default value: Now.
+         Default value is "now".
+        :type file_last_write_time: str
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param owner: Optional, NFS only. The owner of the file or directory. Default value is None.
+        :type owner: str
+        :param group: Optional, NFS only. The owning group of the file or directory. Default value is
+         None.
+        :type group: str
+        :param lease_access_conditions: Parameter group. Default value is None.
+        :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        restype: Literal["symboliclink"] = kwargs.pop("restype", _params.pop("restype", "symboliclink"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _lease_id = None
+        if lease_access_conditions is not None:
+            _lease_id = lease_access_conditions.lease_id
+
+        _request = build_create_symbolic_link_request(
+            url=self._config.url,
+            link_text=link_text,
+            timeout=timeout,
+            metadata=metadata,
+            file_creation_time=file_creation_time,
+            file_last_write_time=file_last_write_time,
+            request_id_parameter=request_id_parameter,
+            lease_id=_lease_id,
+            owner=owner,
+            group=group,
+            file_request_intent=self._config.file_request_intent,
+            restype=restype,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [201]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+        response_headers["x-ms-file-creation-time"] = self._deserialize(
+            "str", response.headers.get("x-ms-file-creation-time")
+        )
+        response_headers["x-ms-file-last-write-time"] = self._deserialize(
+            "str", response.headers.get("x-ms-file-last-write-time")
+        )
+        response_headers["x-ms-file-change-time"] = self._deserialize(
+            "str", response.headers.get("x-ms-file-change-time")
+        )
+        response_headers["x-ms-file-id"] = self._deserialize("str", response.headers.get("x-ms-file-id"))
+        response_headers["x-ms-file-parent-id"] = self._deserialize("str", response.headers.get("x-ms-file-parent-id"))
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-mode"] = self._deserialize("str", response.headers.get("x-ms-mode"))
+        response_headers["x-ms-owner"] = self._deserialize("str", response.headers.get("x-ms-owner"))
+        response_headers["x-ms-group"] = self._deserialize("str", response.headers.get("x-ms-group"))
+        response_headers["x-ms-file-file-type"] = self._deserialize("str", response.headers.get("x-ms-file-file-type"))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
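+    # Sketch (NFS shares only, per the docstring above): creating a relative
+    # symlink with explicit POSIX ownership. The link text is stored as-is and
+    # never resolved by the service:
+    #
+    #     ops.create_symbolic_link(
+    #         link_text="../shared/data.bin",
+    #         owner="1000",
+    #         group="1000",
+    #         metadata={"purpose": "example"},
+    #     )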
+    @distributed_trace
+    def get_symbolic_link(  # pylint: disable=inconsistent-return-statements
+        self,
+        timeout: Optional[int] = None,
+        sharesnapshot: Optional[str] = None,
+        request_id_parameter: Optional[str] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """Gets the symbolic link.
+
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
+         Timeouts for File Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present,
+         specifies the share snapshot to query. Default value is None.
+        :type sharesnapshot: str
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        restype: Literal["symboliclink"] = kwargs.pop("restype", _params.pop("restype", "symboliclink"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _request = build_get_symbolic_link_request(
+            url=self._config.url,
+            timeout=timeout,
+            sharesnapshot=sharesnapshot,
+            request_id_parameter=request_id_parameter,
+            file_request_intent=self._config.file_request_intent,
+            restype=restype,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+        response_headers["x-ms-link-text"] = self._deserialize("str", response.headers.get("x-ms-link-text"))
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
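+    # Sketch: this operation returns the link target only in the x-ms-link-text
+    # response header, so a `cls` callback is needed to read it:
+    #
+    #     link_text = ops.get_symbolic_link(
+    #         cls=lambda _response, _body, headers: headers["x-ms-link-text"],
+    #     )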
+    @distributed_trace
+    def create_hard_link(  # pylint: disable=inconsistent-return-statements
+        self,
+        target_file: str,
+        timeout: Optional[int] = None,
+        request_id_parameter: Optional[str] = None,
+        lease_access_conditions: Optional[_models.LeaseAccessConditions] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """Creates a hard link.
+
+        :param target_file: NFS only. Specifies the path of the target file to which the link
+         will be created, up to 2 KiB in length. It should be the full path of the target from
+         the root. The target file must be in the same share and hence in the same storage
+         account. Required.
+        :type target_file: str
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
+         Timeouts for File Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param lease_access_conditions: Parameter group. Default value is None.
+        :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        restype: Literal["hardlink"] = kwargs.pop("restype", _params.pop("restype", "hardlink"))
+        file_type_constant: Literal["file"] = kwargs.pop("file_type_constant", _headers.pop("x-ms-type", "file"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _lease_id = None
+        if lease_access_conditions is not None:
+            _lease_id = lease_access_conditions.lease_id
+
+        _request = build_create_hard_link_request(
+            url=self._config.url,
+            target_file=target_file,
+            timeout=timeout,
+            request_id_parameter=request_id_parameter,
+            lease_id=_lease_id,
+            file_request_intent=self._config.file_request_intent,
+            restype=restype,
+            file_type_constant=file_type_constant,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [201]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+        response_headers["x-ms-file-creation-time"] = self._deserialize(
+            "str", response.headers.get("x-ms-file-creation-time")
+        )
+        response_headers["x-ms-file-last-write-time"] = self._deserialize(
+            "str", response.headers.get("x-ms-file-last-write-time")
+        )
+        response_headers["x-ms-file-change-time"] = self._deserialize(
+            "str", response.headers.get("x-ms-file-change-time")
+        )
+        response_headers["x-ms-file-id"] = self._deserialize("str", response.headers.get("x-ms-file-id"))
+        response_headers["x-ms-file-parent-id"] = self._deserialize("str", response.headers.get("x-ms-file-parent-id"))
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-link-count"] = self._deserialize("int", response.headers.get("x-ms-link-count"))
+        response_headers["x-ms-mode"] = self._deserialize("str", response.headers.get("x-ms-mode"))
+        response_headers["x-ms-owner"] = self._deserialize("str", response.headers.get("x-ms-owner"))
+        response_headers["x-ms-group"] = self._deserialize("str", response.headers.get("x-ms-group"))
+        response_headers["x-ms-file-file-type"] = self._deserialize("str", response.headers.get("x-ms-file-file-type"))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
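+    # Sketch (NFS only): hard-linking to an existing file in the same share and
+    # reading the resulting link count from the response headers via `cls`:
+    #
+    #     headers = ops.create_hard_link(
+    #         target_file="/dir/original.bin",
+    #         cls=lambda _response, _body, h: h,
+    #     )
+    #     print(headers["x-ms-link-count"])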
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/fileshare/_generated/operations/_patch.py b/.venv/lib/python3.12/site-packages/azure/storage/fileshare/_generated/operations/_patch.py
new file mode 100644
index 00000000..f7dd3251
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/fileshare/_generated/operations/_patch.py
@@ -0,0 +1,20 @@
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------
+"""Customize generated code here.
+
+Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize
+"""
+from typing import List
+
+__all__: List[str] = []  # Add all objects you want publicly available to users at this package level
+
+
+def patch_sdk():
+    """Do not remove from this file.
+
+    `patch_sdk` is a last resort escape hatch that allows you to do customizations
+    you can't accomplish using the techniques described in
+    https://aka.ms/azsdk/python/dpcodegen/python/customize
+    """
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/fileshare/_generated/operations/_service_operations.py b/.venv/lib/python3.12/site-packages/azure/storage/fileshare/_generated/operations/_service_operations.py
new file mode 100644
index 00000000..57e5b246
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/fileshare/_generated/operations/_service_operations.py
@@ -0,0 +1,410 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+import sys
+from typing import Any, Callable, Dict, List, Literal, Optional, TypeVar, Union
+
+from azure.core import PipelineClient
+from azure.core.exceptions import (
+    ClientAuthenticationError,
+    HttpResponseError,
+    ResourceExistsError,
+    ResourceNotFoundError,
+    ResourceNotModifiedError,
+    map_error,
+)
+from azure.core.pipeline import PipelineResponse
+from azure.core.rest import HttpRequest, HttpResponse
+from azure.core.tracing.decorator import distributed_trace
+from azure.core.utils import case_insensitive_dict
+
+from .. import models as _models
+from .._configuration import AzureFileStorageConfiguration
+from .._serialization import Deserializer, Serializer
+
+if sys.version_info >= (3, 9):
+    from collections.abc import MutableMapping
+else:
+    from typing import MutableMapping  # type: ignore
+T = TypeVar("T")
+ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
+
+_SERIALIZER = Serializer()
+_SERIALIZER.client_side_validation = False
+
+
+def build_set_properties_request(
+    url: str,
+    *,
+    content: Any,
+    timeout: Optional[int] = None,
+    file_request_intent: Optional[Union[str, _models.ShareTokenIntent]] = None,
+    **kwargs: Any
+) -> HttpRequest:
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    restype: Literal["service"] = kwargs.pop("restype", _params.pop("restype", "service"))
+    comp: Literal["properties"] = kwargs.pop("comp", _params.pop("comp", "properties"))
+    content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+    version: Literal["2025-05-05"] = kwargs.pop("version", _headers.pop("x-ms-version", "2025-05-05"))
+    accept = _headers.pop("Accept", "application/xml")
+
+    # Construct URL
+    _url = kwargs.pop("template_url", "{url}")
+    path_format_arguments = {
+        "url": _SERIALIZER.url("url", url, "str", skip_quote=True),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    _params["restype"] = _SERIALIZER.query("restype", restype, "str")
+    _params["comp"] = _SERIALIZER.query("comp", comp, "str")
+    if timeout is not None:
+        _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0)
+
+    # Construct headers
+    _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str")
+    if file_request_intent is not None:
+        _headers["x-ms-file-request-intent"] = _SERIALIZER.header("file_request_intent", file_request_intent, "str")
+    if content_type is not None:
+        _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, content=content, **kwargs)
+
+
+def build_get_properties_request(
+    url: str,
+    *,
+    timeout: Optional[int] = None,
+    file_request_intent: Optional[Union[str, _models.ShareTokenIntent]] = None,
+    **kwargs: Any
+) -> HttpRequest:
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    restype: Literal["service"] = kwargs.pop("restype", _params.pop("restype", "service"))
+    comp: Literal["properties"] = kwargs.pop("comp", _params.pop("comp", "properties"))
+    version: Literal["2025-05-05"] = kwargs.pop("version", _headers.pop("x-ms-version", "2025-05-05"))
+    accept = _headers.pop("Accept", "application/xml")
+
+    # Construct URL
+    _url = kwargs.pop("template_url", "{url}")
+    path_format_arguments = {
+        "url": _SERIALIZER.url("url", url, "str", skip_quote=True),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    _params["restype"] = _SERIALIZER.query("restype", restype, "str")
+    _params["comp"] = _SERIALIZER.query("comp", comp, "str")
+    if timeout is not None:
+        _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0)
+
+    # Construct headers
+    _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str")
+    if file_request_intent is not None:
+        _headers["x-ms-file-request-intent"] = _SERIALIZER.header("file_request_intent", file_request_intent, "str")
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
+
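+# These module-level build_*_request helpers only assemble HttpRequest objects;
+# ServiceOperations below runs them through the client pipeline. A
+# direct-construction sketch (the account URL is a placeholder):
+#
+#     req = build_get_properties_request("https://account.file.core.windows.net", timeout=10)
+#     assert req.method == "GET"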
+
+def build_list_shares_segment_request(
+    url: str,
+    *,
+    prefix: Optional[str] = None,
+    marker: Optional[str] = None,
+    maxresults: Optional[int] = None,
+    include: Optional[List[Union[str, _models.ListSharesIncludeType]]] = None,
+    timeout: Optional[int] = None,
+    file_request_intent: Optional[Union[str, _models.ShareTokenIntent]] = None,
+    **kwargs: Any
+) -> HttpRequest:
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    comp: Literal["list"] = kwargs.pop("comp", _params.pop("comp", "list"))
+    version: Literal["2025-05-05"] = kwargs.pop("version", _headers.pop("x-ms-version", "2025-05-05"))
+    accept = _headers.pop("Accept", "application/xml")
+
+    # Construct URL
+    _url = kwargs.pop("template_url", "{url}")
+    path_format_arguments = {
+        "url": _SERIALIZER.url("url", url, "str", skip_quote=True),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    _params["comp"] = _SERIALIZER.query("comp", comp, "str")
+    if prefix is not None:
+        _params["prefix"] = _SERIALIZER.query("prefix", prefix, "str")
+    if marker is not None:
+        _params["marker"] = _SERIALIZER.query("marker", marker, "str")
+    if maxresults is not None:
+        _params["maxresults"] = _SERIALIZER.query("maxresults", maxresults, "int", minimum=1)
+    if include is not None:
+        _params["include"] = _SERIALIZER.query("include", include, "[str]", div=",")
+    if timeout is not None:
+        _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0)
+
+    # Construct headers
+    _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str")
+    if file_request_intent is not None:
+        _headers["x-ms-file-request-intent"] = _SERIALIZER.header("file_request_intent", file_request_intent, "str")
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+class ServiceOperations:
+    """
+    .. warning::
+        **DO NOT** instantiate this class directly.
+
+        Instead, you should access the following operations through
+        :class:`~azure.storage.fileshare.AzureFileStorage`'s
+        :attr:`service` attribute.
+    """
+
+    models = _models
+
+    def __init__(self, *args, **kwargs):
+        input_args = list(args)
+        self._client: PipelineClient = input_args.pop(0) if input_args else kwargs.pop("client")
+        self._config: AzureFileStorageConfiguration = input_args.pop(0) if input_args else kwargs.pop("config")
+        self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer")
+        self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer")
+
+    @distributed_trace
+    def set_properties(  # pylint: disable=inconsistent-return-statements
+        self, storage_service_properties: _models.StorageServiceProperties, timeout: Optional[int] = None, **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """Sets properties for a storage account's File service endpoint, including properties for Storage
+        Analytics metrics and CORS (Cross-Origin Resource Sharing) rules.
+
+        :param storage_service_properties: The StorageService properties. Required.
+        :type storage_service_properties: ~azure.storage.fileshare.models.StorageServiceProperties
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
+         Timeouts for File Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        restype: Literal["service"] = kwargs.pop("restype", _params.pop("restype", "service"))
+        comp: Literal["properties"] = kwargs.pop("comp", _params.pop("comp", "properties"))
+        content_type: str = kwargs.pop("content_type", _headers.pop("Content-Type", "application/xml"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _content = self._serialize.body(storage_service_properties, "StorageServiceProperties", is_xml=True)
+
+        _request = build_set_properties_request(
+            url=self._config.url,
+            timeout=timeout,
+            file_request_intent=self._config.file_request_intent,
+            restype=restype,
+            comp=comp,
+            content_type=content_type,
+            version=self._config.version,
+            content=_content,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [202]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
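+    # Sketch: set_properties serializes the model to XML (see is_xml=True above)
+    # and expects a 202. Model construction details are assumptions based on the
+    # names referenced in the docstring:
+    #
+    #     from azure.storage.fileshare.models import StorageServiceProperties
+    #
+    #     props = StorageServiceProperties()  # populate metrics / CORS rules as needed
+    #     client.service.set_properties(props, timeout=30)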
+    @distributed_trace
+    def get_properties(self, timeout: Optional[int] = None, **kwargs: Any) -> _models.StorageServiceProperties:
+        # pylint: disable=line-too-long
+        """Gets the properties of a storage account's File service, including properties for Storage
+        Analytics metrics and CORS (Cross-Origin Resource Sharing) rules.
+
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
+         Timeouts for File Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :return: StorageServiceProperties or the result of cls(response)
+        :rtype: ~azure.storage.fileshare.models.StorageServiceProperties
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        restype: Literal["service"] = kwargs.pop("restype", _params.pop("restype", "service"))
+        comp: Literal["properties"] = kwargs.pop("comp", _params.pop("comp", "properties"))
+        cls: ClsType[_models.StorageServiceProperties] = kwargs.pop("cls", None)
+
+        _request = build_get_properties_request(
+            url=self._config.url,
+            timeout=timeout,
+            file_request_intent=self._config.file_request_intent,
+            restype=restype,
+            comp=comp,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+
+        deserialized = self._deserialize("StorageServiceProperties", pipeline_response.http_response)
+
+        if cls:
+            return cls(pipeline_response, deserialized, response_headers)  # type: ignore
+
+        return deserialized  # type: ignore
+
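+    # Sketch: the class docstring confirms this group is reached through the
+    # client's `service` attribute, so a read-modify-write of service properties
+    # looks like:
+    #
+    #     props = client.service.get_properties()
+    #     client.service.set_properties(props)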
+    @distributed_trace
+    def list_shares_segment(
+        self,
+        prefix: Optional[str] = None,
+        marker: Optional[str] = None,
+        maxresults: Optional[int] = None,
+        include: Optional[List[Union[str, _models.ListSharesIncludeType]]] = None,
+        timeout: Optional[int] = None,
+        **kwargs: Any
+    ) -> _models.ListSharesResponse:
+        # pylint: disable=line-too-long
+        """The List Shares Segment operation returns a list of the shares and share snapshots under the
+        specified account.
+
+        :param prefix: Filters the results to return only entries whose name begins with the specified
+         prefix. Default value is None.
+        :type prefix: str
+        :param marker: A string value that identifies the portion of the list to be returned with the
+         next list operation. The operation returns a marker value within the response body if the list
+         returned was not complete. The marker value may then be used in a subsequent call to request
+         the next set of list items. The marker value is opaque to the client. Default value is None.
+        :type marker: str
+        :param maxresults: Specifies the maximum number of entries to return. If the request does not
+         specify maxresults, or specifies a value greater than 5,000, the server will return up to 5,000
+         items. Default value is None.
+        :type maxresults: int
+        :param include: Include this parameter to specify one or more datasets to include in the
+         response. Default value is None.
+        :type include: list[str or ~azure.storage.fileshare.models.ListSharesIncludeType]
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
+         Timeouts for File Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :return: ListSharesResponse or the result of cls(response)
+        :rtype: ~azure.storage.fileshare.models.ListSharesResponse
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        comp: Literal["list"] = kwargs.pop("comp", _params.pop("comp", "list"))
+        cls: ClsType[_models.ListSharesResponse] = kwargs.pop("cls", None)
+
+        _request = build_list_shares_segment_request(
+            url=self._config.url,
+            prefix=prefix,
+            marker=marker,
+            maxresults=maxresults,
+            include=include,
+            timeout=timeout,
+            file_request_intent=self._config.file_request_intent,
+            comp=comp,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+
+        deserialized = self._deserialize("ListSharesResponse", pipeline_response.http_response)
+
+        if cls:
+            return cls(pipeline_response, deserialized, response_headers)  # type: ignore
+
+        return deserialized  # type: ignore
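+    # Paging sketch for list_shares_segment. The share_items / next_marker field
+    # names are assumptions about the ListSharesResponse model, not confirmed in
+    # this file:
+    #
+    #     marker = None
+    #     while True:
+    #         page = client.service.list_shares_segment(
+    #             prefix="logs-", maxresults=100, marker=marker
+    #         )
+    #         for share in page.share_items or []:
+    #             print(share.name)
+    #         marker = page.next_marker
+    #         if not marker:
+    #             break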
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/fileshare/_generated/operations/_share_operations.py b/.venv/lib/python3.12/site-packages/azure/storage/fileshare/_generated/operations/_share_operations.py
new file mode 100644
index 00000000..920a64e2
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/fileshare/_generated/operations/_share_operations.py
@@ -0,0 +1,2595 @@
+# pylint: disable=too-many-lines
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+from io import IOBase
+import sys
+from typing import Any, Callable, Dict, IO, List, Literal, Optional, TypeVar, Union, overload
+
+from azure.core import PipelineClient
+from azure.core.exceptions import (
+    ClientAuthenticationError,
+    HttpResponseError,
+    ResourceExistsError,
+    ResourceNotFoundError,
+    ResourceNotModifiedError,
+    map_error,
+)
+from azure.core.pipeline import PipelineResponse
+from azure.core.rest import HttpRequest, HttpResponse
+from azure.core.tracing.decorator import distributed_trace
+from azure.core.utils import case_insensitive_dict
+
+from .. import models as _models
+from .._configuration import AzureFileStorageConfiguration
+from .._serialization import Deserializer, Serializer
+
+if sys.version_info >= (3, 9):
+    from collections.abc import MutableMapping
+else:
+    from typing import MutableMapping  # type: ignore
+T = TypeVar("T")
+ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
+
+_SERIALIZER = Serializer()
+_SERIALIZER.client_side_validation = False
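+# Client-side validation is disabled, so constraints declared in the builders below
+# (e.g. ``minimum=0`` on ``timeout``, ``minimum=1`` on ``quota``) are not enforced
+# locally; the service performs the authoritative validation.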
+
+
+def build_create_request(
+    url: str,
+    *,
+    timeout: Optional[int] = None,
+    metadata: Optional[Dict[str, str]] = None,
+    quota: Optional[int] = None,
+    access_tier: Optional[Union[str, _models.ShareAccessTier]] = None,
+    enabled_protocols: Optional[str] = None,
+    root_squash: Optional[Union[str, _models.ShareRootSquash]] = None,
+    enable_snapshot_virtual_directory_access: Optional[bool] = None,
+    paid_bursting_enabled: Optional[bool] = None,
+    paid_bursting_max_bandwidth_mibps: Optional[int] = None,
+    paid_bursting_max_iops: Optional[int] = None,
+    share_provisioned_iops: Optional[int] = None,
+    share_provisioned_bandwidth_mibps: Optional[int] = None,
+    file_request_intent: Optional[Union[str, _models.ShareTokenIntent]] = None,
+    **kwargs: Any
+) -> HttpRequest:
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    restype: Literal["share"] = kwargs.pop("restype", _params.pop("restype", "share"))
+    version: Literal["2025-05-05"] = kwargs.pop("version", _headers.pop("x-ms-version", "2025-05-05"))
+    accept = _headers.pop("Accept", "application/xml")
+
+    # Construct URL
+    _url = kwargs.pop("template_url", "{url}")
+    path_format_arguments = {
+        "url": _SERIALIZER.url("url", url, "str", skip_quote=True),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    _params["restype"] = _SERIALIZER.query("restype", restype, "str")
+    if timeout is not None:
+        _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0)
+
+    # Construct headers
+    if metadata is not None:
+        _headers["x-ms-meta"] = _SERIALIZER.header("metadata", metadata, "{str}")
+    if quota is not None:
+        _headers["x-ms-share-quota"] = _SERIALIZER.header("quota", quota, "int", minimum=1)
+    if access_tier is not None:
+        _headers["x-ms-access-tier"] = _SERIALIZER.header("access_tier", access_tier, "str")
+    _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str")
+    if enabled_protocols is not None:
+        _headers["x-ms-enabled-protocols"] = _SERIALIZER.header("enabled_protocols", enabled_protocols, "str")
+    if root_squash is not None:
+        _headers["x-ms-root-squash"] = _SERIALIZER.header("root_squash", root_squash, "str")
+    if enable_snapshot_virtual_directory_access is not None:
+        _headers["x-ms-enable-snapshot-virtual-directory-access"] = _SERIALIZER.header(
+            "enable_snapshot_virtual_directory_access", enable_snapshot_virtual_directory_access, "bool"
+        )
+    if paid_bursting_enabled is not None:
+        _headers["x-ms-share-paid-bursting-enabled"] = _SERIALIZER.header(
+            "paid_bursting_enabled", paid_bursting_enabled, "bool"
+        )
+    if paid_bursting_max_bandwidth_mibps is not None:
+        _headers["x-ms-share-paid-bursting-max-bandwidth-mibps"] = _SERIALIZER.header(
+            "paid_bursting_max_bandwidth_mibps", paid_bursting_max_bandwidth_mibps, "int"
+        )
+    if paid_bursting_max_iops is not None:
+        _headers["x-ms-share-paid-bursting-max-iops"] = _SERIALIZER.header(
+            "paid_bursting_max_iops", paid_bursting_max_iops, "int"
+        )
+    if file_request_intent is not None:
+        _headers["x-ms-file-request-intent"] = _SERIALIZER.header("file_request_intent", file_request_intent, "str")
+    if share_provisioned_iops is not None:
+        _headers["x-ms-share-provisioned-iops"] = _SERIALIZER.header(
+            "share_provisioned_iops", share_provisioned_iops, "int"
+        )
+    if share_provisioned_bandwidth_mibps is not None:
+        _headers["x-ms-share-provisioned-bandwidth-mibps"] = _SERIALIZER.header(
+            "share_provisioned_bandwidth_mibps", share_provisioned_bandwidth_mibps, "int"
+        )
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_get_properties_request(
+    url: str,
+    *,
+    sharesnapshot: Optional[str] = None,
+    timeout: Optional[int] = None,
+    lease_id: Optional[str] = None,
+    file_request_intent: Optional[Union[str, _models.ShareTokenIntent]] = None,
+    **kwargs: Any
+) -> HttpRequest:
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    restype: Literal["share"] = kwargs.pop("restype", _params.pop("restype", "share"))
+    version: Literal["2025-05-05"] = kwargs.pop("version", _headers.pop("x-ms-version", "2025-05-05"))
+    accept = _headers.pop("Accept", "application/xml")
+
+    # Construct URL
+    _url = kwargs.pop("template_url", "{url}")
+    path_format_arguments = {
+        "url": _SERIALIZER.url("url", url, "str", skip_quote=True),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    _params["restype"] = _SERIALIZER.query("restype", restype, "str")
+    if sharesnapshot is not None:
+        _params["sharesnapshot"] = _SERIALIZER.query("sharesnapshot", sharesnapshot, "str")
+    if timeout is not None:
+        _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0)
+
+    # Construct headers
+    _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str")
+    if lease_id is not None:
+        _headers["x-ms-lease-id"] = _SERIALIZER.header("lease_id", lease_id, "str")
+    if file_request_intent is not None:
+        _headers["x-ms-file-request-intent"] = _SERIALIZER.header("file_request_intent", file_request_intent, "str")
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_delete_request(
+    url: str,
+    *,
+    sharesnapshot: Optional[str] = None,
+    timeout: Optional[int] = None,
+    delete_snapshots: Optional[Union[str, _models.DeleteSnapshotsOptionType]] = None,
+    lease_id: Optional[str] = None,
+    file_request_intent: Optional[Union[str, _models.ShareTokenIntent]] = None,
+    **kwargs: Any
+) -> HttpRequest:
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    restype: Literal["share"] = kwargs.pop("restype", _params.pop("restype", "share"))
+    version: Literal["2025-05-05"] = kwargs.pop("version", _headers.pop("x-ms-version", "2025-05-05"))
+    accept = _headers.pop("Accept", "application/xml")
+
+    # Construct URL
+    _url = kwargs.pop("template_url", "{url}")
+    path_format_arguments = {
+        "url": _SERIALIZER.url("url", url, "str", skip_quote=True),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    _params["restype"] = _SERIALIZER.query("restype", restype, "str")
+    if sharesnapshot is not None:
+        _params["sharesnapshot"] = _SERIALIZER.query("sharesnapshot", sharesnapshot, "str")
+    if timeout is not None:
+        _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0)
+
+    # Construct headers
+    _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str")
+    if delete_snapshots is not None:
+        _headers["x-ms-delete-snapshots"] = _SERIALIZER.header("delete_snapshots", delete_snapshots, "str")
+    if lease_id is not None:
+        _headers["x-ms-lease-id"] = _SERIALIZER.header("lease_id", lease_id, "str")
+    if file_request_intent is not None:
+        _headers["x-ms-file-request-intent"] = _SERIALIZER.header("file_request_intent", file_request_intent, "str")
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="DELETE", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_acquire_lease_request(
+    url: str,
+    *,
+    timeout: Optional[int] = None,
+    duration: Optional[int] = None,
+    proposed_lease_id: Optional[str] = None,
+    sharesnapshot: Optional[str] = None,
+    request_id_parameter: Optional[str] = None,
+    file_request_intent: Optional[Union[str, _models.ShareTokenIntent]] = None,
+    **kwargs: Any
+) -> HttpRequest:
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    comp: Literal["lease"] = kwargs.pop("comp", _params.pop("comp", "lease"))
+    action: Literal["acquire"] = kwargs.pop("action", _headers.pop("x-ms-lease-action", "acquire"))
+    restype: Literal["share"] = kwargs.pop("restype", _params.pop("restype", "share"))
+    version: Literal["2025-05-05"] = kwargs.pop("version", _headers.pop("x-ms-version", "2025-05-05"))
+    accept = _headers.pop("Accept", "application/xml")
+
+    # Construct URL
+    _url = kwargs.pop("template_url", "{url}")
+    path_format_arguments = {
+        "url": _SERIALIZER.url("url", url, "str", skip_quote=True),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    _params["comp"] = _SERIALIZER.query("comp", comp, "str")
+    _params["restype"] = _SERIALIZER.query("restype", restype, "str")
+    if timeout is not None:
+        _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0)
+    if sharesnapshot is not None:
+        _params["sharesnapshot"] = _SERIALIZER.query("sharesnapshot", sharesnapshot, "str")
+
+    # Construct headers
+    _headers["x-ms-lease-action"] = _SERIALIZER.header("action", action, "str")
+    if duration is not None:
+        _headers["x-ms-lease-duration"] = _SERIALIZER.header("duration", duration, "int")
+    if proposed_lease_id is not None:
+        _headers["x-ms-proposed-lease-id"] = _SERIALIZER.header("proposed_lease_id", proposed_lease_id, "str")
+    _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str")
+    if request_id_parameter is not None:
+        _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str")
+    if file_request_intent is not None:
+        _headers["x-ms-file-request-intent"] = _SERIALIZER.header("file_request_intent", file_request_intent, "str")
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_release_lease_request(
+    url: str,
+    *,
+    lease_id: str,
+    timeout: Optional[int] = None,
+    sharesnapshot: Optional[str] = None,
+    request_id_parameter: Optional[str] = None,
+    file_request_intent: Optional[Union[str, _models.ShareTokenIntent]] = None,
+    **kwargs: Any
+) -> HttpRequest:
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    comp: Literal["lease"] = kwargs.pop("comp", _params.pop("comp", "lease"))
+    action: Literal["release"] = kwargs.pop("action", _headers.pop("x-ms-lease-action", "release"))
+    restype: Literal["share"] = kwargs.pop("restype", _params.pop("restype", "share"))
+    version: Literal["2025-05-05"] = kwargs.pop("version", _headers.pop("x-ms-version", "2025-05-05"))
+    accept = _headers.pop("Accept", "application/xml")
+
+    # Construct URL
+    _url = kwargs.pop("template_url", "{url}")
+    path_format_arguments = {
+        "url": _SERIALIZER.url("url", url, "str", skip_quote=True),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    _params["comp"] = _SERIALIZER.query("comp", comp, "str")
+    _params["restype"] = _SERIALIZER.query("restype", restype, "str")
+    if timeout is not None:
+        _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0)
+    if sharesnapshot is not None:
+        _params["sharesnapshot"] = _SERIALIZER.query("sharesnapshot", sharesnapshot, "str")
+
+    # Construct headers
+    _headers["x-ms-lease-action"] = _SERIALIZER.header("action", action, "str")
+    _headers["x-ms-lease-id"] = _SERIALIZER.header("lease_id", lease_id, "str")
+    _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str")
+    if request_id_parameter is not None:
+        _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str")
+    if file_request_intent is not None:
+        _headers["x-ms-file-request-intent"] = _SERIALIZER.header("file_request_intent", file_request_intent, "str")
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_change_lease_request(
+    url: str,
+    *,
+    lease_id: str,
+    timeout: Optional[int] = None,
+    proposed_lease_id: Optional[str] = None,
+    sharesnapshot: Optional[str] = None,
+    request_id_parameter: Optional[str] = None,
+    file_request_intent: Optional[Union[str, _models.ShareTokenIntent]] = None,
+    **kwargs: Any
+) -> HttpRequest:
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    comp: Literal["lease"] = kwargs.pop("comp", _params.pop("comp", "lease"))
+    action: Literal["change"] = kwargs.pop("action", _headers.pop("x-ms-lease-action", "change"))
+    restype: Literal["share"] = kwargs.pop("restype", _params.pop("restype", "share"))
+    version: Literal["2025-05-05"] = kwargs.pop("version", _headers.pop("x-ms-version", "2025-05-05"))
+    accept = _headers.pop("Accept", "application/xml")
+
+    # Construct URL
+    _url = kwargs.pop("template_url", "{url}")
+    path_format_arguments = {
+        "url": _SERIALIZER.url("url", url, "str", skip_quote=True),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    _params["comp"] = _SERIALIZER.query("comp", comp, "str")
+    _params["restype"] = _SERIALIZER.query("restype", restype, "str")
+    if timeout is not None:
+        _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0)
+    if sharesnapshot is not None:
+        _params["sharesnapshot"] = _SERIALIZER.query("sharesnapshot", sharesnapshot, "str")
+
+    # Construct headers
+    _headers["x-ms-lease-action"] = _SERIALIZER.header("action", action, "str")
+    _headers["x-ms-lease-id"] = _SERIALIZER.header("lease_id", lease_id, "str")
+    if proposed_lease_id is not None:
+        _headers["x-ms-proposed-lease-id"] = _SERIALIZER.header("proposed_lease_id", proposed_lease_id, "str")
+    _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str")
+    if request_id_parameter is not None:
+        _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str")
+    if file_request_intent is not None:
+        _headers["x-ms-file-request-intent"] = _SERIALIZER.header("file_request_intent", file_request_intent, "str")
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_renew_lease_request(
+    url: str,
+    *,
+    lease_id: str,
+    timeout: Optional[int] = None,
+    sharesnapshot: Optional[str] = None,
+    request_id_parameter: Optional[str] = None,
+    file_request_intent: Optional[Union[str, _models.ShareTokenIntent]] = None,
+    **kwargs: Any
+) -> HttpRequest:
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    comp: Literal["lease"] = kwargs.pop("comp", _params.pop("comp", "lease"))
+    action: Literal["renew"] = kwargs.pop("action", _headers.pop("x-ms-lease-action", "renew"))
+    restype: Literal["share"] = kwargs.pop("restype", _params.pop("restype", "share"))
+    version: Literal["2025-05-05"] = kwargs.pop("version", _headers.pop("x-ms-version", "2025-05-05"))
+    accept = _headers.pop("Accept", "application/xml")
+
+    # Construct URL
+    _url = kwargs.pop("template_url", "{url}")
+    path_format_arguments = {
+        "url": _SERIALIZER.url("url", url, "str", skip_quote=True),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    _params["comp"] = _SERIALIZER.query("comp", comp, "str")
+    _params["restype"] = _SERIALIZER.query("restype", restype, "str")
+    if timeout is not None:
+        _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0)
+    if sharesnapshot is not None:
+        _params["sharesnapshot"] = _SERIALIZER.query("sharesnapshot", sharesnapshot, "str")
+
+    # Construct headers
+    _headers["x-ms-lease-action"] = _SERIALIZER.header("action", action, "str")
+    _headers["x-ms-lease-id"] = _SERIALIZER.header("lease_id", lease_id, "str")
+    _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str")
+    if request_id_parameter is not None:
+        _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str")
+    if file_request_intent is not None:
+        _headers["x-ms-file-request-intent"] = _SERIALIZER.header("file_request_intent", file_request_intent, "str")
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_break_lease_request(
+    url: str,
+    *,
+    timeout: Optional[int] = None,
+    break_period: Optional[int] = None,
+    lease_id: Optional[str] = None,
+    request_id_parameter: Optional[str] = None,
+    sharesnapshot: Optional[str] = None,
+    file_request_intent: Optional[Union[str, _models.ShareTokenIntent]] = None,
+    **kwargs: Any
+) -> HttpRequest:
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    comp: Literal["lease"] = kwargs.pop("comp", _params.pop("comp", "lease"))
+    action: Literal["break"] = kwargs.pop("action", _headers.pop("x-ms-lease-action", "break"))
+    restype: Literal["share"] = kwargs.pop("restype", _params.pop("restype", "share"))
+    version: Literal["2025-05-05"] = kwargs.pop("version", _headers.pop("x-ms-version", "2025-05-05"))
+    accept = _headers.pop("Accept", "application/xml")
+
+    # Construct URL
+    _url = kwargs.pop("template_url", "{url}")
+    path_format_arguments = {
+        "url": _SERIALIZER.url("url", url, "str", skip_quote=True),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    _params["comp"] = _SERIALIZER.query("comp", comp, "str")
+    _params["restype"] = _SERIALIZER.query("restype", restype, "str")
+    if timeout is not None:
+        _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0)
+    if sharesnapshot is not None:
+        _params["sharesnapshot"] = _SERIALIZER.query("sharesnapshot", sharesnapshot, "str")
+
+    # Construct headers
+    _headers["x-ms-lease-action"] = _SERIALIZER.header("action", action, "str")
+    if break_period is not None:
+        _headers["x-ms-lease-break-period"] = _SERIALIZER.header("break_period", break_period, "int")
+    if lease_id is not None:
+        _headers["x-ms-lease-id"] = _SERIALIZER.header("lease_id", lease_id, "str")
+    _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str")
+    if request_id_parameter is not None:
+        _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str")
+    if file_request_intent is not None:
+        _headers["x-ms-file-request-intent"] = _SERIALIZER.header("file_request_intent", file_request_intent, "str")
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_create_snapshot_request(
+    url: str,
+    *,
+    timeout: Optional[int] = None,
+    metadata: Optional[Dict[str, str]] = None,
+    file_request_intent: Optional[Union[str, _models.ShareTokenIntent]] = None,
+    **kwargs: Any
+) -> HttpRequest:
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    restype: Literal["share"] = kwargs.pop("restype", _params.pop("restype", "share"))
+    comp: Literal["snapshot"] = kwargs.pop("comp", _params.pop("comp", "snapshot"))
+    version: Literal["2025-05-05"] = kwargs.pop("version", _headers.pop("x-ms-version", "2025-05-05"))
+    accept = _headers.pop("Accept", "application/xml")
+
+    # Construct URL
+    _url = kwargs.pop("template_url", "{url}")
+    path_format_arguments = {
+        "url": _SERIALIZER.url("url", url, "str", skip_quote=True),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    _params["restype"] = _SERIALIZER.query("restype", restype, "str")
+    _params["comp"] = _SERIALIZER.query("comp", comp, "str")
+    if timeout is not None:
+        _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0)
+
+    # Construct headers
+    if metadata is not None:
+        _headers["x-ms-meta"] = _SERIALIZER.header("metadata", metadata, "{str}")
+    _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str")
+    if file_request_intent is not None:
+        _headers["x-ms-file-request-intent"] = _SERIALIZER.header("file_request_intent", file_request_intent, "str")
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_create_permission_request(
+    url: str,
+    *,
+    timeout: Optional[int] = None,
+    file_request_intent: Optional[Union[str, _models.ShareTokenIntent]] = None,
+    **kwargs: Any
+) -> HttpRequest:
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    restype: Literal["share"] = kwargs.pop("restype", _params.pop("restype", "share"))
+    comp: Literal["filepermission"] = kwargs.pop("comp", _params.pop("comp", "filepermission"))
+    content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+    version: Literal["2025-05-05"] = kwargs.pop("version", _headers.pop("x-ms-version", "2025-05-05"))
+    accept = _headers.pop("Accept", "application/xml")
+
+    # Construct URL
+    _url = kwargs.pop("template_url", "{url}")
+    path_format_arguments = {
+        "url": _SERIALIZER.url("url", url, "str", skip_quote=True),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    _params["restype"] = _SERIALIZER.query("restype", restype, "str")
+    _params["comp"] = _SERIALIZER.query("comp", comp, "str")
+    if timeout is not None:
+        _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0)
+
+    # Construct headers
+    _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str")
+    if file_request_intent is not None:
+        _headers["x-ms-file-request-intent"] = _SERIALIZER.header("file_request_intent", file_request_intent, "str")
+    if content_type is not None:
+        _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_get_permission_request(
+    url: str,
+    *,
+    file_permission_key: str,
+    file_permission_format: Optional[Union[str, _models.FilePermissionFormat]] = None,
+    timeout: Optional[int] = None,
+    file_request_intent: Optional[Union[str, _models.ShareTokenIntent]] = None,
+    **kwargs: Any
+) -> HttpRequest:
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    restype: Literal["share"] = kwargs.pop("restype", _params.pop("restype", "share"))
+    comp: Literal["filepermission"] = kwargs.pop("comp", _params.pop("comp", "filepermission"))
+    version: Literal["2025-05-05"] = kwargs.pop("version", _headers.pop("x-ms-version", "2025-05-05"))
+    accept = _headers.pop("Accept", "application/json")
+
+    # Construct URL
+    _url = kwargs.pop("template_url", "{url}")
+    path_format_arguments = {
+        "url": _SERIALIZER.url("url", url, "str", skip_quote=True),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    _params["restype"] = _SERIALIZER.query("restype", restype, "str")
+    _params["comp"] = _SERIALIZER.query("comp", comp, "str")
+    if timeout is not None:
+        _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0)
+
+    # Construct headers
+    _headers["x-ms-file-permission-key"] = _SERIALIZER.header("file_permission_key", file_permission_key, "str")
+    if file_permission_format is not None:
+        _headers["x-ms-file-permission-format"] = _SERIALIZER.header(
+            "file_permission_format", file_permission_format, "str"
+        )
+    _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str")
+    if file_request_intent is not None:
+        _headers["x-ms-file-request-intent"] = _SERIALIZER.header("file_request_intent", file_request_intent, "str")
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_set_properties_request(
+    url: str,
+    *,
+    timeout: Optional[int] = None,
+    quota: Optional[int] = None,
+    access_tier: Optional[Union[str, _models.ShareAccessTier]] = None,
+    lease_id: Optional[str] = None,
+    root_squash: Optional[Union[str, _models.ShareRootSquash]] = None,
+    enable_snapshot_virtual_directory_access: Optional[bool] = None,
+    paid_bursting_enabled: Optional[bool] = None,
+    paid_bursting_max_bandwidth_mibps: Optional[int] = None,
+    paid_bursting_max_iops: Optional[int] = None,
+    share_provisioned_iops: Optional[int] = None,
+    share_provisioned_bandwidth_mibps: Optional[int] = None,
+    file_request_intent: Optional[Union[str, _models.ShareTokenIntent]] = None,
+    **kwargs: Any
+) -> HttpRequest:
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    restype: Literal["share"] = kwargs.pop("restype", _params.pop("restype", "share"))
+    comp: Literal["properties"] = kwargs.pop("comp", _params.pop("comp", "properties"))
+    version: Literal["2025-05-05"] = kwargs.pop("version", _headers.pop("x-ms-version", "2025-05-05"))
+    accept = _headers.pop("Accept", "application/xml")
+
+    # Construct URL
+    _url = kwargs.pop("template_url", "{url}")
+    path_format_arguments = {
+        "url": _SERIALIZER.url("url", url, "str", skip_quote=True),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    _params["restype"] = _SERIALIZER.query("restype", restype, "str")
+    _params["comp"] = _SERIALIZER.query("comp", comp, "str")
+    if timeout is not None:
+        _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0)
+
+    # Construct headers
+    _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str")
+    if quota is not None:
+        _headers["x-ms-share-quota"] = _SERIALIZER.header("quota", quota, "int", minimum=1)
+    if access_tier is not None:
+        _headers["x-ms-access-tier"] = _SERIALIZER.header("access_tier", access_tier, "str")
+    if lease_id is not None:
+        _headers["x-ms-lease-id"] = _SERIALIZER.header("lease_id", lease_id, "str")
+    if root_squash is not None:
+        _headers["x-ms-root-squash"] = _SERIALIZER.header("root_squash", root_squash, "str")
+    if enable_snapshot_virtual_directory_access is not None:
+        _headers["x-ms-enable-snapshot-virtual-directory-access"] = _SERIALIZER.header(
+            "enable_snapshot_virtual_directory_access", enable_snapshot_virtual_directory_access, "bool"
+        )
+    if paid_bursting_enabled is not None:
+        _headers["x-ms-share-paid-bursting-enabled"] = _SERIALIZER.header(
+            "paid_bursting_enabled", paid_bursting_enabled, "bool"
+        )
+    if paid_bursting_max_bandwidth_mibps is not None:
+        _headers["x-ms-share-paid-bursting-max-bandwidth-mibps"] = _SERIALIZER.header(
+            "paid_bursting_max_bandwidth_mibps", paid_bursting_max_bandwidth_mibps, "int"
+        )
+    if paid_bursting_max_iops is not None:
+        _headers["x-ms-share-paid-bursting-max-iops"] = _SERIALIZER.header(
+            "paid_bursting_max_iops", paid_bursting_max_iops, "int"
+        )
+    if file_request_intent is not None:
+        _headers["x-ms-file-request-intent"] = _SERIALIZER.header("file_request_intent", file_request_intent, "str")
+    if share_provisioned_iops is not None:
+        _headers["x-ms-share-provisioned-iops"] = _SERIALIZER.header(
+            "share_provisioned_iops", share_provisioned_iops, "int"
+        )
+    if share_provisioned_bandwidth_mibps is not None:
+        _headers["x-ms-share-provisioned-bandwidth-mibps"] = _SERIALIZER.header(
+            "share_provisioned_bandwidth_mibps", share_provisioned_bandwidth_mibps, "int"
+        )
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_set_metadata_request(
+    url: str,
+    *,
+    timeout: Optional[int] = None,
+    metadata: Optional[Dict[str, str]] = None,
+    lease_id: Optional[str] = None,
+    file_request_intent: Optional[Union[str, _models.ShareTokenIntent]] = None,
+    **kwargs: Any
+) -> HttpRequest:
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    restype: Literal["share"] = kwargs.pop("restype", _params.pop("restype", "share"))
+    comp: Literal["metadata"] = kwargs.pop("comp", _params.pop("comp", "metadata"))
+    version: Literal["2025-05-05"] = kwargs.pop("version", _headers.pop("x-ms-version", "2025-05-05"))
+    accept = _headers.pop("Accept", "application/xml")
+
+    # Construct URL
+    _url = kwargs.pop("template_url", "{url}")
+    path_format_arguments = {
+        "url": _SERIALIZER.url("url", url, "str", skip_quote=True),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    _params["restype"] = _SERIALIZER.query("restype", restype, "str")
+    _params["comp"] = _SERIALIZER.query("comp", comp, "str")
+    if timeout is not None:
+        _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0)
+
+    # Construct headers
+    if metadata is not None:
+        _headers["x-ms-meta"] = _SERIALIZER.header("metadata", metadata, "{str}")
+    _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str")
+    if lease_id is not None:
+        _headers["x-ms-lease-id"] = _SERIALIZER.header("lease_id", lease_id, "str")
+    if file_request_intent is not None:
+        _headers["x-ms-file-request-intent"] = _SERIALIZER.header("file_request_intent", file_request_intent, "str")
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_get_access_policy_request(
+    url: str,
+    *,
+    timeout: Optional[int] = None,
+    lease_id: Optional[str] = None,
+    file_request_intent: Optional[Union[str, _models.ShareTokenIntent]] = None,
+    **kwargs: Any
+) -> HttpRequest:
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    restype: Literal["share"] = kwargs.pop("restype", _params.pop("restype", "share"))
+    comp: Literal["acl"] = kwargs.pop("comp", _params.pop("comp", "acl"))
+    version: Literal["2025-05-05"] = kwargs.pop("version", _headers.pop("x-ms-version", "2025-05-05"))
+    accept = _headers.pop("Accept", "application/xml")
+
+    # Construct URL
+    _url = kwargs.pop("template_url", "{url}")
+    path_format_arguments = {
+        "url": _SERIALIZER.url("url", url, "str", skip_quote=True),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    _params["restype"] = _SERIALIZER.query("restype", restype, "str")
+    _params["comp"] = _SERIALIZER.query("comp", comp, "str")
+    if timeout is not None:
+        _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0)
+
+    # Construct headers
+    _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str")
+    if lease_id is not None:
+        _headers["x-ms-lease-id"] = _SERIALIZER.header("lease_id", lease_id, "str")
+    if file_request_intent is not None:
+        _headers["x-ms-file-request-intent"] = _SERIALIZER.header("file_request_intent", file_request_intent, "str")
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_set_access_policy_request(
+    url: str,
+    *,
+    timeout: Optional[int] = None,
+    lease_id: Optional[str] = None,
+    content: Any = None,
+    file_request_intent: Optional[Union[str, _models.ShareTokenIntent]] = None,
+    **kwargs: Any
+) -> HttpRequest:
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    restype: Literal["share"] = kwargs.pop("restype", _params.pop("restype", "share"))
+    comp: Literal["acl"] = kwargs.pop("comp", _params.pop("comp", "acl"))
+    content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+    version: Literal["2025-05-05"] = kwargs.pop("version", _headers.pop("x-ms-version", "2025-05-05"))
+    accept = _headers.pop("Accept", "application/xml")
+
+    # Construct URL
+    _url = kwargs.pop("template_url", "{url}")
+    path_format_arguments = {
+        "url": _SERIALIZER.url("url", url, "str", skip_quote=True),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    _params["restype"] = _SERIALIZER.query("restype", restype, "str")
+    _params["comp"] = _SERIALIZER.query("comp", comp, "str")
+    if timeout is not None:
+        _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0)
+
+    # Construct headers
+    _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str")
+    if lease_id is not None:
+        _headers["x-ms-lease-id"] = _SERIALIZER.header("lease_id", lease_id, "str")
+    if file_request_intent is not None:
+        _headers["x-ms-file-request-intent"] = _SERIALIZER.header("file_request_intent", file_request_intent, "str")
+    if content_type is not None:
+        _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, content=content, **kwargs)
+
+
+def build_get_statistics_request(
+    url: str,
+    *,
+    timeout: Optional[int] = None,
+    lease_id: Optional[str] = None,
+    file_request_intent: Optional[Union[str, _models.ShareTokenIntent]] = None,
+    **kwargs: Any
+) -> HttpRequest:
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    restype: Literal["share"] = kwargs.pop("restype", _params.pop("restype", "share"))
+    comp: Literal["stats"] = kwargs.pop("comp", _params.pop("comp", "stats"))
+    version: Literal["2025-05-05"] = kwargs.pop("version", _headers.pop("x-ms-version", "2025-05-05"))
+    accept = _headers.pop("Accept", "application/xml")
+
+    # Construct URL
+    _url = kwargs.pop("template_url", "{url}")
+    path_format_arguments = {
+        "url": _SERIALIZER.url("url", url, "str", skip_quote=True),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    _params["restype"] = _SERIALIZER.query("restype", restype, "str")
+    _params["comp"] = _SERIALIZER.query("comp", comp, "str")
+    if timeout is not None:
+        _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0)
+
+    # Construct headers
+    _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str")
+    if lease_id is not None:
+        _headers["x-ms-lease-id"] = _SERIALIZER.header("lease_id", lease_id, "str")
+    if file_request_intent is not None:
+        _headers["x-ms-file-request-intent"] = _SERIALIZER.header("file_request_intent", file_request_intent, "str")
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_restore_request(
+    url: str,
+    *,
+    timeout: Optional[int] = None,
+    request_id_parameter: Optional[str] = None,
+    deleted_share_name: Optional[str] = None,
+    deleted_share_version: Optional[str] = None,
+    file_request_intent: Optional[Union[str, _models.ShareTokenIntent]] = None,
+    **kwargs: Any
+) -> HttpRequest:
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    restype: Literal["share"] = kwargs.pop("restype", _params.pop("restype", "share"))
+    comp: Literal["undelete"] = kwargs.pop("comp", _params.pop("comp", "undelete"))
+    version: Literal["2025-05-05"] = kwargs.pop("version", _headers.pop("x-ms-version", "2025-05-05"))
+    accept = _headers.pop("Accept", "application/xml")
+
+    # Construct URL
+    _url = kwargs.pop("template_url", "{url}")
+    path_format_arguments = {
+        "url": _SERIALIZER.url("url", url, "str", skip_quote=True),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    _params["restype"] = _SERIALIZER.query("restype", restype, "str")
+    _params["comp"] = _SERIALIZER.query("comp", comp, "str")
+    if timeout is not None:
+        _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0)
+
+    # Construct headers
+    _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str")
+    if request_id_parameter is not None:
+        _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str")
+    if deleted_share_name is not None:
+        _headers["x-ms-deleted-share-name"] = _SERIALIZER.header("deleted_share_name", deleted_share_name, "str")
+    if deleted_share_version is not None:
+        _headers["x-ms-deleted-share-version"] = _SERIALIZER.header(
+            "deleted_share_version", deleted_share_version, "str"
+        )
+    if file_request_intent is not None:
+        _headers["x-ms-file-request-intent"] = _SERIALIZER.header("file_request_intent", file_request_intent, "str")
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+class ShareOperations:
+    """
+    .. warning::
+        **DO NOT** instantiate this class directly.
+
+        Instead, you should access the following operations through
+        :class:`~azure.storage.fileshare.AzureFileStorage`'s
+        :attr:`share` attribute.
+    """
+
+    models = _models
+
+    def __init__(self, *args, **kwargs):
+        input_args = list(args)
+        self._client: PipelineClient = input_args.pop(0) if input_args else kwargs.pop("client")
+        self._config: AzureFileStorageConfiguration = input_args.pop(0) if input_args else kwargs.pop("config")
+        self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer")
+        self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer")
+
+    @distributed_trace
+    def create(  # pylint: disable=inconsistent-return-statements
+        self,
+        timeout: Optional[int] = None,
+        metadata: Optional[Dict[str, str]] = None,
+        quota: Optional[int] = None,
+        access_tier: Optional[Union[str, _models.ShareAccessTier]] = None,
+        enabled_protocols: Optional[str] = None,
+        root_squash: Optional[Union[str, _models.ShareRootSquash]] = None,
+        enable_snapshot_virtual_directory_access: Optional[bool] = None,
+        paid_bursting_enabled: Optional[bool] = None,
+        paid_bursting_max_bandwidth_mibps: Optional[int] = None,
+        paid_bursting_max_iops: Optional[int] = None,
+        share_provisioned_iops: Optional[int] = None,
+        share_provisioned_bandwidth_mibps: Optional[int] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """Creates a new share under the specified account. If the share with the same name already
+        exists, the operation fails.
+
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
+         Timeouts for File Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param metadata: A name-value pair to associate with a file storage object. Default value is
+         None.
+        :type metadata: dict[str, str]
+        :param quota: Specifies the maximum size of the share, in gigabytes. Default value is None.
+        :type quota: int
+        :param access_tier: Specifies the access tier of the share. Known values are:
+         "TransactionOptimized", "Hot", "Cool", and "Premium". Default value is None.
+        :type access_tier: str or ~azure.storage.fileshare.models.ShareAccessTier
+        :param enabled_protocols: Protocols to enable on the share. Default value is None.
+        :type enabled_protocols: str
+        :param root_squash: Root squash to set on the share. Only valid for NFS shares. Known values
+         are: "NoRootSquash", "RootSquash", and "AllSquash". Default value is None.
+        :type root_squash: str or ~azure.storage.fileshare.models.ShareRootSquash
+        :param enable_snapshot_virtual_directory_access: Optional. Specifies whether the snapshot
+         virtual directory should be accessible at the root of the share mount point when NFS is
+         enabled. Default value is None.
+        :type enable_snapshot_virtual_directory_access: bool
+        :param paid_bursting_enabled: Optional. Boolean. Default if not specified is false. This
+         property enables paid bursting. Default value is None.
+        :type paid_bursting_enabled: bool
+        :param paid_bursting_max_bandwidth_mibps: Optional. Integer. Default if not specified is the
+         maximum throughput the file share can support. Current maximum for a file share is 10,340
+         MiB/sec. Default value is None.
+        :type paid_bursting_max_bandwidth_mibps: int
+        :param paid_bursting_max_iops: Optional. Integer. Default if not specified is the maximum IOPS
+         the file share can support. Current maximum for a file share is 102,400 IOPS. Default value is
+         None.
+        :type paid_bursting_max_iops: int
+        :param share_provisioned_iops: Optional. Supported in version 2025-01-05 and later. Only
+         allowed for provisioned v2 file shares. Specifies the provisioned number of input/output
+         operations per second (IOPS) of the share. If this is not specified, the provisioned IOPS
+         is set to a value calculated from the recommendation formula. Default value is None.
+        :type share_provisioned_iops: int
+        :param share_provisioned_bandwidth_mibps: Optional. Supported in version 2025-01-05 and later.
+         Only allowed for provisioned v2 file shares. Specifies the provisioned bandwidth of the share,
+         in mebibytes per second (MiBps). If this is not specified, the provisioned bandwidth is set
+         to a value calculated from the recommendation formula. Default value is None.
+        :type share_provisioned_bandwidth_mibps: int
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        restype: Literal["share"] = kwargs.pop("restype", _params.pop("restype", "share"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _request = build_create_request(
+            url=self._config.url,
+            timeout=timeout,
+            metadata=metadata,
+            quota=quota,
+            access_tier=access_tier,
+            enabled_protocols=enabled_protocols,
+            root_squash=root_squash,
+            enable_snapshot_virtual_directory_access=enable_snapshot_virtual_directory_access,
+            paid_bursting_enabled=paid_bursting_enabled,
+            paid_bursting_max_bandwidth_mibps=paid_bursting_max_bandwidth_mibps,
+            paid_bursting_max_iops=paid_bursting_max_iops,
+            share_provisioned_iops=share_provisioned_iops,
+            share_provisioned_bandwidth_mibps=share_provisioned_bandwidth_mibps,
+            file_request_intent=self._config.file_request_intent,
+            restype=restype,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [201]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+        response_headers["x-ms-share-quota"] = self._deserialize("int", response.headers.get("x-ms-share-quota"))
+        response_headers["x-ms-share-provisioned-iops"] = self._deserialize(
+            "int", response.headers.get("x-ms-share-provisioned-iops")
+        )
+        response_headers["x-ms-share-provisioned-bandwidth-mibps"] = self._deserialize(
+            "int", response.headers.get("x-ms-share-provisioned-bandwidth-mibps")
+        )
+        response_headers["x-ms-share-included-burst-iops"] = self._deserialize(
+            "int", response.headers.get("x-ms-share-included-burst-iops")
+        )
+        response_headers["x-ms-share-max-burst-credits-for-iops"] = self._deserialize(
+            "int", response.headers.get("x-ms-share-max-burst-credits-for-iops")
+        )
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
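+
+    # Hedged sketch: create() returns None on HTTP 201 unless a ``cls`` callback is
+    # supplied; the callback receives the pipeline response, the deserialized body
+    # (None here), and the response headers. ``share_ops`` is an assumed
+    # ShareOperations instance obtained from a client as sketched above.
+    #
+    #     headers = share_ops.create(
+    #         quota=100,
+    #         cls=lambda resp, body, hdrs: hdrs,
+    #     )
+    #     etag = headers["ETag"]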
+
+    @distributed_trace
+    def get_properties(  # pylint: disable=inconsistent-return-statements
+        self,
+        sharesnapshot: Optional[str] = None,
+        timeout: Optional[int] = None,
+        lease_access_conditions: Optional[_models.LeaseAccessConditions] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """Returns all user-defined metadata and system properties for the specified share or share
+        snapshot. The data returned does not include the share's list of files.
+
+        :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present,
+         specifies the share snapshot to query. Default value is None.
+        :type sharesnapshot: str
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
+         Timeouts for File Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param lease_access_conditions: Parameter group. Default value is None.
+        :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        restype: Literal["share"] = kwargs.pop("restype", _params.pop("restype", "share"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _lease_id = None
+        if lease_access_conditions is not None:
+            _lease_id = lease_access_conditions.lease_id
+
+        _request = build_get_properties_request(
+            url=self._config.url,
+            sharesnapshot=sharesnapshot,
+            timeout=timeout,
+            lease_id=_lease_id,
+            file_request_intent=self._config.file_request_intent,
+            restype=restype,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["x-ms-meta"] = self._deserialize("{str}", response.headers.get("x-ms-meta"))
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+        response_headers["x-ms-share-quota"] = self._deserialize("int", response.headers.get("x-ms-share-quota"))
+        response_headers["x-ms-share-provisioned-iops"] = self._deserialize(
+            "int", response.headers.get("x-ms-share-provisioned-iops")
+        )
+        response_headers["x-ms-share-provisioned-ingress-mbps"] = self._deserialize(
+            "int", response.headers.get("x-ms-share-provisioned-ingress-mbps")
+        )
+        response_headers["x-ms-share-provisioned-egress-mbps"] = self._deserialize(
+            "int", response.headers.get("x-ms-share-provisioned-egress-mbps")
+        )
+        response_headers["x-ms-share-next-allowed-quota-downgrade-time"] = self._deserialize(
+            "rfc-1123", response.headers.get("x-ms-share-next-allowed-quota-downgrade-time")
+        )
+        response_headers["x-ms-share-provisioned-bandwidth-mibps"] = self._deserialize(
+            "int", response.headers.get("x-ms-share-provisioned-bandwidth-mibps")
+        )
+        response_headers["x-ms-lease-duration"] = self._deserialize("str", response.headers.get("x-ms-lease-duration"))
+        response_headers["x-ms-lease-state"] = self._deserialize("str", response.headers.get("x-ms-lease-state"))
+        response_headers["x-ms-lease-status"] = self._deserialize("str", response.headers.get("x-ms-lease-status"))
+        response_headers["x-ms-access-tier"] = self._deserialize("str", response.headers.get("x-ms-access-tier"))
+        response_headers["x-ms-access-tier-change-time"] = self._deserialize(
+            "rfc-1123", response.headers.get("x-ms-access-tier-change-time")
+        )
+        response_headers["x-ms-access-tier-transition-state"] = self._deserialize(
+            "str", response.headers.get("x-ms-access-tier-transition-state")
+        )
+        response_headers["x-ms-enabled-protocols"] = self._deserialize(
+            "str", response.headers.get("x-ms-enabled-protocols")
+        )
+        response_headers["x-ms-root-squash"] = self._deserialize("str", response.headers.get("x-ms-root-squash"))
+        response_headers["x-ms-enable-snapshot-virtual-directory-access"] = self._deserialize(
+            "bool", response.headers.get("x-ms-enable-snapshot-virtual-directory-access")
+        )
+        response_headers["x-ms-share-paid-bursting-enabled"] = self._deserialize(
+            "bool", response.headers.get("x-ms-share-paid-bursting-enabled")
+        )
+        response_headers["x-ms-share-paid-bursting-max-iops"] = self._deserialize(
+            "int", response.headers.get("x-ms-share-paid-bursting-max-iops")
+        )
+        response_headers["x-ms-share-paid-bursting-max-bandwidth-mibps"] = self._deserialize(
+            "int", response.headers.get("x-ms-share-paid-bursting-max-bandwidth-mibps")
+        )
+        response_headers["x-ms-share-included-burst-iops"] = self._deserialize(
+            "int", response.headers.get("x-ms-share-included-burst-iops")
+        )
+        response_headers["x-ms-share-max-burst-credits-for-iops"] = self._deserialize(
+            "int", response.headers.get("x-ms-share-max-burst-credits-for-iops")
+        )
+        response_headers["x-ms-share-next-allowed-provisioned-iops-downgrade-time"] = self._deserialize(
+            "rfc-1123", response.headers.get("x-ms-share-next-allowed-provisioned-iops-downgrade-time")
+        )
+        response_headers["x-ms-share-next-allowed-provisioned-bandwidth-downgrade-time"] = self._deserialize(
+            "rfc-1123", response.headers.get("x-ms-share-next-allowed-provisioned-bandwidth-downgrade-time")
+        )
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
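+
+    # Hedged sketch: get_properties() likewise surfaces the share's system
+    # properties only through response headers, so a ``cls`` callback is the way to
+    # capture them; ``share_ops`` is an assumed ShareOperations instance.
+    #
+    #     from azure.storage.fileshare._generated import models
+    #
+    #     props = share_ops.get_properties(
+    #         lease_access_conditions=models.LeaseAccessConditions(lease_id="..."),
+    #         cls=lambda resp, body, hdrs: hdrs,
+    #     )
+    #     quota_gib = props["x-ms-share-quota"]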
+
+    @distributed_trace
+    def delete(  # pylint: disable=inconsistent-return-statements
+        self,
+        sharesnapshot: Optional[str] = None,
+        timeout: Optional[int] = None,
+        delete_snapshots: Optional[Union[str, _models.DeleteSnapshotsOptionType]] = None,
+        lease_access_conditions: Optional[_models.LeaseAccessConditions] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """Operation marks the specified share or share snapshot for deletion. The share or share snapshot
+        and any files contained within it are later deleted during garbage collection.
+
+        :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present,
+         specifies the share snapshot to query. Default value is None.
+        :type sharesnapshot: str
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
+         Timeouts for File Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param delete_snapshots: Specifies whether to delete the base share and all of its
+         snapshots. Known values are: "include" and "include-leased". Default value is None.
+        :type delete_snapshots: str or ~azure.storage.fileshare.models.DeleteSnapshotsOptionType
+        :param lease_access_conditions: Parameter group. Default value is None.
+        :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        restype: Literal["share"] = kwargs.pop("restype", _params.pop("restype", "share"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _lease_id = None
+        if lease_access_conditions is not None:
+            _lease_id = lease_access_conditions.lease_id
+
+        _request = build_delete_request(
+            url=self._config.url,
+            sharesnapshot=sharesnapshot,
+            timeout=timeout,
+            delete_snapshots=delete_snapshots,
+            lease_id=_lease_id,
+            file_request_intent=self._config.file_request_intent,
+            restype=restype,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [202]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+        response_headers["x-ms-file-share-usage-bytes"] = self._deserialize(
+            "int", response.headers.get("x-ms-file-share-usage-bytes")
+        )
+        response_headers["x-ms-file-share-snapshot-usage-bytes"] = self._deserialize(
+            "int", response.headers.get("x-ms-file-share-snapshot-usage-bytes")
+        )
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
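+    # Usage sketch (illustrative, not part of the generated client): deleting a
+    # share together with its snapshots through this operations class. ``ops``
+    # is a hypothetical instance of this class wired to an authenticated
+    # pipeline; the keyword names match the signature of ``delete`` above.
+    #
+    #     ops.delete(delete_snapshots="include", timeout=30)
+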
+    @distributed_trace
+    def acquire_lease(  # pylint: disable=inconsistent-return-statements
+        self,
+        timeout: Optional[int] = None,
+        duration: Optional[int] = None,
+        proposed_lease_id: Optional[str] = None,
+        sharesnapshot: Optional[str] = None,
+        request_id_parameter: Optional[str] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """The Lease Share operation establishes and manages a lock on a share, or the specified snapshot
+        for set and delete share operations.
+
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
+         Timeouts for File Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param duration: Specifies the duration of the lease, in seconds, or negative one (-1) for a
+         lease that never expires. A non-infinite lease can be between 15 and 60 seconds. A lease
+         duration cannot be changed using renew or change. Default value is None.
+        :type duration: int
+        :param proposed_lease_id: Proposed lease ID, in a GUID string format. The File service returns
+         400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid
+         Constructor (String) for a list of valid GUID string formats. Default value is None.
+        :type proposed_lease_id: str
+        :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present,
+         specifies the share snapshot to query. Default value is None.
+        :type sharesnapshot: str
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        comp: Literal["lease"] = kwargs.pop("comp", _params.pop("comp", "lease"))
+        action: Literal["acquire"] = kwargs.pop("action", _headers.pop("x-ms-lease-action", "acquire"))
+        restype: Literal["share"] = kwargs.pop("restype", _params.pop("restype", "share"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _request = build_acquire_lease_request(
+            url=self._config.url,
+            timeout=timeout,
+            duration=duration,
+            proposed_lease_id=proposed_lease_id,
+            sharesnapshot=sharesnapshot,
+            request_id_parameter=request_id_parameter,
+            file_request_intent=self._config.file_request_intent,
+            comp=comp,
+            action=action,
+            restype=restype,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [201]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["x-ms-lease-id"] = self._deserialize("str", response.headers.get("x-ms-lease-id"))
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
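+    # Usage sketch (illustrative): ``acquire_lease`` returns None, so the new
+    # lease ID is only surfaced through the optional ``cls`` response hook, as
+    # the header handling above shows. ``ops`` is a hypothetical instance of
+    # this class.
+    #
+    #     headers = ops.acquire_lease(
+    #         duration=-1,  # -1 requests a lease that never expires
+    #         cls=lambda pipeline_response, _, response_headers: response_headers,
+    #     )
+    #     lease_id = headers["x-ms-lease-id"]
+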
+    @distributed_trace
+    def release_lease(  # pylint: disable=inconsistent-return-statements
+        self,
+        lease_id: str,
+        timeout: Optional[int] = None,
+        sharesnapshot: Optional[str] = None,
+        request_id_parameter: Optional[str] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """The Lease Share operation establishes and manages a lock on a share, or the specified snapshot
+        for set and delete share operations.
+
+        :param lease_id: Specifies the current lease ID on the resource. Required.
+        :type lease_id: str
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
+         Timeouts for File Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present,
+         specifies the share snapshot to query. Default value is None.
+        :type sharesnapshot: str
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        comp: Literal["lease"] = kwargs.pop("comp", _params.pop("comp", "lease"))
+        action: Literal["release"] = kwargs.pop("action", _headers.pop("x-ms-lease-action", "release"))
+        restype: Literal["share"] = kwargs.pop("restype", _params.pop("restype", "share"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _request = build_release_lease_request(
+            url=self._config.url,
+            lease_id=lease_id,
+            timeout=timeout,
+            sharesnapshot=sharesnapshot,
+            request_id_parameter=request_id_parameter,
+            file_request_intent=self._config.file_request_intent,
+            comp=comp,
+            action=action,
+            restype=restype,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
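+    # Usage sketch (illustrative): releasing a previously acquired lease;
+    # ``lease_id`` must be the ID returned when the lease was acquired.
+    #
+    #     ops.release_lease(lease_id=lease_id)
+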
+    @distributed_trace
+    def change_lease(  # pylint: disable=inconsistent-return-statements
+        self,
+        lease_id: str,
+        timeout: Optional[int] = None,
+        proposed_lease_id: Optional[str] = None,
+        sharesnapshot: Optional[str] = None,
+        request_id_parameter: Optional[str] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """The Lease Share operation establishes and manages a lock on a share, or the specified snapshot
+        for set and delete share operations.
+
+        :param lease_id: Specifies the current lease ID on the resource. Required.
+        :type lease_id: str
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
+         Timeouts for File Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param proposed_lease_id: Proposed lease ID, in a GUID string format. The File service returns
+         400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid
+         Constructor (String) for a list of valid GUID string formats. Default value is None.
+        :type proposed_lease_id: str
+        :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present,
+         specifies the share snapshot to query. Default value is None.
+        :type sharesnapshot: str
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        comp: Literal["lease"] = kwargs.pop("comp", _params.pop("comp", "lease"))
+        action: Literal["change"] = kwargs.pop("action", _headers.pop("x-ms-lease-action", "change"))
+        restype: Literal["share"] = kwargs.pop("restype", _params.pop("restype", "share"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _request = build_change_lease_request(
+            url=self._config.url,
+            lease_id=lease_id,
+            timeout=timeout,
+            proposed_lease_id=proposed_lease_id,
+            sharesnapshot=sharesnapshot,
+            request_id_parameter=request_id_parameter,
+            file_request_intent=self._config.file_request_intent,
+            comp=comp,
+            action=action,
+            restype=restype,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["x-ms-lease-id"] = self._deserialize("str", response.headers.get("x-ms-lease-id"))
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
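+    # Usage sketch (illustrative): rotating the lease ID in place. Both IDs are
+    # GUID strings; ``uuid4`` is just one way to produce a valid proposed ID.
+    #
+    #     import uuid
+    #     ops.change_lease(lease_id=lease_id, proposed_lease_id=str(uuid.uuid4()))
+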
+    @distributed_trace
+    def renew_lease(  # pylint: disable=inconsistent-return-statements
+        self,
+        lease_id: str,
+        timeout: Optional[int] = None,
+        sharesnapshot: Optional[str] = None,
+        request_id_parameter: Optional[str] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """The Lease Share operation establishes and manages a lock on a share, or the specified snapshot
+        for set and delete share operations.
+
+        :param lease_id: Specifies the current lease ID on the resource. Required.
+        :type lease_id: str
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
+         Timeouts for File Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present,
+         specifies the share snapshot to query. Default value is None.
+        :type sharesnapshot: str
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        comp: Literal["lease"] = kwargs.pop("comp", _params.pop("comp", "lease"))
+        action: Literal["renew"] = kwargs.pop("action", _headers.pop("x-ms-lease-action", "renew"))
+        restype: Literal["share"] = kwargs.pop("restype", _params.pop("restype", "share"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _request = build_renew_lease_request(
+            url=self._config.url,
+            lease_id=lease_id,
+            timeout=timeout,
+            sharesnapshot=sharesnapshot,
+            request_id_parameter=request_id_parameter,
+            file_request_intent=self._config.file_request_intent,
+            comp=comp,
+            action=action,
+            restype=restype,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["x-ms-lease-id"] = self._deserialize("str", response.headers.get("x-ms-lease-id"))
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
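+    # Usage sketch (illustrative): renewing a fixed-duration lease before it
+    # expires. Per the acquire docstring above, renewal cannot change the
+    # lease duration.
+    #
+    #     ops.renew_lease(lease_id=lease_id)
+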
+    @distributed_trace
+    def break_lease(  # pylint: disable=inconsistent-return-statements
+        self,
+        timeout: Optional[int] = None,
+        break_period: Optional[int] = None,
+        request_id_parameter: Optional[str] = None,
+        sharesnapshot: Optional[str] = None,
+        lease_access_conditions: Optional[_models.LeaseAccessConditions] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """The Lease Share operation establishes and manages a lock on a share, or the specified snapshot
+        for set and delete share operations.
+
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
+         Timeouts for File Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param break_period: For a break operation, proposed duration the lease should continue before
+         it is broken, in seconds, between 0 and 60. This break period is only used if it is shorter
+         than the time remaining on the lease. If longer, the time remaining on the lease is used. A new
+         lease will not be available before the break period has expired, but the lease may be held for
+         longer than the break period. If this header does not appear with a break operation, a
+         fixed-duration lease breaks after the remaining lease period elapses, and an infinite lease
+         breaks immediately. Default value is None.
+        :type break_period: int
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present,
+         specifies the share snapshot to query. Default value is None.
+        :type sharesnapshot: str
+        :param lease_access_conditions: Parameter group. Default value is None.
+        :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        comp: Literal["lease"] = kwargs.pop("comp", _params.pop("comp", "lease"))
+        action: Literal["break"] = kwargs.pop("action", _headers.pop("x-ms-lease-action", "break"))
+        restype: Literal["share"] = kwargs.pop("restype", _params.pop("restype", "share"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _lease_id = None
+        if lease_access_conditions is not None:
+            _lease_id = lease_access_conditions.lease_id
+
+        _request = build_break_lease_request(
+            url=self._config.url,
+            timeout=timeout,
+            break_period=break_period,
+            lease_id=_lease_id,
+            request_id_parameter=request_id_parameter,
+            sharesnapshot=sharesnapshot,
+            file_request_intent=self._config.file_request_intent,
+            comp=comp,
+            action=action,
+            restype=restype,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [202]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["x-ms-lease-time"] = self._deserialize("int", response.headers.get("x-ms-lease-time"))
+        response_headers["x-ms-lease-id"] = self._deserialize("str", response.headers.get("x-ms-lease-id"))
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
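+    # Usage sketch (illustrative): breaking a lease without knowing its ID.
+    # ``break_period=0`` makes the share available for a new lease as soon as
+    # possible; the remaining break time is reported back in the
+    # ``x-ms-lease-time`` header handled above.
+    #
+    #     ops.break_lease(break_period=0)
+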
+    @distributed_trace
+    def create_snapshot(  # pylint: disable=inconsistent-return-statements
+        self, timeout: Optional[int] = None, metadata: Optional[Dict[str, str]] = None, **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """Creates a read-only snapshot of a share.
+
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
+         Timeouts for File Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param metadata: Name-value pairs to associate with the file storage object as metadata.
+         Default value is None.
+        :type metadata: dict[str, str]
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        restype: Literal["share"] = kwargs.pop("restype", _params.pop("restype", "share"))
+        comp: Literal["snapshot"] = kwargs.pop("comp", _params.pop("comp", "snapshot"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _request = build_create_snapshot_request(
+            url=self._config.url,
+            timeout=timeout,
+            metadata=metadata,
+            file_request_intent=self._config.file_request_intent,
+            restype=restype,
+            comp=comp,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [201]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["x-ms-snapshot"] = self._deserialize("str", response.headers.get("x-ms-snapshot"))
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
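+    # Usage sketch (illustrative): the snapshot identifier comes back in the
+    # ``x-ms-snapshot`` header, so use the ``cls`` hook to capture it. The
+    # metadata keys are hypothetical.
+    #
+    #     headers = ops.create_snapshot(
+    #         metadata={"taken-by": "nightly-job"},
+    #         cls=lambda pipeline_response, _, response_headers: response_headers,
+    #     )
+    #     snapshot = headers["x-ms-snapshot"]
+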
+    @overload
+    def create_permission(
+        self,
+        share_permission: _models.SharePermission,
+        timeout: Optional[int] = None,
+        *,
+        content_type: str = "application/json",
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """Create a permission (a security descriptor).
+
+        :param share_permission: A permission (a security descriptor) at the share level. Required.
+        :type share_permission: ~azure.storage.fileshare.models.SharePermission
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
+         Timeouts for File Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+         Default value is "application/json".
+        :paramtype content_type: str
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+
+    @overload
+    def create_permission(
+        self,
+        share_permission: IO[bytes],
+        timeout: Optional[int] = None,
+        *,
+        content_type: str = "application/json",
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """Create a permission (a security descriptor).
+
+        :param share_permission: A permission (a security descriptor) at the share level. Required.
+        :type share_permission: IO[bytes]
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
+         Timeouts for File Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+         Default value is "application/json".
+        :paramtype content_type: str
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+
+    @distributed_trace
+    def create_permission(  # pylint: disable=inconsistent-return-statements
+        self, share_permission: Union[_models.SharePermission, IO[bytes]], timeout: Optional[int] = None, **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """Create a permission (a security descriptor).
+
+        :param share_permission: A permission (a security descriptor) at the share level. Is either a
+         SharePermission type or an IO[bytes] type. Required.
+        :type share_permission: ~azure.storage.fileshare.models.SharePermission or IO[bytes]
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
+         Timeouts for File Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        restype: Literal["share"] = kwargs.pop("restype", _params.pop("restype", "share"))
+        comp: Literal["filepermission"] = kwargs.pop("comp", _params.pop("comp", "filepermission"))
+        content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        content_type = content_type or "application/json"
+        _json = None
+        _content = None
+        if isinstance(share_permission, (IOBase, bytes)):
+            _content = share_permission
+        else:
+            _json = self._serialize.body(share_permission, "SharePermission")
+
+        _request = build_create_permission_request(
+            url=self._config.url,
+            timeout=timeout,
+            file_request_intent=self._config.file_request_intent,
+            restype=restype,
+            comp=comp,
+            content_type=content_type,
+            version=self._config.version,
+            json=_json,
+            content=_content,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [201]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+        response_headers["x-ms-file-permission-key"] = self._deserialize(
+            "str", response.headers.get("x-ms-file-permission-key")
+        )
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
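+    # Usage sketch (illustrative, assuming the SharePermission model accepts a
+    # ``permission`` keyword): creating a share-level permission from an SDDL
+    # string and capturing the returned ``x-ms-file-permission-key``. The SDDL
+    # value below is a placeholder, not a working descriptor.
+    #
+    #     permission = _models.SharePermission(permission="O:SYG:SYD:(A;;FA;;;SY)")
+    #     headers = ops.create_permission(
+    #         permission,
+    #         cls=lambda pipeline_response, _, response_headers: response_headers,
+    #     )
+    #     permission_key = headers["x-ms-file-permission-key"]
+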
+    @distributed_trace
+    def get_permission(
+        self,
+        file_permission_key: str,
+        file_permission_format: Optional[Union[str, _models.FilePermissionFormat]] = None,
+        timeout: Optional[int] = None,
+        **kwargs: Any
+    ) -> _models.SharePermission:
+        # pylint: disable=line-too-long
+        """Returns the permission (security descriptor) for a given key.
+
+        :param file_permission_key: Key of the permission to be set for the directory/file. Required.
+        :type file_permission_key: str
+        :param file_permission_format: Optional. Available for version 2023-06-01 and later. Specifies
+         the format in which the permission is returned. Acceptable values are SDDL or binary. If
+         x-ms-file-permission-format is unspecified or explicitly set to SDDL, the permission is
+         returned in SDDL format. If x-ms-file-permission-format is explicitly set to binary, the
+         permission is returned as a base64 string representing the binary encoding of the permission.
+         Known values are: "Sddl" and "Binary". Default value is None.
+        :type file_permission_format: str or ~azure.storage.fileshare.models.FilePermissionFormat
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
+         Timeouts for File Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :return: SharePermission or the result of cls(response)
+        :rtype: ~azure.storage.fileshare.models.SharePermission
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        restype: Literal["share"] = kwargs.pop("restype", _params.pop("restype", "share"))
+        comp: Literal["filepermission"] = kwargs.pop("comp", _params.pop("comp", "filepermission"))
+        cls: ClsType[_models.SharePermission] = kwargs.pop("cls", None)
+
+        _request = build_get_permission_request(
+            url=self._config.url,
+            file_permission_key=file_permission_key,
+            file_permission_format=file_permission_format,
+            timeout=timeout,
+            file_request_intent=self._config.file_request_intent,
+            restype=restype,
+            comp=comp,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+
+        deserialized = self._deserialize("SharePermission", pipeline_response.http_response)
+
+        if cls:
+            return cls(pipeline_response, deserialized, response_headers)  # type: ignore
+
+        return deserialized  # type: ignore
+
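+    # Usage sketch (illustrative, assuming the model exposes a ``permission``
+    # attribute): fetching a permission back by its key. Depending on
+    # ``file_permission_format``, the descriptor is returned as SDDL or as a
+    # base64-encoded binary string.
+    #
+    #     share_permission = ops.get_permission(file_permission_key=permission_key)
+    #     sddl = share_permission.permission
+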
+    @distributed_trace
+    def set_properties(  # pylint: disable=inconsistent-return-statements
+        self,
+        timeout: Optional[int] = None,
+        quota: Optional[int] = None,
+        access_tier: Optional[Union[str, _models.ShareAccessTier]] = None,
+        root_squash: Optional[Union[str, _models.ShareRootSquash]] = None,
+        enable_snapshot_virtual_directory_access: Optional[bool] = None,
+        paid_bursting_enabled: Optional[bool] = None,
+        paid_bursting_max_bandwidth_mibps: Optional[int] = None,
+        paid_bursting_max_iops: Optional[int] = None,
+        share_provisioned_iops: Optional[int] = None,
+        share_provisioned_bandwidth_mibps: Optional[int] = None,
+        lease_access_conditions: Optional[_models.LeaseAccessConditions] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """Sets properties for the specified share.
+
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
+         Timeouts for File Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param quota: Specifies the maximum size of the share, in gigabytes. Default value is None.
+        :type quota: int
+        :param access_tier: Specifies the access tier of the share. Known values are:
+         "TransactionOptimized", "Hot", "Cool", and "Premium". Default value is None.
+        :type access_tier: str or ~azure.storage.fileshare.models.ShareAccessTier
+        :param root_squash: Root squash to set on the share.  Only valid for NFS shares. Known values
+         are: "NoRootSquash", "RootSquash", and "AllSquash". Default value is None.
+        :type root_squash: str or ~azure.storage.fileshare.models.ShareRootSquash
+        :param enable_snapshot_virtual_directory_access: Optional. Specifies whether the snapshot
+         virtual directory should be accessible at the root of the share mount point when NFS is
+         enabled. Default value is None.
+        :param paid_bursting_enabled: Optional. Boolean. Default if not specified is false. This
+         property enables paid bursting. Default value is None.
+        :type paid_bursting_enabled: bool
+        :param paid_bursting_max_bandwidth_mibps: Optional. Integer. Default if not specified is the
+         maximum throughput the file share can support. Current maximum for a file share is 10,340
+         MiB/sec. Default value is None.
+        :type paid_bursting_max_bandwidth_mibps: int
+        :param paid_bursting_max_iops: Optional. Integer. Default if not specified is the maximum IOPS
+         the file share can support. Current maximum for a file share is 102,400 IOPS. Default value is
+         None.
+        :type paid_bursting_max_iops: int
+        :param share_provisioned_iops: Optional. Supported in version 2025-01-05 and later. Only
+         allowed for provisioned v2 file shares. Specifies the provisioned number of input/output
+         operations per second (IOPS) of the share. If this is not specified, the provisioned IOPS is
+         set to a value calculated based on the recommendation formula. Default value is None.
+        :type share_provisioned_iops: int
+        :param share_provisioned_bandwidth_mibps: Optional. Supported in version 2025-01-05 and later.
+         Only allowed for provisioned v2 file shares. Specifies the provisioned bandwidth of the share,
+         in mebibytes per second (MiBps). If this is not specified, the provisioned bandwidth is set to
+         a value calculated based on the recommendation formula. Default value is None.
+        :type share_provisioned_bandwidth_mibps: int
+        :param lease_access_conditions: Parameter group. Default value is None.
+        :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        restype: Literal["share"] = kwargs.pop("restype", _params.pop("restype", "share"))
+        comp: Literal["properties"] = kwargs.pop("comp", _params.pop("comp", "properties"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _lease_id = None
+        if lease_access_conditions is not None:
+            _lease_id = lease_access_conditions.lease_id
+
+        _request = build_set_properties_request(
+            url=self._config.url,
+            timeout=timeout,
+            quota=quota,
+            access_tier=access_tier,
+            lease_id=_lease_id,
+            root_squash=root_squash,
+            enable_snapshot_virtual_directory_access=enable_snapshot_virtual_directory_access,
+            paid_bursting_enabled=paid_bursting_enabled,
+            paid_bursting_max_bandwidth_mibps=paid_bursting_max_bandwidth_mibps,
+            paid_bursting_max_iops=paid_bursting_max_iops,
+            share_provisioned_iops=share_provisioned_iops,
+            share_provisioned_bandwidth_mibps=share_provisioned_bandwidth_mibps,
+            file_request_intent=self._config.file_request_intent,
+            restype=restype,
+            comp=comp,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+        response_headers["x-ms-share-quota"] = self._deserialize("int", response.headers.get("x-ms-share-quota"))
+        response_headers["x-ms-share-provisioned-iops"] = self._deserialize(
+            "int", response.headers.get("x-ms-share-provisioned-iops")
+        )
+        response_headers["x-ms-share-provisioned-bandwidth-mibps"] = self._deserialize(
+            "int", response.headers.get("x-ms-share-provisioned-bandwidth-mibps")
+        )
+        response_headers["x-ms-share-included-burst-iops"] = self._deserialize(
+            "int", response.headers.get("x-ms-share-included-burst-iops")
+        )
+        response_headers["x-ms-share-max-burst-credits-for-iops"] = self._deserialize(
+            "int", response.headers.get("x-ms-share-max-burst-credits-for-iops")
+        )
+        response_headers["x-ms-share-next-allowed-quota-downgrade-time"] = self._deserialize(
+            "rfc-1123", response.headers.get("x-ms-share-next-allowed-quota-downgrade-time")
+        )
+        response_headers["x-ms-share-next-allowed-provisioned-iops-downgrade-time"] = self._deserialize(
+            "rfc-1123", response.headers.get("x-ms-share-next-allowed-provisioned-iops-downgrade-time")
+        )
+        response_headers["x-ms-share-next-allowed-provisioned-bandwidth-downgrade-time"] = self._deserialize(
+            "rfc-1123", response.headers.get("x-ms-share-next-allowed-provisioned-bandwidth-downgrade-time")
+        )
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
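+    # Usage sketch (illustrative): resizing a share and moving it to the Hot
+    # tier; only the properties passed explicitly are changed.
+    #
+    #     ops.set_properties(quota=100, access_tier="Hot")
+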
+    @distributed_trace
+    def set_metadata(  # pylint: disable=inconsistent-return-statements
+        self,
+        timeout: Optional[int] = None,
+        metadata: Optional[Dict[str, str]] = None,
+        lease_access_conditions: Optional[_models.LeaseAccessConditions] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """Sets one or more user-defined name-value pairs for the specified share.
+
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
+         Timeouts for File Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param metadata: Name-value pairs to associate with the file storage object as metadata.
+         Default value is None.
+        :type metadata: dict[str, str]
+        :param lease_access_conditions: Parameter group. Default value is None.
+        :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        restype: Literal["share"] = kwargs.pop("restype", _params.pop("restype", "share"))
+        comp: Literal["metadata"] = kwargs.pop("comp", _params.pop("comp", "metadata"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _lease_id = None
+        if lease_access_conditions is not None:
+            _lease_id = lease_access_conditions.lease_id
+
+        _request = build_set_metadata_request(
+            url=self._config.url,
+            timeout=timeout,
+            metadata=metadata,
+            lease_id=_lease_id,
+            file_request_intent=self._config.file_request_intent,
+            restype=restype,
+            comp=comp,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
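+    # Usage sketch (illustrative): replacing the share's metadata. This sets
+    # the full metadata map, so pairs omitted here are cleared. The keys are
+    # hypothetical.
+    #
+    #     ops.set_metadata(metadata={"department": "finance"})
+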
+    @distributed_trace
+    def get_access_policy(
+        self,
+        timeout: Optional[int] = None,
+        lease_access_conditions: Optional[_models.LeaseAccessConditions] = None,
+        **kwargs: Any
+    ) -> List[_models.SignedIdentifier]:
+        # pylint: disable=line-too-long
+        """Returns information about stored access policies specified on the share.
+
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
+         Timeouts for File Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param lease_access_conditions: Parameter group. Default value is None.
+        :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions
+        :return: list of SignedIdentifier or the result of cls(response)
+        :rtype: list[~azure.storage.fileshare.models.SignedIdentifier]
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        restype: Literal["share"] = kwargs.pop("restype", _params.pop("restype", "share"))
+        comp: Literal["acl"] = kwargs.pop("comp", _params.pop("comp", "acl"))
+        cls: ClsType[List[_models.SignedIdentifier]] = kwargs.pop("cls", None)
+
+        _lease_id = None
+        if lease_access_conditions is not None:
+            _lease_id = lease_access_conditions.lease_id
+
+        _request = build_get_access_policy_request(
+            url=self._config.url,
+            timeout=timeout,
+            lease_id=_lease_id,
+            file_request_intent=self._config.file_request_intent,
+            restype=restype,
+            comp=comp,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+
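+        # The response body is an XML list of <SignedIdentifier> elements,
+        # deserialized below into model objects.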
+        deserialized = self._deserialize("[SignedIdentifier]", pipeline_response.http_response)
+
+        if cls:
+            return cls(pipeline_response, deserialized, response_headers)  # type: ignore
+
+        return deserialized  # type: ignore
+
+    @distributed_trace
+    def set_access_policy(  # pylint: disable=inconsistent-return-statements
+        self,
+        timeout: Optional[int] = None,
+        lease_access_conditions: Optional[_models.LeaseAccessConditions] = None,
+        share_acl: Optional[List[_models.SignedIdentifier]] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """Sets a stored access policy for use with shared access signatures.
+
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
+         Timeouts for File Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param lease_access_conditions: Parameter group. Default value is None.
+        :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions
+        :param share_acl: The ACL for the share. Default value is None.
+        :type share_acl: list[~azure.storage.fileshare.models.SignedIdentifier]
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        restype: Literal["share"] = kwargs.pop("restype", _params.pop("restype", "share"))
+        comp: Literal["acl"] = kwargs.pop("comp", _params.pop("comp", "acl"))
+        content_type: str = kwargs.pop("content_type", _headers.pop("Content-Type", "application/xml"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _lease_id = None
+        if lease_access_conditions is not None:
+            _lease_id = lease_access_conditions.lease_id
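+        # The ACL is serialized as an XML body wrapped in a <SignedIdentifiers> element.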
+        serialization_ctxt = {"xml": {"name": "SignedIdentifiers", "wrapped": True}}
+        if share_acl is not None:
+            _content = self._serialize.body(
+                share_acl, "[SignedIdentifier]", is_xml=True, serialization_ctxt=serialization_ctxt
+            )
+        else:
+            _content = None
+
+        _request = build_set_access_policy_request(
+            url=self._config.url,
+            timeout=timeout,
+            lease_id=_lease_id,
+            file_request_intent=self._config.file_request_intent,
+            restype=restype,
+            comp=comp,
+            content_type=content_type,
+            version=self._config.version,
+            content=_content,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
+
+    @distributed_trace
+    def get_statistics(
+        self,
+        timeout: Optional[int] = None,
+        lease_access_conditions: Optional[_models.LeaseAccessConditions] = None,
+        **kwargs: Any
+    ) -> _models.ShareStats:
+        # pylint: disable=line-too-long
+        """Retrieves statistics related to the share.
+
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
+         Timeouts for File Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param lease_access_conditions: Parameter group. Default value is None.
+        :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions
+        :return: ShareStats or the result of cls(response)
+        :rtype: ~azure.storage.fileshare.models.ShareStats
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        restype: Literal["share"] = kwargs.pop("restype", _params.pop("restype", "share"))
+        comp: Literal["stats"] = kwargs.pop("comp", _params.pop("comp", "stats"))
+        cls: ClsType[_models.ShareStats] = kwargs.pop("cls", None)
+
+        _lease_id = None
+        if lease_access_conditions is not None:
+            _lease_id = lease_access_conditions.lease_id
+
+        _request = build_get_statistics_request(
+            url=self._config.url,
+            timeout=timeout,
+            lease_id=_lease_id,
+            file_request_intent=self._config.file_request_intent,
+            restype=restype,
+            comp=comp,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+
+        deserialized = self._deserialize("ShareStats", pipeline_response.http_response)
+
+        if cls:
+            return cls(pipeline_response, deserialized, response_headers)  # type: ignore
+
+        return deserialized  # type: ignore
+
+    @distributed_trace
+    def restore(  # pylint: disable=inconsistent-return-statements
+        self,
+        timeout: Optional[int] = None,
+        request_id_parameter: Optional[str] = None,
+        deleted_share_name: Optional[str] = None,
+        deleted_share_version: Optional[str] = None,
+        **kwargs: Any
+    ) -> None:
+        # pylint: disable=line-too-long
+        """Restores a previously deleted Share.
+
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
+         Timeouts for File Service Operations.</a>`. Default value is None.
+        :type timeout: int
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param deleted_share_name: Specifies the name of the previously-deleted share. Default value is
+         None.
+        :type deleted_share_name: str
+        :param deleted_share_version: Specifies the version of the previously-deleted share. Default
+         value is None.
+        :type deleted_share_version: str
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        restype: Literal["share"] = kwargs.pop("restype", _params.pop("restype", "share"))
+        comp: Literal["undelete"] = kwargs.pop("comp", _params.pop("comp", "undelete"))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _request = build_restore_request(
+            url=self._config.url,
+            timeout=timeout,
+            request_id_parameter=request_id_parameter,
+            deleted_share_name=deleted_share_name,
+            deleted_share_version=deleted_share_version,
+            file_request_intent=self._config.file_request_intent,
+            restype=restype,
+            comp=comp,
+            version=self._config.version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [201]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+        response_headers["x-ms-share-quota"] = self._deserialize("int", response.headers.get("x-ms-share-quota"))
+        response_headers["x-ms-share-provisioned-iops"] = self._deserialize(
+            "int", response.headers.get("x-ms-share-provisioned-iops")
+        )
+        response_headers["x-ms-share-provisioned-bandwidth-mibps"] = self._deserialize(
+            "int", response.headers.get("x-ms-share-provisioned-bandwidth-mibps")
+        )
+        response_headers["x-ms-share-included-burst-iops"] = self._deserialize(
+            "int", response.headers.get("x-ms-share-included-burst-iops")
+        )
+        response_headers["x-ms-share-max-burst-credits-for-iops"] = self._deserialize(
+            "int", response.headers.get("x-ms-share-max-burst-credits-for-iops")
+        )
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/fileshare/_generated/py.typed b/.venv/lib/python3.12/site-packages/azure/storage/fileshare/_generated/py.typed
new file mode 100644
index 00000000..e5aff4f8
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/fileshare/_generated/py.typed
@@ -0,0 +1 @@
+# Marker file for PEP 561.
\ No newline at end of file
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/fileshare/_lease.py b/.venv/lib/python3.12/site-packages/azure/storage/fileshare/_lease.py
new file mode 100644
index 00000000..aa65f693
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/fileshare/_lease.py
@@ -0,0 +1,251 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+# pylint: disable=docstring-keyword-should-match-keyword-only
+
+import uuid
+
+from typing import Union, Optional, Any, TYPE_CHECKING
+
+from azure.core.tracing.decorator import distributed_trace
+from azure.core.exceptions import HttpResponseError
+
+from ._shared.response_handlers import return_response_headers, process_storage_error
+from ._generated.operations import FileOperations, ShareOperations
+
+if TYPE_CHECKING:
+    from datetime import datetime
+    from azure.storage.fileshare import ShareClient, ShareFileClient
+
+
+class ShareLeaseClient(object):  # pylint: disable=client-accepts-api-version-keyword
+    """Creates a new ShareLeaseClient.
+
+    This client provides lease operations on a ShareClient or ShareFileClient.
+
+    :param client:
+        The client of the file or share to lease.
+    :type client: ~azure.storage.fileshare.ShareFileClient or
+        ~azure.storage.fileshare.ShareClient
+    :param str lease_id:
+        A string representing the lease ID of an existing lease. This value does not
+        need to be specified in order to acquire a new lease, or break one.
+    """
+
+    id: str
+    """The ID of the lease currently being maintained. If no lease ID was
+        supplied, a new UUID is generated when the client is created."""
+    etag: Optional[str]
+    """The ETag of the lease currently being maintained. This will be `None` if no
+        lease has yet been acquired or modified."""
+    last_modified: Optional["datetime"]
+    """The last modified timestamp of the lease currently being maintained.
+        This will be `None` if no lease has yet been acquired or modified."""
+
+    def __init__(  # pylint: disable=missing-client-constructor-parameter-credential, missing-client-constructor-parameter-kwargs
+        self, client: Union["ShareFileClient", "ShareClient"],
+        lease_id: Optional[str] = None
+    ) -> None:
+        self.id = lease_id or str(uuid.uuid4())
+        self.last_modified = None
+        self.etag = None
+        if hasattr(client, 'file_name'):
+            self._client = client._client.file  # type: ignore
+            self._snapshot = None
+        elif hasattr(client, 'share_name'):
+            self._client = client._client.share
+            self._snapshot = client.snapshot
+        else:
+            raise TypeError("Lease must use ShareFileClient or ShareClient.")
+
+    def __enter__(self):
+        return self
+
+    def __exit__(self, *args: Any):
+        self.release()
+
+    @distributed_trace
+    def acquire(self, **kwargs: Any) -> None:
+        """Requests a new lease. This operation establishes and manages a lock on a
+        file or share for write and delete operations. If the file or share does not have an active lease,
+        the File or Share service creates a lease on the file or share. If the file has an active lease,
+        you can only request a new lease using the active lease ID.
+
+
+        If the file or share does not have an active lease, the File or Share service creates a
+        lease on the file and returns a new lease ID.
+
+        :keyword int lease_duration:
+            Specifies the duration of the lease, in seconds, or negative one
+            (-1) for a lease that never expires. File leases never expire. A non-infinite share lease can be
+            between 15 and 60 seconds. A share lease duration cannot be changed
+            using renew or change. Default is -1 (infinite share lease).
+
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :rtype: None
+        """
+        try:
+            lease_duration = kwargs.pop('lease_duration', -1)
+            if self._snapshot:
+                kwargs['sharesnapshot'] = self._snapshot
+            response = self._client.acquire_lease(
+                timeout=kwargs.pop('timeout', None),
+                duration=lease_duration,
+                proposed_lease_id=self.id,
+                cls=return_response_headers,
+                **kwargs)
+        except HttpResponseError as error:
+            process_storage_error(error)
+        self.id = response.get('lease_id')
+        self.last_modified = response.get('last_modified')
+        self.etag = response.get('etag')
+
+    @distributed_trace
+    def renew(self, **kwargs: Any) -> None:
+        """Renews the share lease.
+
+        The share lease can be renewed if the lease ID specified in the
+        lease client matches that associated with the share. Note that
+        the lease may be renewed even if it has expired as long as the share
+        has not been leased again since the expiration of that lease. When you
+        renew a lease, the lease duration clock resets.
+
+        .. versionadded:: 12.6.0
+
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :return: None
+        """
+        if isinstance(self._client, FileOperations):
+            raise TypeError("Lease renewal operations are only valid for ShareClient.")
+        try:
+            response = self._client.renew_lease(
+                lease_id=self.id,
+                timeout=kwargs.pop('timeout', None),
+                sharesnapshot=self._snapshot,
+                cls=return_response_headers,
+                **kwargs)
+        except HttpResponseError as error:
+            process_storage_error(error)
+        self.etag = response.get('etag')
+        self.id = response.get('lease_id')
+        self.last_modified = response.get('last_modified')
+
+    @distributed_trace
+    def release(self, **kwargs: Any) -> None:
+        """Releases the lease. The lease may be released if the lease ID specified on the request matches
+        that associated with the share or file. Releasing the lease allows another client to immediately acquire
+        the lease for the share or file as soon as the release is complete.
+
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :return: None
+        """
+        try:
+            if self._snapshot:
+                kwargs['sharesnapshot'] = self._snapshot
+            response = self._client.release_lease(
+                lease_id=self.id,
+                timeout=kwargs.pop('timeout', None),
+                cls=return_response_headers,
+                **kwargs)
+        except HttpResponseError as error:
+            process_storage_error(error)
+        self.etag = response.get('etag')
+        self.id = response.get('lease_id')
+        self.last_modified = response.get('last_modified')
+
+    @distributed_trace
+    def change(self, proposed_lease_id: str, **kwargs: Any) -> None:
+        """ Changes the lease ID of an active lease. A change must include the current lease ID in x-ms-lease-id and
+        a new lease ID in x-ms-proposed-lease-id.
+
+        :param str proposed_lease_id:
+            Proposed lease ID, in a GUID string format. The File or Share service will raise an error
+            (Invalid request) if the proposed lease ID is not in the correct format.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :return: None
+        """
+        try:
+            if self._snapshot:
+                kwargs['sharesnapshot'] = self._snapshot
+            response = self._client.change_lease(
+                lease_id=self.id,
+                proposed_lease_id=proposed_lease_id,
+                timeout=kwargs.pop('timeout', None),
+                cls=return_response_headers,
+                **kwargs)
+        except HttpResponseError as error:
+            process_storage_error(error)
+        self.etag = response.get('etag')
+        self.id = response.get('lease_id')
+        self.last_modified = response.get('last_modified')
+
+    @distributed_trace
+    def break_lease(self, **kwargs: Any) -> int:
+        """Force breaks the lease if the file or share has an active lease. Any authorized request can break the lease;
+        the request is not required to specify a matching lease ID. An infinite lease breaks immediately.
+
+        Once a lease is broken, it cannot be changed. Any authorized request can break the lease;
+        the request is not required to specify a matching lease ID.
+        When a lease is successfully broken, the response indicates the interval
+        in seconds until a new lease can be acquired.
+
+        :keyword int lease_break_period:
+            This is the proposed duration of seconds that the share lease
+            should continue before it is broken, between 0 and 60 seconds. This
+            break period is only used if it is shorter than the time remaining
+            on the share lease. If longer, the time remaining on the share lease is used.
+            A new share lease will not be available before the break period has
+            expired, but the share lease may be held for longer than the break
+            period. If this header does not appear with a break
+            operation, a fixed-duration share lease breaks after the remaining share lease
+            period elapses, and an infinite share lease breaks immediately.
+
+            .. versionadded:: 12.6.0
+
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :return: Approximate time remaining in the lease period, in seconds.
+        :rtype: int
+        """
+        try:
+            lease_break_period = kwargs.pop('lease_break_period', None)
+            if self._snapshot:
+                kwargs['sharesnapshot'] = self._snapshot
+            if isinstance(self._client, ShareOperations):
+                kwargs['break_period'] = lease_break_period
+            if isinstance(self._client, FileOperations) and lease_break_period:
+                raise TypeError("Setting a lease break period is only applicable to Share leases.")
+
+            response = self._client.break_lease(
+                timeout=kwargs.pop('timeout', None),
+                cls=return_response_headers,
+                **kwargs)
+        except HttpResponseError as error:
+            process_storage_error(error)
+        return response.get('lease_time')  # type: ignore
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/fileshare/_models.py b/.venv/lib/python3.12/site-packages/azure/storage/fileshare/_models.py
new file mode 100644
index 00000000..ec95b64c
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/fileshare/_models.py
@@ -0,0 +1,1294 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+# pylint: disable=too-few-public-methods, too-many-instance-attributes, super-init-not-called, too-many-lines
+
+from enum import Enum
+from typing import (
+    Any, Callable, Dict, List, Literal, Optional, Union,
+    TYPE_CHECKING
+)
+from urllib.parse import unquote
+from typing_extensions import Self
+
+from azure.core import CaseInsensitiveEnumMeta
+from azure.core.exceptions import HttpResponseError
+from azure.core.paging import PageIterator
+
+from ._generated.models import AccessPolicy as GenAccessPolicy
+from ._generated.models import CorsRule as GeneratedCorsRule
+from ._generated.models import DirectoryItem
+from ._generated.models import Metrics as GeneratedMetrics
+from ._generated.models import RetentionPolicy as GeneratedRetentionPolicy
+from ._generated.models import ShareProtocolSettings as GeneratedShareProtocolSettings
+from ._generated.models import ShareSmbSettings as GeneratedShareSmbSettings
+from ._generated.models import SmbMultichannel as GeneratedSmbMultichannel
+from ._generated.models import StorageServiceProperties as GeneratedStorageServiceProperties
+from ._parser import _parse_datetime_from_str
+from ._shared.models import DictMixin, get_enum_value
+from ._shared.response_handlers import process_storage_error, return_context_and_deserialized
+
+if TYPE_CHECKING:
+    from datetime import datetime
+    from ._generated.models import ShareRootSquash
+
+
+def _wrap_item(item):
+    if isinstance(item, DirectoryItem):
+        return {'name': item.name, 'is_directory': True}
+    return {'name': item.name, 'size': item.properties.content_length, 'is_directory': False}
+
+
+class RetentionPolicy(GeneratedRetentionPolicy):
+    """The retention policy which determines how long the associated data should
+    persist.
+
+    All required parameters must be populated in order to send to Azure.
+
+    :param bool enabled:
+        Indicates whether a retention policy is enabled for the storage service.
+    :param Optional[int] days:
+        Indicates the number of days that metrics, logging, or soft-deleted data should be retained.
+        All data older than this value will be deleted.
+    """
+
+    enabled: bool = False
+    """Indicates whether a retention policy is enabled for the storage service."""
+    days: Optional[int] = None
+    """Indicates the number of days that metrics or logging or soft-deleted data should be retained.
+        All data older than this value will be deleted."""
+
+    def __init__(self, enabled: bool = False, days: Optional[int] = None) -> None:
+        self.enabled = enabled
+        self.days = days
+        if self.enabled and (self.days is None):
+            raise ValueError("If policy is enabled, 'days' must be specified.")
+
+    @classmethod
+    def _from_generated(cls, generated):
+        if not generated:
+            return cls()
+        return cls(
+            enabled=generated.enabled,
+            days=generated.days,
+        )
+
+
+class Metrics(GeneratedMetrics):
+    """A summary of request statistics grouped by API in hour or minute aggregates
+    for files.
+
+    All required parameters must be populated in order to send to Azure.
+
+    :keyword str version:
+        The version of Storage Analytics to configure. The default value is 1.0.
+    :keyword bool enabled:
+        Indicates whether metrics are enabled for the File service.
+    :keyword bool include_apis:
+        Indicates whether metrics should generate summary statistics for called API operations.
+    :keyword ~azure.storage.fileshare.RetentionPolicy retention_policy:
+        Determines how long the associated data should persist.
+    """
+
+    version: str = '1.0'
+    """The version of Storage Analytics to configure."""
+    enabled: bool = False
+    """Indicates whether metrics are enabled for the File service."""
+    include_apis: bool
+    """Indicates whether metrics should generate summary statistics for called API operations."""
+    retention_policy: RetentionPolicy = RetentionPolicy()
+    """Determines how long the associated data should persist."""
+
+    def __init__(self, **kwargs: Any) -> None:
+        self.version = kwargs.get('version', '1.0')
+        self.enabled = kwargs.get('enabled', False)
+        self.include_apis = kwargs.get('include_apis')  # type: ignore [assignment]
+        self.retention_policy = kwargs.get('retention_policy') or RetentionPolicy()
+
+    @classmethod
+    def _from_generated(cls, generated):
+        if not generated:
+            return cls()
+        return cls(
+            version=generated.version,
+            enabled=generated.enabled,
+            include_apis=generated.include_apis,
+            retention_policy=RetentionPolicy._from_generated(generated.retention_policy)  # pylint: disable=protected-access
+        )
+
+
+class CorsRule(GeneratedCorsRule):
+    """CORS is an HTTP feature that enables a web application running under one
+    domain to access resources in another domain. Web browsers implement a
+    security restriction known as same-origin policy that prevents a web page
+    from calling APIs in a different domain; CORS provides a secure way to
+    allow one domain (the origin domain) to call APIs in another domain.
+
+    All required parameters must be populated in order to send to Azure.
+
+    :param List[str] allowed_origins:
+        A list of origin domains that will be allowed via CORS, or "*" to allow
+        all domains. The list must contain at least one entry. Limited to 64
+        origin domains. Each allowed origin can have up to 256 characters.
+    :param List[str] allowed_methods:
+        A list of HTTP methods that are allowed to be executed by the origin.
+        The list must contain at least one entry. For Azure Storage,
+        permitted methods are DELETE, GET, HEAD, MERGE, POST, OPTIONS or PUT.
+    :keyword List[str] allowed_headers:
+        Defaults to an empty list. A list of headers allowed to be part of
+        the cross-origin request. Limited to 64 defined headers and 2 prefixed
+        headers. Each header can be up to 256 characters.
+    :keyword List[str] exposed_headers:
+        Defaults to an empty list. A list of response headers to expose to CORS
+        clients. Limited to 64 defined headers and two prefixed headers. Each
+        header can be up to 256 characters.
+    :keyword int max_age_in_seconds:
+        The number of seconds that the client/browser should cache a
+        preflight response.
+    """
+
+    allowed_origins: str
+    """The comma-delimited string representation of the list of origin domains
+        that will be allowed via CORS, or "*" to allow all domains."""
+    allowed_methods: str
+    """The comma-delimited string representation of the list of HTTP methods
+        that are allowed to be executed by the origin."""
+    allowed_headers: str
+    """The comma-delimited string representation of the list of headers
+        allowed to be a part of the cross-origin request."""
+    exposed_headers: str
+    """The comma-delimited string representation of the list of response
+        headers to expose to CORS clients."""
+    max_age_in_seconds: int
+    """The number of seconds that the client/browser should cache a pre-flight response."""
+
+    def __init__(self, allowed_origins: List[str], allowed_methods: List[str], **kwargs: Any) -> None:
+        self.allowed_origins = ','.join(allowed_origins)
+        self.allowed_methods = ','.join(allowed_methods)
+        self.allowed_headers = ','.join(kwargs.get('allowed_headers', []))
+        self.exposed_headers = ','.join(kwargs.get('exposed_headers', []))
+        self.max_age_in_seconds = kwargs.get('max_age_in_seconds', 0)
+
+    @staticmethod
+    def _to_generated(rules: Optional[List["CorsRule"]]) -> Optional[List[GeneratedCorsRule]]:
+        if rules is None:
+            return rules
+
+        generated_cors_list = []
+        for cors_rule in rules:
+            generated_cors = GeneratedCorsRule(
+                allowed_origins=cors_rule.allowed_origins,
+                allowed_methods=cors_rule.allowed_methods,
+                allowed_headers=cors_rule.allowed_headers,
+                exposed_headers=cors_rule.exposed_headers,
+                max_age_in_seconds=cors_rule.max_age_in_seconds,
+            )
+            generated_cors_list.append(generated_cors)
+
+        return generated_cors_list
+
+    @classmethod
+    def _from_generated(cls, generated):
+        return cls(
+            [generated.allowed_origins],
+            [generated.allowed_methods],
+            allowed_headers=[generated.allowed_headers],
+            exposed_headers=[generated.exposed_headers],
+            max_age_in_seconds=generated.max_age_in_seconds,
+        )
+
+
+class SmbMultichannel(GeneratedSmbMultichannel):
+    """Settings for Multichannel.
+
+    :keyword bool enabled: If SMB Multichannel is enabled.
+    """
+
+    enabled: Optional[bool]
+    """If SMB Multichannel is enabled."""
+
+    def __init__(self, **kwargs: Any) -> None:
+        self.enabled = kwargs.get('enabled')
+        if self.enabled is None:
+            raise ValueError("The value 'enabled' must be specified.")
+
+
+class ShareSmbSettings(GeneratedShareSmbSettings):
+    """Settings for the SMB protocol.
+
+    :keyword SmbMultichannel multichannel: Sets the multichannel settings.
+    """
+
+    multichannel: SmbMultichannel
+    """Sets the multichannel settings."""
+
+    def __init__(self, **kwargs: Any) -> None:
+        self.multichannel = kwargs.get('multichannel')  # type: ignore [assignment]
+        if self.multichannel is None:
+            raise ValueError("The value 'multichannel' must be specified.")
+
+
+class ShareProtocolSettings(GeneratedShareProtocolSettings):
+    """Protocol Settings class used by the set and get service properties methods in the share service.
+
+    Contains protocol properties of the share service such as the SMB setting of the share service.
+
+    :keyword ShareSmbSettings smb: Sets SMB settings.
+    """
+
+    smb: ShareSmbSettings
+    """Sets the SMB settings."""
+
+    def __init__(self, **kwargs: Any) -> None:
+        self.smb = kwargs.get('smb')  # type: ignore [assignment]
+        if self.smb is None:
+            raise ValueError("The value 'smb' must be specified.")
+
+    @classmethod
+    def _from_generated(cls, generated):
+        return cls(
+            smb=generated.smb)
+
+
+class ShareSasPermissions(object):
+    """ShareSasPermissions class to be used to be used with
+    generating shared access signature and access policy operations.
+
+    :param bool read:
+        Read the content, properties or metadata of any file in the share. Use any
+        file in the share as the source of a copy operation.
+    :param bool write:
+        For any file in the share, create or write content, properties or metadata.
+        Resize the file. Use the file as the destination of a copy operation within
+        the same account.
+        Note: You cannot grant permissions to read or write share properties or
+        metadata with a service SAS. Use an account SAS instead.
+    :param bool delete:
+        Delete any file in the share.
+        Note: You cannot grant permissions to delete a share with a service SAS. Use
+        an account SAS instead.
+    :param bool list:
+        List files and directories in the share.
+    :param bool create:
+        Create a new file in the share, or copy a file to a new file in the share.
+    """
+
+    read: bool = False
+    """The read permission for share SAS."""
+    write: bool = False
+    """The write permission for share SAS."""
+    delete: bool = False
+    """The delete permission for share SAS."""
+    list: bool = False
+    """The list permission for share SAS."""
+    create: bool = False
+    """The create permission for share SAS."""
+
+    def __init__(
+        self, read: bool = False,
+        write: bool = False,
+        delete: bool = False,
+        list: bool = False,
+        create: bool = False
+    ) -> None:
+        self.read = read
+        self.create = create
+        self.write = write
+        self.delete = delete
+        self.list = list
+        self._str = (('r' if self.read else '') +
+                     ('c' if self.create else '') +
+                     ('w' if self.write else '') +
+                     ('d' if self.delete else '') +
+                     ('l' if self.list else ''))
+
+    def __str__(self) -> str:
+        return self._str
+
+    @classmethod
+    def from_string(cls, permission: str) -> Self:
+        """Create a ShareSasPermissions from a string.
+
+        To specify read, create, write, delete, or list permissions you need only to
+        include the first letter of the word in the string. For example, for read and
+        write permissions, you would provide the string "rw".
+
+        :param str permission: The string which dictates the read, create, write,
+            delete, or list permissions
+        :return: A ShareSasPermissions object
+        :rtype: ~azure.storage.fileshare.ShareSasPermissions
+        """
+        p_read = 'r' in permission
+        p_create = 'c' in permission
+        p_write = 'w' in permission
+        p_delete = 'd' in permission
+        p_list = 'l' in permission
+
+        parsed = cls(p_read, p_write, p_delete, p_list, p_create)
+
+        return parsed
+
+
+class AccessPolicy(GenAccessPolicy):
+    """Access Policy class used by the set and get acl methods in each service.
+
+    A stored access policy can specify the start time, expiry time, and
+    permissions for the Shared Access Signatures with which it's associated.
+    Depending on how you want to control access to your resource, you can
+    specify all of these parameters within the stored access policy, and omit
+    them from the URL for the Shared Access Signature. Doing so permits you to
+    modify the associated signature's behavior at any time, as well as to revoke
+    it. Or you can specify one or more of the access policy parameters within
+    the stored access policy, and the others on the URL. Finally, you can
+    specify all of the parameters on the URL. In this case, you can use the
+    stored access policy to revoke the signature, but not to modify its behavior.
+
+    Together the Shared Access Signature and the stored access policy must
+    include all fields required to authenticate the signature. If any required
+    fields are missing, the request will fail. Likewise, if a field is specified
+    both in the Shared Access Signature URL and in the stored access policy, the
+    request will fail with status code 400 (Bad Request).
+
+    :param permission:
+        The permissions associated with the shared access signature. The
+        user is restricted to operations allowed by the permissions.
+        Required unless an id is given referencing a stored access policy
+        which contains this field. This field must be omitted if it has been
+        specified in an associated stored access policy.
+    :type permission: str or ~azure.storage.fileshare.FileSasPermissions or
+        ~azure.storage.fileshare.ShareSasPermissions
+    :param expiry:
+        The time at which the shared access signature becomes invalid.
+        Required unless an id is given referencing a stored access policy
+        which contains this field. This field must be omitted if it has
+        been specified in an associated stored access policy. Azure will always
+        convert values to UTC. If a date is passed in without timezone info, it
+        is assumed to be UTC.
+    :type expiry: ~datetime.datetime or str
+    :param start:
+        The time at which the shared access signature becomes valid. If
+        omitted, start time for this call is assumed to be the time when the
+        storage service receives the request. The provided datetime will always
+        be interpreted as UTC.
+    :type start: ~datetime.datetime or str
+    """
+
+    permission: Optional[Union[ShareSasPermissions, str]]  # type: ignore [assignment]
+    """The permissions associated with the shared access signature. The user is restricted to
+        operations allowed by the permissions."""
+    expiry: Optional[Union["datetime", str]]  # type: ignore [assignment]
+    """The time at which the shared access signature becomes invalid."""
+    start: Optional[Union["datetime", str]]  # type: ignore [assignment]
+    """The time at which the shared access signature becomes valid."""
+
+    def __init__(
+        self, permission: Optional[Union[ShareSasPermissions, str]] = None,
+        expiry: Optional[Union["datetime", str]] = None,
+        start: Optional[Union["datetime", str]] = None
+    ) -> None:
+        self.start = start
+        self.expiry = expiry
+        self.permission = permission
+
+
+class LeaseProperties(DictMixin):
+    """File or Share Lease Properties."""
+
+    status: str
+    """The lease status of the file or share. Possible values: locked|unlocked"""
+    state: str
+    """Lease state of the file or share. Possible values: available|leased|expired|breaking|broken"""
+    duration: Optional[str]
+    """When a file or share is leased, specifies whether the lease is of infinite or fixed duration."""
+
+    def __init__(self, **kwargs: Any) -> None:
+        self.status = get_enum_value(kwargs.get('x-ms-lease-status'))
+        self.state = get_enum_value(kwargs.get('x-ms-lease-state'))
+        self.duration = get_enum_value(kwargs.get('x-ms-lease-duration'))
+
+    @classmethod
+    def _from_generated(cls, generated):
+        lease = cls()
+        lease.status = get_enum_value(generated.properties.lease_status)
+        lease.state = get_enum_value(generated.properties.lease_state)
+        lease.duration = get_enum_value(generated.properties.lease_duration)
+        return lease
+
+
+class ContentSettings(DictMixin):
+    """Used to store the content settings of a file.
+
+    :param Optional[str] content_type:
+        The content type specified for the file. If no content type was
+        specified, the default content type is application/octet-stream.
+    :param Optional[str] content_encoding:
+        If the content_encoding has previously been set
+        for the file, that value is stored.
+    :param Optional[str] content_language:
+        If the content_language has previously been set
+        for the file, that value is stored.
+    :param Optional[str] content_disposition:
+        content_disposition conveys additional information about how to
+        process the response payload, and also can be used to attach
+        additional metadata. If content_disposition has previously been set
+        for the file, that value is stored.
+    :param Optional[str] cache_control:
+        If the cache_control has previously been set for
+        the file, that value is stored.
+    :param Optional[bytearray] content_md5:
+        If the content_md5 has been set for the file, this response
+        header is stored so that the client can check for message content
+        integrity.
+    """
+
+    content_type: Optional[str] = None
+    """The content type specified for the file."""
+    content_encoding: Optional[str] = None
+    """The content encoding specified for the file."""
+    content_language: Optional[str] = None
+    """The content language specified for the file."""
+    content_disposition: Optional[str] = None
+    """The content disposition specified for the file."""
+    cache_control: Optional[str] = None
+    """The cache control specified for the file."""
+    content_md5: Optional[bytearray] = None
+    """The content md5 specified for the file."""
+
+    def __init__(
+        self, content_type: Optional[str] = None,
+        content_encoding: Optional[str] = None,
+        content_language: Optional[str] = None,
+        content_disposition: Optional[str] = None,
+        cache_control: Optional[str] = None,
+        content_md5: Optional[bytearray] = None,
+        **kwargs: Any
+    ) -> None:
+        self.content_type = content_type or kwargs.get('Content-Type')
+        self.content_encoding = content_encoding or kwargs.get('Content-Encoding')
+        self.content_language = content_language or kwargs.get('Content-Language')
+        self.content_md5 = content_md5 or kwargs.get('Content-MD5')
+        self.content_disposition = content_disposition or kwargs.get('Content-Disposition')
+        self.cache_control = cache_control or kwargs.get('Cache-Control')
+
+    @classmethod
+    def _from_generated(cls, generated):
+        settings = cls()
+        settings.content_type = generated.properties.content_type or None
+        settings.content_encoding = generated.properties.content_encoding or None
+        settings.content_language = generated.properties.content_language or None
+        settings.content_md5 = generated.properties.content_md5 or None
+        settings.content_disposition = generated.properties.content_disposition or None
+        settings.cache_control = generated.properties.cache_control or None
+        return settings
+
+
+class ShareProperties(DictMixin):
+    """Share's properties class."""
+
+    name: str
+    """The name of the share."""
+    last_modified: "datetime"
+    """A datetime object representing the last time the share was modified."""
+    etag: str
+    """The ETag contains a value that you can use to perform operations conditionally."""
+    quota: int
+    """The allocated quota."""
+    access_tier: str
+    """The share's access tier.'"""
+    next_allowed_quota_downgrade_time: Optional[str] = None
+    """The share's next allowed quota downgrade time."""
+    metadata: Dict[str, str]
+    """Name-value pairs associate with the share as metadata."""
+    snapshot: Optional[str] = None
+    """Snapshot of the share."""
+    deleted: Optional[bool] = None
+    """Whether this share was deleted.
+        This is a service returned value, and the value will be set when listing shares with deleted shares included."""
+    deleted_time: Optional["datetime"] = None
+    """A datetime object representing the time at which the share was deleted.
+        This is a service returned value, and the value will be set when listing shares with deleted shares included."""
+    version: Optional[str] = None
+    """Indicates the version of the deleted share.
+        This is a service returned value, and the value will be set when listing shares with deleted shares included."""
+    remaining_retention_days: Optional[int] = None
+    """The number of days that the share will be retained before being permanently deleted by the service.
+        This is a service returned value, and the value will be set when listing shares with deleted shares included."""
+    provisioned_egress_mbps: Optional[int] = None
+    """Provisioned egress in megabits/second. Only applicable to premium file accounts."""
+    provisioned_ingress_mbps: Optional[int] = None
+    """Provisioned ingress in megabits/second. Only applicable to premium file accounts."""
+    provisioned_iops: Optional[int] = None
+    """Provisioned input/output operators per second (iops). Only applicable to premium file accounts."""
+    provisioned_bandwidth: Optional[int] = None
+    """Provisioned bandwidth in megabits/second. Only applicable to premium file accounts."""
+    lease: LeaseProperties
+    """Share lease properties."""
+    protocols: Optional[List[str]] = None
+    """Indicates the protocols enabled on the share. The protocol can be either SMB or NFS."""
+    root_squash: Optional[Union["ShareRootSquash", str]] = None
+    """Possible values include: 'NoRootSquash', 'RootSquash', 'AllSquash'."""
+    enable_snapshot_virtual_directory_access: Optional[bool] = None
+    """Specifies whether the snapshot virtual directory should be accessible at the root of the share
+        mount point when NFS is enabled. If not specified, the default is True."""
+    paid_bursting_enabled: Optional[int] = None
+    """This property enables paid bursting."""
+    paid_bursting_bandwidth_mibps: Optional[int] = None
+    """The maximum throughput the file share can support in MiB/s."""
+    paid_bursting_iops: Optional[int] = None
+    """The maximum IOPS the file share can support."""
+    included_burst_iops: Optional[int] = None
+    """The share's included burst IOPS."""
+    max_burst_credits_for_iops: Optional[int] = None
+    """The share's maximum burst credits for IOPS."""
+    next_provisioned_iops_downgrade: Optional["datetime"]
+    """The share's next allowed provisioned throughput downgrade time."""
+    next_provisioned_bandwidth_downgrade: Optional["datetime"]
+    """The share's next allowed provisioned bandwidth downgrade time."""
+
+    def __init__(self, **kwargs: Any) -> None:
+        self.name = None  # type: ignore [assignment]
+        self.last_modified = kwargs.get('Last-Modified')  # type: ignore [assignment]
+        self.etag = kwargs.get('ETag')  # type: ignore [assignment]
+        self.quota = kwargs.get('x-ms-share-quota')  # type: ignore [assignment]
+        self.access_tier = kwargs.get('x-ms-access-tier')  # type: ignore [assignment]
+        self.next_allowed_quota_downgrade_time = kwargs.get('x-ms-share-next-allowed-quota-downgrade-time')
+        self.metadata = kwargs.get('metadata')  # type: ignore [assignment]
+        self.snapshot = None
+        self.deleted = None
+        self.deleted_time = None
+        self.version = None
+        self.remaining_retention_days = None
+        self.provisioned_egress_mbps = kwargs.get('x-ms-share-provisioned-egress-mbps')
+        self.provisioned_ingress_mbps = kwargs.get('x-ms-share-provisioned-ingress-mbps')
+        self.provisioned_iops = kwargs.get('x-ms-share-provisioned-iops')
+        self.provisioned_bandwidth = kwargs.get('x-ms-share-provisioned-bandwidth-mibps')
+        self.lease = LeaseProperties(**kwargs)
+        self.protocols = [protocol.strip() for protocol in kwargs.get('x-ms-enabled-protocols', None).split(',')]\
+            if kwargs.get('x-ms-enabled-protocols', None) else None
+        self.root_squash = kwargs.get('x-ms-root-squash', None)
+        self.enable_snapshot_virtual_directory_access = \
+            kwargs.get('x-ms-enable-snapshot-virtual-directory-access')
+        self.paid_bursting_enabled = kwargs.get('x-ms-share-paid-bursting-enabled')
+        self.paid_bursting_bandwidth_mibps = kwargs.get('x-ms-share-paid-bursting-max-bandwidth-mibps')
+        self.paid_bursting_iops = kwargs.get('x-ms-share-paid-bursting-max-iops')
+        self.included_burst_iops = kwargs.get('x-ms-share-included-burst-iops')
+        self.max_burst_credits_for_iops = kwargs.get('x-ms-share-max-burst-credits-for-iops')
+        self.next_provisioned_iops_downgrade = (  # pylint: disable=name-too-long
+            kwargs.get('x-ms-share-next-allowed-provisioned-iops-downgrade-time'))
+        self.next_provisioned_bandwidth_downgrade = (  # pylint: disable=name-too-long
+            kwargs.get('x-ms-share-next-allowed-provisioned-bandwidth-downgrade-time'))
+
+    @classmethod
+    def _from_generated(cls, generated):
+        props = cls()
+        props.name = generated.name
+        props.last_modified = generated.properties.last_modified
+        props.etag = generated.properties.etag
+        props.quota = generated.properties.quota
+        props.access_tier = generated.properties.access_tier
+        props.next_allowed_quota_downgrade_time = generated.properties.next_allowed_quota_downgrade_time
+        props.metadata = generated.metadata
+        props.snapshot = generated.snapshot
+        props.deleted = generated.deleted
+        props.deleted_time = generated.properties.deleted_time
+        props.version = generated.version
+        props.remaining_retention_days = generated.properties.remaining_retention_days
+        props.provisioned_egress_mbps = generated.properties.provisioned_egress_m_bps
+        props.provisioned_ingress_mbps = generated.properties.provisioned_ingress_m_bps
+        props.provisioned_iops = generated.properties.provisioned_iops
+        props.provisioned_bandwidth = generated.properties.provisioned_bandwidth_mi_bps
+        props.lease = LeaseProperties._from_generated(generated)  # pylint: disable=protected-access
+        props.protocols = [protocol.strip() for protocol in generated.properties.enabled_protocols.split(',')]\
+            if generated.properties.enabled_protocols else None
+        props.root_squash = generated.properties.root_squash
+        props.enable_snapshot_virtual_directory_access = generated.properties.enable_snapshot_virtual_directory_access
+        props.paid_bursting_enabled = generated.properties.paid_bursting_enabled
+        props.paid_bursting_bandwidth_mibps = generated.properties.paid_bursting_max_bandwidth_mibps
+        props.paid_bursting_iops = generated.properties.paid_bursting_max_iops
+        props.included_burst_iops = generated.properties.included_burst_iops
+        props.max_burst_credits_for_iops = generated.properties.max_burst_credits_for_iops
+        props.next_provisioned_iops_downgrade = (  # pylint: disable=name-too-long
+            generated.properties.next_allowed_provisioned_iops_downgrade_time)
+        props.next_provisioned_bandwidth_downgrade = (  # pylint: disable=name-too-long
+            generated.properties.next_allowed_provisioned_bandwidth_downgrade_time)
+        return props
+
+
+class SharePropertiesPaged(PageIterator):
+    """An iterable of Share properties.
+
+    :param Callable command: Function to retrieve the next page of items.
+    :param Optional[str] prefix: Filters the results to return only shares whose names
+        begin with the specified prefix.
+    :param Optional[int] results_per_page: The maximum number of share names to retrieve per call.
+    :param Optional[str] continuation_token: An opaque continuation token to retrieve the next page of results.
+    """
+
+    service_endpoint: Optional[str] = None
+    """The service URL."""
+    prefix: Optional[str] = None
+    """A filename prefix being used to filter the list."""
+    marker: Optional[str] = None
+    """The continuation token of the current page of results."""
+    results_per_page: Optional[int] = None
+    """The maximum number of results to retrieve per API call."""
+    location_mode: Optional[str] = None
+    """The location mode being used to list results. The available
+        options include "primary" and "secondary"."""
+    current_page: List[ShareProperties]
+    """The current page of listed results."""
+
+    def __init__(
+        self, command: Callable,
+        prefix: Optional[str] = None,
+        results_per_page: Optional[int] = None,
+        continuation_token: Optional[str] = None
+    ) -> None:
+        super(SharePropertiesPaged, self).__init__(
+            get_next=self._get_next_cb,
+            extract_data=self._extract_data_cb,
+            continuation_token=continuation_token or ""
+        )
+        self._command = command
+        self.service_endpoint = None
+        self.prefix = prefix
+        self.marker = None
+        self.results_per_page = results_per_page
+        self.location_mode = None
+        self.current_page = []
+
+    def _get_next_cb(self, continuation_token):
+        try:
+            return self._command(
+                marker=continuation_token or None,
+                maxresults=self.results_per_page,
+                prefix=self.prefix,
+                cls=return_context_and_deserialized,
+                use_location=self.location_mode)
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    def _extract_data_cb(self, get_next_return):
+        self.location_mode, self._response = get_next_return
+        self.service_endpoint = self._response.service_endpoint
+        self.prefix = self._response.prefix
+        self.marker = self._response.marker
+        self.results_per_page = self._response.max_results
+        self.current_page = [ShareProperties._from_generated(i) for i in self._response.share_items]  # pylint: disable=protected-access
+        return self._response.next_marker or None, self.current_page
+
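+# Editor's note: an illustrative sketch, not part of the SDK source. Pagers of
+# ShareProperties are not built directly; ShareServiceClient.list_shares()
+# returns one (`conn_str` below is an assumed placeholder):
+#
+#     from azure.storage.fileshare import ShareServiceClient
+#     service = ShareServiceClient.from_connection_string(conn_str)
+#     for page in service.list_shares(name_starts_with="logs").by_page():
+#         for share in page:  # each item is a ShareProperties
+#             print(share.name, share.quota)
+#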
+
+class Handle(DictMixin):
+    """A listed Azure Storage handle item.
+
+    All required parameters must be populated in order to send to Azure.
+
+    :keyword str client_name: Name of the client machine where the share is being mounted.
+    :keyword str handle_id: XSMB service handle ID.
+    :keyword str path: File or directory name including full path starting from share root.
+    :keyword str file_id: FileId uniquely identifies the file or directory.
+    :keyword str parent_id: ParentId uniquely identifies the parent directory of the object.
+    :keyword str session_id: SMB session ID in context of which the file handle was opened.
+    :keyword str client_ip: Client IP that opened the handle.
+    :keyword ~datetime.datetime open_time: Time when the session that previously opened
+        the handle has last been reconnected. (UTC)
+    :keyword Optional[~datetime.datetime] last_reconnect_time: Time handle was last connected to. (UTC)
+    :keyword access_rights: Access rights of the handle.
+    :paramtype access_rights: List[Literal['Read', 'Write', 'Delete']]
+    """
+
+    client_name: str
+    """Name of the client machine where the share is being mounted."""
+    id: str
+    """XSMB service handle ID."""
+    path: str
+    """File or directory name including full path starting from share root."""
+    file_id: str
+    """FileId uniquely identifies the file or directory."""
+    parent_id: str
+    """ParentId uniquely identifies the parent directory of the object."""
+    session_id: str
+    """SMB session ID in context of which the file handle was opened."""
+    client_ip: str
+    """Client IP that opened the handle."""
+    open_time: "datetime"
+    """Time when the session that previously opened the handle was last been reconnected. (UTC)"""
+    last_reconnect_time: Optional["datetime"]
+    """Time handle that was last connected to. (UTC)"""
+    access_rights: List[Literal['Read', 'Write', 'Delete']]
+    """Access rights of the handle."""
+
+    def __init__(self, **kwargs: Any) -> None:
+        self.client_name = kwargs.get('client_name')  # type: ignore [assignment]
+        self.id = kwargs.get('handle_id')  # type: ignore [assignment]
+        self.path = kwargs.get('path')  # type: ignore [assignment]
+        self.file_id = kwargs.get('file_id')  # type: ignore [assignment]
+        self.parent_id = kwargs.get('parent_id')  # type: ignore [assignment]
+        self.session_id = kwargs.get('session_id')  # type: ignore [assignment]
+        self.client_ip = kwargs.get('client_ip')  # type: ignore [assignment]
+        self.open_time = kwargs.get('open_time')  # type: ignore [assignment]
+        self.last_reconnect_time = kwargs.get('last_reconnect_time')
+        self.access_rights = kwargs.get('access_right_list')  # type: ignore [assignment]
+
+    @classmethod
+    def _from_generated(cls, generated):
+        handle = cls()
+        handle.client_name = generated.client_name
+        handle.id = generated.handle_id
+        handle.path = unquote(generated.path.content) if generated.path.encoded else generated.path.content
+        handle.file_id = generated.file_id
+        handle.parent_id = generated.parent_id
+        handle.session_id = generated.session_id
+        handle.client_ip = generated.client_ip
+        handle.open_time = generated.open_time
+        handle.last_reconnect_time = generated.last_reconnect_time
+        handle.access_rights = generated.access_right_list
+        return handle
+
+
+class HandlesPaged(PageIterator):
+    """An iterable of Handles.
+
+    :param Callable command: Function to retrieve the next page of items.
+    :param Optional[int] results_per_page: The maximum number of handles to retrieve per call.
+    :param Optional[str] continuation_token: An opaque continuation token to retrieve the next page of results.
+    """
+
+    marker: Optional[str] = None
+    """The continuation token of the current page of results."""
+    results_per_page: Optional[int] = None
+    """The maximum number of results retrieved per API call."""
+    location_mode: Optional[str] = None
+    """The location mode being used to list results.
+        The available options include "primary" and "secondary"."""
+    current_page: List[Handle]
+    """The current page of listed results."""
+
+    def __init__(
+        self, command: Callable,
+        results_per_page: Optional[int] = None,
+        continuation_token: Optional[str] = None
+    ) -> None:
+        super(HandlesPaged, self).__init__(
+            get_next=self._get_next_cb,
+            extract_data=self._extract_data_cb,
+            continuation_token=continuation_token or ""
+        )
+        self._command = command
+        self.marker = None
+        self.results_per_page = results_per_page
+        self.location_mode = None
+        self.current_page = []
+
+    def _get_next_cb(self, continuation_token):
+        try:
+            return self._command(
+                marker=continuation_token or None,
+                maxresults=self.results_per_page,
+                cls=return_context_and_deserialized,
+                use_location=self.location_mode)
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    def _extract_data_cb(self, get_next_return):
+        self.location_mode, self._response = get_next_return
+        self.current_page = [Handle._from_generated(h) for h in self._response.handle_list]  # pylint: disable=protected-access
+        return self._response.next_marker or None, self.current_page
+
+
+class NTFSAttributes(object):
+    """Valid set of attributes to set for file or directory.
+
+    To set attributes on a directory, 'Directory' should always be enabled,
+    except when setting 'None' to clear all attributes.
+    """
+
+    read_only: bool = False
+    """Enable/disable 'ReadOnly' attribute for DIRECTORY or FILE."""
+    hidden: bool = False
+    """Enable/disable 'Hidden' attribute for DIRECTORY or FILE."""
+    system: bool = False
+    """Enable/disable 'System' attribute for DIRECTORY or FILE."""
+    none: bool = False
+    """Enable/disable 'None' attribute for DIRECTORY or FILE to clear all attributes of FILE/DIRECTORY."""
+    directory: bool = False
+    """Enable/disable 'Directory' attribute for DIRECTORY."""
+    archive: bool = False
+    """Enable/disable 'Archive' attribute for DIRECTORY or FILE."""
+    temporary: bool = False
+    """Enable/disable 'Temporary' attribute for FILE."""
+    offline: bool = False
+    """Enable/disable 'Offline' attribute for DIRECTORY or FILE."""
+    not_content_indexed: bool = False
+    """Enable/disable 'NotContentIndexed' attribute for DIRECTORY or FILE."""
+    no_scrub_data: bool = False
+    """Enable/disable 'NoScrubData' attribute for DIRECTORY or FILE."""
+
+    def __init__(
+        self, read_only: bool = False,
+        hidden: bool = False,
+        system: bool = False,
+        none: bool = False,
+        directory: bool = False,
+        archive: bool = False,
+        temporary: bool = False,
+        offline: bool = False,
+        not_content_indexed: bool = False,
+        no_scrub_data: bool = False
+    ) -> None:
+        self.read_only = read_only
+        self.hidden = hidden
+        self.system = system
+        self.none = none
+        self.directory = directory
+        self.archive = archive
+        self.temporary = temporary
+        self.offline = offline
+        self.not_content_indexed = not_content_indexed
+        self.no_scrub_data = no_scrub_data
+        self._str = (('ReadOnly|' if self.read_only else '') +
+                     ('Hidden|' if self.hidden else '') +
+                     ('System|' if self.system else '') +
+                     ('None|' if self.none else '') +
+                     ('Directory|' if self.directory else '') +
+                     ('Archive|' if self.archive else '') +
+                     ('Temporary|' if self.temporary else '') +
+                     ('Offline|' if self.offline else '') +
+                     ('NotContentIndexed|' if self.not_content_indexed else '') +
+                     ('NoScrubData|' if self.no_scrub_data else ''))
+
+    def __str__(self):
+        concatenated_params = self._str
+        return concatenated_params.strip('|')
+
+    @classmethod
+    def from_string(cls, string: str) -> Self:
+        """Create a NTFSAttributes from a string.
+
+        To specify permissions you can pass in a string with the
+        desired permissions, e.g. "ReadOnly|Hidden|System"
+
+        :param str string: The string which dictates the permissions.
+        :return: A NTFSAttributes object
+        :rtype: ~azure.storage.fileshare.NTFSAttributes
+        """
+        read_only = "ReadOnly" in string
+        hidden = "Hidden" in string
+        system = "System" in string
+        none = "None" in string
+        directory = "Directory" in string
+        archive = "Archive" in string
+        temporary = "Temporary" in string
+        offline = "Offline" in string
+        not_content_indexed = "NotContentIndexed" in string
+        no_scrub_data = "NoScrubData" in string
+
+        parsed = cls(read_only, hidden, system, none, directory, archive, temporary, offline, not_content_indexed,
+                     no_scrub_data)
+        parsed._str = string
+        return parsed
+
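+# Editor's note: an illustrative round trip, not part of the SDK source.
+# from_string parses a pipe-delimited attribute list and str() re-emits it:
+#
+#     attrs = NTFSAttributes.from_string("ReadOnly|Hidden|Archive")
+#     assert attrs.read_only and attrs.hidden and attrs.archive
+#     assert str(attrs) == "ReadOnly|Hidden|Archive"
+#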
+
+class DirectoryProperties(DictMixin):
+    """Directory's properties class."""
+
+    name: str
+    """The name of the directory."""
+    last_modified: "datetime"
+    """A datetime object representing the last time the directory was modified."""
+    etag: str
+    """The ETag contains a value that you can use to perform operations conditionally."""
+    server_encrypted: bool
+    """Whether encryption is enabled."""
+    metadata: Dict[str, str]
+    """Name_value pairs to associate with the directory as metadata."""
+    change_time: Optional[Union[str, "datetime"]] = None
+    """Change time for the file."""
+    creation_time: Optional[Union[str, "datetime"]] = None
+    """Creation time for the file."""
+    last_write_time: Optional[Union[str, "datetime"]] = None
+    """Last write time for the file."""
+    last_access_time: Optional["datetime"] = None
+    """Last access time for the file."""
+    file_attributes: Union[str, NTFSAttributes]
+    """The file system attributes for files and directories."""
+    permission_key: str
+    """Key of the permission to be set for the directory/file."""
+    file_id: str
+    """FileId uniquely identifies the file or directory."""
+    parent_id: str
+    """ParentId uniquely identifies the parent directory of the object."""
+    is_directory: bool = True
+    """Whether input is a directory."""
+    owner: Optional[str] = None
+    """NFS only. The owner of the directory."""
+    group: Optional[str] = None
+    """NFS only. The owning group of the directory."""
+    file_mode: Optional[str] = None
+    """NFS only. The file mode of the directory."""
+    nfs_file_type: Optional[Literal['Directory']] = None
+    """NFS only. The type of the directory."""
+
+    def __init__(self, **kwargs: Any) -> None:
+        self.name = None  # type: ignore [assignment]
+        self.last_modified = kwargs.get('Last-Modified')  # type: ignore [assignment]
+        self.etag = kwargs.get('ETag')  # type: ignore [assignment]
+        self.server_encrypted = kwargs.get('x-ms-server-encrypted')  # type: ignore [assignment]
+        self.metadata = kwargs.get('metadata')  # type: ignore [assignment]
+        self.change_time = _parse_datetime_from_str(kwargs.get('x-ms-file-change-time'))
+        self.creation_time = _parse_datetime_from_str(kwargs.get('x-ms-file-creation-time'))
+        self.last_write_time = _parse_datetime_from_str(kwargs.get('x-ms-file-last-write-time'))
+        self.last_access_time = None
+        self.file_attributes = kwargs.get('x-ms-file-attributes')  # type: ignore [assignment]
+        self.permission_key = kwargs.get('x-ms-file-permission-key')  # type: ignore [assignment]
+        self.file_id = kwargs.get('x-ms-file-id')  # type: ignore [assignment]
+        self.parent_id = kwargs.get('x-ms-file-parent-id')  # type: ignore [assignment]
+        self.is_directory = True
+        self.owner = kwargs.get('x-ms-owner')
+        self.group = kwargs.get('x-ms-group')
+        self.file_mode = kwargs.get('x-ms-mode')
+        self.nfs_file_type = kwargs.get('x-ms-file-file-type')
+
+    @classmethod
+    def _from_generated(cls, generated):
+        props = cls()
+        props.name = unquote(generated.name.content) if generated.name.encoded else generated.name.content
+        props.file_id = generated.file_id
+        props.file_attributes = generated.attributes
+        props.last_modified = generated.properties.last_modified
+        props.creation_time = generated.properties.creation_time
+        props.last_access_time = generated.properties.last_access_time
+        props.last_write_time = generated.properties.last_write_time
+        props.change_time = generated.properties.change_time
+        props.etag = generated.properties.etag
+        props.permission_key = generated.permission_key
+        return props
+
+
+class DirectoryPropertiesPaged(PageIterator):
+    """An iterable for the contents of a directory.
+
+    This iterable will yield dicts for the contents of the directory. The dicts
+    will have the keys 'name' (str) and 'is_directory' (bool).
+    Items that are files (is_directory=False) will have an additional 'content_length' key.
+
+    :param Callable command: Function to retrieve the next page of items.
+    :param Optional[str] prefix: Filters the results to return only directories whose names
+        begin with the specified prefix.
+    :param Optional[int] results_per_page: The maximum number of entries to retrieve per call.
+    :param Optional[str] continuation_token: An opaque continuation token.
+    """
+
+    service_endpoint: Optional[str] = None
+    """The service URL."""
+    prefix: Optional[str] = None
+    """A file name prefix being used to filter the list."""
+    marker: Optional[str] = None
+    """The continuation token of the current page of results."""
+    results_per_page: Optional[int] = None
+    """The maximum number of results retrieved per API call."""
+    continuation_token: Optional[str] = None
+    """The continuation token to retrieve the next page of results."""
+    location_mode: Optional[str] = None
+    """The location mode being used to list results. The available options include "primary" and "secondary"."""
+    current_page: List[Dict[str, Any]]
+    """The current page of listed results."""
+
+    def __init__(
+        self, command: Callable,
+        prefix: Optional[str] = None,
+        results_per_page: Optional[int] = None,
+        continuation_token: Optional[str] = None
+    ) -> None:
+        super(DirectoryPropertiesPaged, self).__init__(
+            get_next=self._get_next_cb,
+            extract_data=self._extract_data_cb,
+            continuation_token=continuation_token or ""
+        )
+        self._command = command
+        self.service_endpoint = None
+        self.prefix = prefix
+        self.marker = None
+        self.results_per_page = results_per_page
+        self.location_mode = None
+        self.current_page = []
+
+    def _get_next_cb(self, continuation_token):
+        try:
+            return self._command(
+                marker=continuation_token or None,
+                prefix=self.prefix,
+                maxresults=self.results_per_page,
+                cls=return_context_and_deserialized,
+                use_location=self.location_mode)
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    def _extract_data_cb(self, get_next_return):
+        self.location_mode, self._response = get_next_return
+        self.service_endpoint = self._response.service_endpoint
+        self.prefix = self._response.prefix
+        self.marker = self._response.marker
+        self.results_per_page = self._response.max_results
+        self.current_page = [DirectoryProperties._from_generated(i) for i in self._response.segment.directory_items] # pylint: disable = protected-access
+        self.current_page.extend([FileProperties._from_generated(i) for i in self._response.segment.file_items]) # pylint: disable = protected-access
+        return self._response.next_marker or None, self.current_page
+
+
+class CopyProperties(DictMixin):
+    """File Copy Properties.
+
+    These properties will be `None` if this file has never been the destination in a Copy
+    File operation, or if this file has been modified after a concluded Copy File operation.
+    """
+
+    id: str
+    """String identifier for the last attempted Copy File operation where this file
+        was the destination file. This header does not appear if this file has never
+        been the destination in a Copy File operation, or if this file has been
+        modified after a concluded Copy File operation."""
+    source: Optional[str] = None
+    """URL up to 2 KB in length that specifies the source file used in the last attempted
+        Copy File operation where this file was the destination file."""
+    status: Optional[str] = None
+    """State of the copy operation identified by Copy ID, with these values:
+            success:
+                Copy completed successfully.
+            pending:
+                Copy is in progress. Check copy_status_description if intermittent,
+                non-fatal errors impede copy progress but don't cause failure.
+            aborted:
+                Copy was ended by Abort Copy File.
+            failed:
+                Copy failed. See copy_status_description for failure details."""
+    progress: Optional[str] = None
+    """Contains the number of bytes copied and the total bytes in the source in the last
+        attempted Copy File operation where this file was the destination file. Can show
+        between 0 and Content-Length bytes copied."""
+    status_description: Optional[str] = None
+    """Only appears when x-ms-copy-status is failed or pending. Describes cause of fatal
+        or non-fatal copy operation failure."""
+    incremental_copy: Optional[bool] = None
+    """Copies the snapshot of the source file to a destination file.
+        The snapshot is copied such that only the differential changes since
+        the previously copied snapshot are transferred to the destination."""
+    destination_snapshot: Optional["datetime"] = None
+    """Included if the file is incremental copy or incremental copy snapshot,
+        if x-ms-copy-status is success. Snapshot time of the last successful
+        incremental copy snapshot for this file."""
+    datetime: Optional["datetime"] = None
+    """Conclusion time of the last attempted Copy File operation where this file was the
+        destination file. This value can specify the time of a completed, aborted, or
+        failed copy attempt."""
+
+    def __init__(self, **kwargs: Any) -> None:
+        self.id = kwargs.get('x-ms-copy-id')  # type: ignore [assignment]
+        self.source = kwargs.get('x-ms-copy-source')
+        self.status = get_enum_value(kwargs.get('x-ms-copy-status'))
+        self.progress = kwargs.get('x-ms-copy-progress')
+        self.completion_time = kwargs.get('x-ms-copy-completion-time')
+        self.status_description = kwargs.get('x-ms-copy-status-description')
+        self.incremental_copy = kwargs.get('x-ms-incremental-copy')
+        self.destination_snapshot = kwargs.get('x-ms-copy-destination-snapshot')
+
+    @classmethod
+    def _from_generated(cls, generated):
+        copy = cls()
+        copy.id = generated.properties.copy_id or None
+        copy.status = get_enum_value(generated.properties.copy_status) or None
+        copy.source = generated.properties.copy_source or None
+        copy.progress = generated.properties.copy_progress or None
+        copy.completion_time = generated.properties.copy_completion_time or None
+        copy.status_description = generated.properties.copy_status_description or None
+        copy.incremental_copy = generated.properties.incremental_copy or None
+        copy.destination_snapshot = generated.properties.destination_snapshot or None
+        return copy
+
+
+class FileProperties(DictMixin):
+    """File's properties class."""
+
+    name: str
+    """The name of the file."""
+    path: Optional[str] = None
+    """The path of the file."""
+    share: Optional[str] = None
+    """The name of the share."""
+    snapshot: Optional[str] = None
+    """File snapshot."""
+    content_length: int
+    """Size of file in bytes."""
+    metadata: Dict[str, str]
+    """Name-value pairs to associate with the file as metadata."""
+    file_type: str
+    """String indicating the type of file."""
+    last_modified: "datetime"
+    """A datetime object representing the last time the file was modified."""
+    etag: str
+    """The ETag contains a value that can be used to perform operations conditionally."""
+    size: int
+    """Size of the file in bytes."""
+    content_range: Optional[str] = None
+    """Indicates the range of bytes returned in the event that the client
+        requested a subset of the file."""
+    server_encrypted: bool
+    """Whether encryption is enabled."""
+    copy: CopyProperties
+    """The copy properties."""
+    content_settings: ContentSettings
+    """The content settings for the file."""
+    lease: LeaseProperties
+    """File lease properties."""
+    change_time: Optional[Union[str, "datetime"]] = None
+    """Change time for the file."""
+    creation_time: Optional[Union[str, "datetime"]] = None
+    """Creation time for the file."""
+    last_write_time: Optional[Union[str, "datetime"]] = None
+    """Last write time for the file."""
+    last_access_time: Optional["datetime"] = None
+    """Last access time for the file."""
+    file_attributes: Union[str, NTFSAttributes]
+    """The file system attributes for files and directories."""
+    permission_key: str
+    """Key of the permission to be set for the directory/file."""
+    file_id: str
+    """FileId uniquely identifies the file or directory."""
+    parent_id: Optional[str] = None
+    """ParentId uniquely identifies the parent directory of the object."""
+    is_directory: bool = False
+    """Whether input is a directory."""
+    owner: Optional[str] = None
+    """NFS only. The owner of the file."""
+    group: Optional[str] = None
+    """NFS only. The owning group of the file."""
+    file_mode: Optional[str] = None
+    """NFS only. The file mode of the file."""
+    link_count: Optional[int] = None
+    """NFS only. The number of hard links of the file."""
+    nfs_file_type: Optional[Literal['Regular']] = None
+    """NFS only. The type of the file."""
+
+    def __init__(self, **kwargs: Any) -> None:
+        self.name = kwargs.get('name')  # type: ignore [assignment]
+        self.path = None
+        self.share = None
+        self.snapshot = None
+        self.content_length = kwargs.get('Content-Length')  # type: ignore [assignment]
+        self.metadata = kwargs.get('metadata')  # type: ignore [assignment]
+        self.file_type = kwargs.get('x-ms-type')  # type: ignore [assignment]
+        self.last_modified = kwargs.get('Last-Modified')  # type: ignore [assignment]
+        self.etag = kwargs.get('ETag')  # type: ignore [assignment]
+        self.size = kwargs.get('Content-Length')  # type: ignore [assignment]
+        self.content_range = kwargs.get('Content-Range')
+        self.server_encrypted = kwargs.get('x-ms-server-encrypted')  # type: ignore [assignment]
+        self.copy = CopyProperties(**kwargs)
+        self.content_settings = ContentSettings(**kwargs)
+        self.lease = LeaseProperties(**kwargs)
+        self.change_time = _parse_datetime_from_str(kwargs.get('x-ms-file-change-time'))
+        self.creation_time = _parse_datetime_from_str(kwargs.get('x-ms-file-creation-time'))
+        self.last_write_time = _parse_datetime_from_str(kwargs.get('x-ms-file-last-write-time'))
+        self.last_access_time = None
+        self.file_attributes = kwargs.get('x-ms-file-attributes')  # type: ignore [assignment]
+        self.permission_key = kwargs.get('x-ms-file-permission-key')  # type: ignore [assignment]
+        self.file_id = kwargs.get('x-ms-file-id')  # type: ignore [assignment]
+        self.parent_id = kwargs.get('x-ms-file-parent-id')
+        self.is_directory = False
+        self.owner = kwargs.get('x-ms-owner')
+        self.group = kwargs.get('x-ms-group')
+        self.file_mode = kwargs.get('x-ms-mode')
+        self.link_count = kwargs.get('x-ms-link-count')
+        self.nfs_file_type = kwargs.get('x-ms-file-file-type')
+
+    @classmethod
+    def _from_generated(cls, generated):
+        props = cls()
+        props.name = unquote(generated.name.content) if generated.name.encoded else generated.name.content
+        props.file_id = generated.file_id
+        props.etag = generated.properties.etag
+        props.file_attributes = generated.attributes
+        props.last_modified = generated.properties.last_modified
+        props.creation_time = generated.properties.creation_time
+        props.last_access_time = generated.properties.last_access_time
+        props.last_write_time = generated.properties.last_write_time
+        props.change_time = generated.properties.change_time
+        props.size = generated.properties.content_length
+        props.permission_key = generated.permission_key
+        return props
+
+
+class ShareProtocols(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+    """Enabled protocols on the share"""
+    SMB = "SMB"
+    NFS = "NFS"
+
+
+class FileSasPermissions(object):
+    """FileSasPermissions class to be used with
+    generating shared access signature operations.
+
+    :param bool read:
+        Read the content, properties, metadata. Use the file as the source of a copy operation.
+    :param bool create:
+        Create a new file or copy a file to a new file.
+    :param bool write:
+        Create or write content, properties, metadata. Resize the file. Use the file
+        as the destination of a copy operation within the same account.
+    :param bool delete:
+        Delete the file.
+    """
+
+    read: bool = False
+    """Read the content, properties, metadata. Use the file as the source of a copy operation."""
+    create: bool = False
+    """Create a new file or copy a file to a new file."""
+    write: bool = False
+    """Create or write content, properties, metadata. Resize the file. Use the file
+        as the destination of a copy operation within the same account."""
+    delete: bool = False
+    """Delete the file."""
+
+    def __init__(
+        self, read: bool = False,
+        create: bool = False,
+        write: bool = False,
+        delete: bool = False
+    ) -> None:
+        self.read = read
+        self.create = create
+        self.write = write
+        self.delete = delete
+        self._str = (('r' if self.read else '') +
+                     ('c' if self.create else '') +
+                     ('w' if self.write else '') +
+                     ('d' if self.delete else ''))
+
+    def __str__(self):
+        return self._str
+
+    @classmethod
+    def from_string(cls, permission: str) -> Self:
+        """Create a FileSasPermissions from a string.
+
+        To specify read, create, write, or delete permissions you need only to
+        include the first letter of the word in the string. For example, for read
+        and create permissions, you would provide the string "rc".
+
+        :param str permission: The string which dictates the read, create,
+            write, or delete permissions
+        :return: A FileSasPermissions object
+        :rtype: ~azure.storage.fileshare.FileSasPermissions
+        """
+        p_read = 'r' in permission
+        p_create = 'c' in permission
+        p_write = 'w' in permission
+        p_delete = 'd' in permission
+
+        parsed = cls(p_read, p_create, p_write, p_delete)
+
+        return parsed
+
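+# Editor's note: an illustrative sketch, not part of the SDK source.
+# Permission letters map positionally to flags, and str() re-emits them in
+# canonical "rcwd" order regardless of the input order:
+#
+#     perms = FileSasPermissions.from_string("cr")
+#     assert perms.read and perms.create and not perms.write
+#     assert str(perms) == "rc"
+#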
+
+def service_properties_deserialize(generated: GeneratedStorageServiceProperties) -> Dict[str, Any]:
+    return {
+        'hour_metrics': Metrics._from_generated(generated.hour_metrics),  # pylint: disable=protected-access
+        'minute_metrics': Metrics._from_generated(generated.minute_metrics),  # pylint: disable=protected-access
+        'cors': [CorsRule._from_generated(cors) for cors in generated.cors],  # type: ignore [union-attr] # pylint: disable=protected-access
+        'protocol': ShareProtocolSettings._from_generated(generated.protocol),  # pylint: disable=protected-access
+    }
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/fileshare/_parser.py b/.venv/lib/python3.12/site-packages/azure/storage/fileshare/_parser.py
new file mode 100644
index 00000000..ed7c9488
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/fileshare/_parser.py
@@ -0,0 +1,60 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+
+from datetime import datetime, timedelta
+from typing import Any, cast, Dict, Optional, Union
+
+from ._generated._serialization import Serializer
+
+_ERROR_TOO_MANY_FILE_PERMISSIONS = 'file_permission and file_permission_key should not be set at the same time'
+_FILE_PERMISSION_TOO_LONG = 'Size of file_permission is too large. file_permission should be <=8KB, else ' \
+                            'please use file_permission_key'
+
+
+def _get_file_permission(file_permission, file_permission_key, default_permission):
+    # If file_permission and file_permission_key are both empty, use default_permission
+    # as the file permission. file_permission must be <= 8KB; for anything larger,
+    # use file_permission_key instead.
+    if file_permission and len(str(file_permission).encode('utf-8')) > 8 * 1024:
+        raise ValueError(_FILE_PERMISSION_TOO_LONG)
+
+    if not file_permission:
+        if not file_permission_key:
+            return default_permission
+        return None
+
+    if not file_permission_key:
+        return file_permission
+
+    raise ValueError(_ERROR_TOO_MANY_FILE_PERMISSIONS)
+
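+# Editor's note: the resolution rules above in brief (editor's illustration;
+# the SDDL string is an assumed placeholder):
+#
+#     _get_file_permission(None, None, "inherit")    # -> "inherit" (default)
+#     _get_file_permission(None, "a-key", None)      # -> None (key is used instead)
+#     _get_file_permission("O:SYG:SYD:", None, None) # -> the permission itself
+#     _get_file_permission("O:SYG:SYD:", "a-key", None)  # raises ValueError
+#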
+
+def _parse_datetime_from_str(string_datetime):
+    if not string_datetime:
+        return None
+    dt, _, us = string_datetime.partition(".")
+    dt = datetime.strptime(dt, "%Y-%m-%dT%H:%M:%S")
+    us = int(us[:-2])  # drop the trailing 100ns digit and 'Z', leaving microseconds
+    datetime_obj = dt + timedelta(microseconds=us)
+    return datetime_obj
+
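+# Editor's note: a worked example of the parser above (editor's illustration).
+# The service emits 7-digit (100 ns) fractional seconds; us[:-2] drops the
+# trailing tick digit and "Z", leaving microseconds:
+#
+#     _parse_datetime_from_str("2025-01-05T10:30:00.1234567Z")
+#     # -> datetime(2025, 1, 5, 10, 30, 0, 123456)
+#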
+
+def _datetime_to_str(datetime_obj):
+    if not datetime_obj:
+        return None
+    if isinstance(datetime_obj, str):
+        return datetime_obj
+    return Serializer.serialize_iso(datetime_obj)[:-1].ljust(27, "0") + "Z"
+
+
+def _parse_snapshot(
+    snapshot: Optional[Union[str, Dict[str, Any]]] = None,
+    path_snapshot: Optional[str] = None
+) -> Optional[str]:
+    if hasattr(snapshot, 'snapshot'):
+        return snapshot.snapshot  # type: ignore
+    if isinstance(snapshot, dict):
+        return cast(str, snapshot['snapshot'])
+    return snapshot or path_snapshot
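+
+
+# Editor's note: accepted snapshot inputs (editor's illustration):
+#
+#     _parse_snapshot("2025-01-05T00:00:00.0000000Z")                # plain id string
+#     _parse_snapshot({'snapshot': '2025-01-05T00:00:00.0000000Z'})  # dict form
+#     # objects exposing a .snapshot attribute (e.g. ShareProperties) also work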
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/fileshare/_serialize.py b/.venv/lib/python3.12/site-packages/azure/storage/fileshare/_serialize.py
new file mode 100644
index 00000000..4f989315
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/fileshare/_serialize.py
@@ -0,0 +1,195 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+
+from typing import Any, Dict, Optional, Tuple, Union, TYPE_CHECKING
+
+from azure.core import MatchConditions
+
+from ._parser import _datetime_to_str, _get_file_permission
+from ._generated.models import (
+    SourceModifiedAccessConditions,
+    LeaseAccessConditions,
+    SourceLeaseAccessConditions,
+    DestinationLeaseAccessConditions,
+    CopyFileSmbInfo
+)
+
+if TYPE_CHECKING:
+    from ._lease import ShareLeaseClient
+    from .aio._lease_async import ShareLeaseClient as ShareLeaseClientAsync
+
+
+_SUPPORTED_API_VERSIONS = [
+    '2019-02-02',
+    '2019-07-07',
+    '2019-10-10',
+    '2019-12-12',
+    '2020-02-10',
+    '2020-04-08',
+    '2020-06-12',
+    '2020-08-04',
+    '2020-10-02',
+    '2020-12-06',
+    '2021-02-12',
+    '2021-04-10',
+    '2021-06-08',
+    '2021-08-06',
+    '2021-12-02',
+    '2022-11-02',
+    '2023-01-03',
+    '2023-05-03',
+    '2023-08-03',
+    '2023-11-03',
+    '2024-05-04',
+    '2024-08-04',
+    '2024-11-04',
+    '2025-01-05',
+    '2025-05-05',
+]
+
+
+def _get_match_headers(
+    kwargs: Dict[str, Any],
+    match_param: str,
+    etag_param: str
+) -> Tuple[Optional[str], Optional[str]]:
+    if_match = None
+    if_none_match = None
+    match_condition = kwargs.pop(match_param, None)
+    if match_condition == MatchConditions.IfNotModified:
+        if_match = kwargs.pop(etag_param, None)
+        if not if_match:
+            raise ValueError(f"'{match_param}' specified without '{etag_param}'.")
+    elif match_condition == MatchConditions.IfPresent:
+        if_match = '*'
+    elif match_condition == MatchConditions.IfModified:
+        if_none_match = kwargs.pop(etag_param, None)
+        if not if_none_match:
+            raise ValueError(f"'{match_param}' specified without '{etag_param}'.")
+    elif match_condition == MatchConditions.IfMissing:
+        if_none_match = '*'
+    elif match_condition is None:
+        if etag_param in kwargs:
+            raise ValueError(f"'{etag_param}' specified without '{match_param}'.")
+    else:
+        raise TypeError(f"Invalid match condition: {match_condition}")
+    return if_match, if_none_match
+
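+# Editor's note: how MatchConditions map to conditional headers (editor's
+# illustration; the etag value is an assumed placeholder):
+#
+#     kwargs = {'source_match_condition': MatchConditions.IfNotModified,
+#               'source_etag': '"0x8DEADBEEF"'}
+#     _get_match_headers(kwargs, 'source_match_condition', 'source_etag')
+#     # -> ('"0x8DEADBEEF"', None), i.e. sent as an If-Match style header
+#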
+
+def get_source_conditions(kwargs: Dict[str, Any]) -> SourceModifiedAccessConditions:
+    if_match, if_none_match = _get_match_headers(kwargs, 'source_match_condition', 'source_etag')
+    return SourceModifiedAccessConditions(
+        source_if_modified_since=kwargs.pop('source_if_modified_since', None),
+        source_if_unmodified_since=kwargs.pop('source_if_unmodified_since', None),
+        source_if_match=if_match or kwargs.pop('source_if_match', None),
+        source_if_none_match=if_none_match or kwargs.pop('source_if_none_match', None)
+    )
+
+
+def get_access_conditions(
+    lease: Optional[Union["ShareLeaseClient", "ShareLeaseClientAsync", str]]
+) -> Optional[LeaseAccessConditions]:
+    if lease is None:
+        return None
+    if hasattr(lease, "id"):
+        lease_id = lease.id
+    else:
+        lease_id = lease
+    return LeaseAccessConditions(lease_id=lease_id)
+
+
+def get_source_access_conditions(
+    lease: Optional[Union["ShareLeaseClient", "ShareLeaseClientAsync", str]]
+) -> Optional[SourceLeaseAccessConditions]:
+    if lease is None:
+        return None
+    if hasattr(lease, "id"):
+        lease_id = lease.id
+    else:
+        lease_id = lease
+    return SourceLeaseAccessConditions(source_lease_id=lease_id)
+
+
+def get_dest_access_conditions(
+    lease: Optional[Union["ShareLeaseClient", "ShareLeaseClientAsync", str]]
+) -> Optional[DestinationLeaseAccessConditions]:
+    if lease is None:
+        return None
+    if hasattr(lease, "id"):
+        lease_id = lease.id
+    else:
+        lease_id = lease
+    return DestinationLeaseAccessConditions(destination_lease_id=lease_id)
+
+
+def get_smb_properties(kwargs: Dict[str, Any]) -> Dict[str, Any]:
+    ignore_read_only = kwargs.pop('ignore_read_only', None)
+    set_archive_attribute = kwargs.pop('set_archive_attribute', None)
+    file_permission = kwargs.pop('file_permission', None)
+    file_permission_key = kwargs.pop('permission_key', None)
+    file_attributes = kwargs.pop('file_attributes', None)
+    file_creation_time = kwargs.pop('file_creation_time', None)
+    file_last_write_time = kwargs.pop('file_last_write_time', None)
+    file_change_time = kwargs.pop('file_change_time', None)
+
+    file_permission_copy_mode = None
+    file_permission = _get_file_permission(file_permission, file_permission_key, None)
+
+    if file_permission:
+        if file_permission.lower() == "source":
+            file_permission = None
+            file_permission_copy_mode = "source"
+        else:
+            file_permission_copy_mode = "override"
+    elif file_permission_key:
+        if file_permission_key.lower() == "source":
+            file_permission_key = None
+            file_permission_copy_mode = "source"
+        else:
+            file_permission_copy_mode = "override"
+    return {
+        'file_permission': file_permission,
+        'file_permission_key': file_permission_key,
+        'copy_file_smb_info': CopyFileSmbInfo(
+            file_permission_copy_mode=file_permission_copy_mode,
+            ignore_read_only=ignore_read_only,
+            file_attributes=file_attributes,
+            file_creation_time=_datetime_to_str(file_creation_time),
+            file_last_write_time=_datetime_to_str(file_last_write_time),
+            file_change_time=_datetime_to_str(file_change_time),
+            set_archive_attribute=set_archive_attribute
+        )
+
+    }
+
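+# Editor's note: the copy-mode rule above in brief (editor's illustration):
+# a literal "source" permission (or key) copies the source's security
+# descriptor; any other explicit value overrides it.
+#
+#     smb = get_smb_properties({'file_permission': 'source'})
+#     smb['copy_file_smb_info'].file_permission_copy_mode  # -> "source"
+#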
+
+def get_rename_smb_properties(kwargs: Dict[str, Any]) -> Dict[str, Any]:
+    file_permission = kwargs.pop('file_permission', None)
+    file_permission_key = kwargs.pop('permission_key', None)
+    file_attributes = kwargs.pop('file_attributes', None)
+    file_creation_time = kwargs.pop('file_creation_time', None)
+    file_last_write_time = kwargs.pop('file_last_write_time', None)
+    file_change_time = kwargs.pop('file_change_time', None)
+
+    file_permission = _get_file_permission(file_permission, file_permission_key, None)
+
+    return {
+        'file_permission': file_permission,
+        'file_permission_key': file_permission_key,
+        'copy_file_smb_info': CopyFileSmbInfo(
+            file_attributes=file_attributes,
+            file_creation_time=_datetime_to_str(file_creation_time),
+            file_last_write_time=_datetime_to_str(file_last_write_time),
+            file_change_time=_datetime_to_str(file_change_time)
+        )}
+
+
+def get_api_version(kwargs: Dict[str, Any]) -> str:
+    api_version = kwargs.get('api_version', None)
+    if api_version and api_version not in _SUPPORTED_API_VERSIONS:
+        versions = '\n'.join(_SUPPORTED_API_VERSIONS)
+        raise ValueError(f"Unsupported API version '{api_version}'. Please select from:\n{versions}")
+    return api_version or _SUPPORTED_API_VERSIONS[-1]
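+
+
+# Editor's note: version negotiation in brief (editor's illustration):
+#
+#     get_api_version({})                             # -> newest supported version
+#     get_api_version({'api_version': '2023-11-03'})  # -> '2023-11-03'
+#     get_api_version({'api_version': '1999-01-01'})  # raises ValueError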
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/fileshare/_share_client.py b/.venv/lib/python3.12/site-packages/azure/storage/fileshare/_share_client.py
new file mode 100644
index 00000000..602757dd
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/fileshare/_share_client.py
@@ -0,0 +1,995 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+# pylint: disable=docstring-keyword-should-match-keyword-only
+
+from typing import (
+    Any, cast, Dict, Literal, Optional, Union,
+    TYPE_CHECKING
+)
+from typing_extensions import Self
+
+from azure.core.exceptions import HttpResponseError
+from azure.core.paging import ItemPaged
+from azure.core.pipeline import Pipeline
+from azure.core.tracing.decorator import distributed_trace
+from ._deserialize import deserialize_permission, deserialize_share_properties
+from ._directory_client import ShareDirectoryClient
+from ._file_client import ShareFileClient
+from ._generated import AzureFileStorage
+from ._generated.models import (
+    DeleteSnapshotsOptionType,
+    ShareStats,
+    SignedIdentifier
+)
+from ._lease import ShareLeaseClient
+from ._models import ShareProtocols
+from ._parser import _parse_snapshot
+from ._serialize import get_access_conditions, get_api_version
+from ._share_client_helpers import (
+    _create_permission_for_share_options,
+    _format_url,
+    _from_share_url,
+    _parse_url
+)
+from ._shared.base_client import parse_connection_str, parse_query, StorageAccountHostsMixin, TransportWrapper
+from ._shared.request_handlers import add_metadata_headers, serialize_iso
+from ._shared.response_handlers import (
+    process_storage_error,
+    return_headers_and_deserialized,
+    return_response_headers
+)
+
+
+if TYPE_CHECKING:
+    from azure.core.credentials import AzureNamedKeyCredential, AzureSasCredential, TokenCredential
+    from ._models import AccessPolicy, DirectoryProperties, FileProperties, ShareProperties
+
+
+class ShareClient(StorageAccountHostsMixin):
+    """A client to interact with a specific share, although that share may not yet exist.
+
+    For operations relating to a specific directory or file in this share, the clients for
+    those entities can also be retrieved using the :func:`get_directory_client` and :func:`get_file_client` functions.
+
+    For more optional configuration, please click
+    `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+    #optional-configuration>`__.
+
+    :param str account_url:
+        The URI to the storage account. In order to create a client given the full URI to the share,
+        use the :func:`from_share_url` classmethod.
+    :param share_name:
+        The name of the share with which to interact.
+    :type share_name: str
+    :param str snapshot:
+        An optional share snapshot on which to operate. This can be the snapshot ID string
+        or the response returned from :func:`create_snapshot`.
+    :param credential:
+        The credentials with which to authenticate. This is optional if the
+        account URL already has a SAS token. The value can be a SAS token string,
+        an instance of an AzureSasCredential or AzureNamedKeyCredential from azure.core.credentials,
+        an account shared access key, or an instance of a TokenCredentials class from azure.identity.
+        If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential
+        - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError.
+        If using an instance of AzureNamedKeyCredential, "name" should be the storage account name, and "key"
+        should be the storage account key.
+    :type credential:
+        ~azure.core.credentials.AzureNamedKeyCredential or
+        ~azure.core.credentials.AzureSasCredential or
+        ~azure.core.credentials.TokenCredential or
+        str or dict[str, str] or None
+    :keyword token_intent:
+        Required when using `TokenCredential` for authentication and ignored for other forms of authentication.
+        Specifies the intent for all requests when using `TokenCredential` authentication. Possible values are:
+
+        backup - Specifies requests are intended for backup/admin type operations, meaning that all file/directory
+                 ACLs are bypassed and full permissions are granted. User must also have required RBAC permission.
+
+    :paramtype token_intent: Literal['backup']
+    :keyword bool allow_trailing_dot: If true, the trailing dot will not be trimmed from the target URI.
+    :keyword bool allow_source_trailing_dot: If true, the trailing dot will not be trimmed from the source URI.
+    :keyword str api_version:
+        The Storage API version to use for requests. Default value is the most recent service version that is
+        compatible with the current SDK. Setting to an older version may result in reduced feature compatibility.
+
+        .. versionadded:: 12.1.0
+
+    :keyword str secondary_hostname:
+        The hostname of the secondary endpoint.
+    :keyword int max_range_size: The maximum range size used for a file upload. Defaults to 4*1024*1024.
+    """
+    def __init__(
+        self, account_url: str,
+        share_name: str,
+        snapshot: Optional[Union[str, Dict[str, Any]]] = None,
+        credential: Optional[Union[str, Dict[str, str], "AzureNamedKeyCredential", "AzureSasCredential", "TokenCredential"]] = None,  # pylint: disable=line-too-long
+        *,
+        token_intent: Optional[Literal['backup']] = None,
+        **kwargs: Any
+    ) -> None:
+        if hasattr(credential, 'get_token') and not token_intent:
+            raise ValueError("'token_intent' keyword is required when 'credential' is an TokenCredential.")
+        parsed_url = _parse_url(account_url, share_name)
+        path_snapshot, sas_token = parse_query(parsed_url.query)
+        if not sas_token and not credential:
+            raise ValueError(
+                'You need to provide either an account shared key or SAS token when creating a storage service.')
+        self.snapshot = _parse_snapshot(snapshot, path_snapshot)
+        self.share_name = share_name
+        self._query_str, credential = self._format_query_string(
+            sas_token=sas_token, credential=credential, share_snapshot=self.snapshot)
+        super(ShareClient, self).__init__(
+            parsed_url=parsed_url, service='file-share', credential=credential, **kwargs)
+        self.allow_trailing_dot = kwargs.pop('allow_trailing_dot', None)
+        self.allow_source_trailing_dot = kwargs.pop('allow_source_trailing_dot', None)
+        self.file_request_intent = token_intent
+        self._client = AzureFileStorage(url=self.url, base_url=self.url, pipeline=self._pipeline,
+                                        allow_trailing_dot=self.allow_trailing_dot,
+                                        allow_source_trailing_dot=self.allow_source_trailing_dot,
+                                        file_request_intent=self.file_request_intent)
+        self._client._config.version = get_api_version(kwargs)  # type: ignore [assignment]
+
+    @classmethod
+    def from_share_url(
+        cls, share_url: str,
+        snapshot: Optional[Union[str, Dict[str, Any]]] = None,
+        credential: Optional[Union[str, Dict[str, str], "AzureNamedKeyCredential", "AzureSasCredential", "TokenCredential"]] = None,  # pylint: disable=line-too-long
+        **kwargs: Any
+    ) -> Self:
+        """
+        :param str share_url: The full URI to the share.
+        :param snapshot:
+            An optional share snapshot on which to operate. This can be the snapshot ID string
+            or the response returned from :func:`create_snapshot`.
+        :type snapshot: Optional[Union[str, dict[str, Any]]]
+        :param credential:
+            The credentials with which to authenticate. This is optional if the
+            account URL already has a SAS token. The value can be a SAS token string,
+            an instance of an AzureSasCredential or AzureNamedKeyCredential from azure.core.credentials,
+            an account shared access key, or an instance of a TokenCredentials class from azure.identity.
+            If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential
+            - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError.
+            If using an instance of AzureNamedKeyCredential, "name" should be the storage account name, and "key"
+            should be the storage account key.
+        :type credential:
+            ~azure.core.credentials.AzureNamedKeyCredential or
+            ~azure.core.credentials.AzureSasCredential or
+            ~azure.core.credentials.TokenCredential or
+            str or dict[str, str] or None
+        :returns: A share client.
+        :rtype: ~azure.storage.fileshare.ShareClient
+        """
+        account_url, share_name, path_snapshot = _from_share_url(share_url, snapshot)
+        return cls(account_url, share_name, path_snapshot, credential, **kwargs)
+
+    def _format_url(self, hostname: str) -> str:
+        """Format the endpoint URL according to the current location mode hostname.
+
+        :param str hostname:
+            The hostname of the current location mode.
+        :returns: A formatted endpoint URL including current location mode hostname.
+        :rtype: str
+        """
+        return _format_url(self.scheme, hostname, self.share_name, self._query_str)
+
+    @classmethod
+    def from_connection_string(
+        cls, conn_str: str,
+        share_name: str,
+        snapshot: Optional[Union[str, Dict[str, Any]]] = None,
+        credential: Optional[Union[str, Dict[str, str], "AzureNamedKeyCredential", "AzureSasCredential", "TokenCredential"]] = None,  # pylint: disable=line-too-long
+        **kwargs: Any
+    ) -> Self:
+        """Create ShareClient from a Connection String.
+
+        :param str conn_str:
+            A connection string to an Azure Storage account.
+        :param str share_name: The name of the share.
+        :param snapshot:
+            The optional share snapshot on which to operate. This can be the snapshot ID string
+            or the response returned from :func:`create_snapshot`.
+        :type snapshot: Optional[Union[str, dict[str, Any]]]
+        :param credential:
+            The credentials with which to authenticate. This is optional if the
+            account URL already has a SAS token. The value can be a SAS token string,
+            an instance of an AzureSasCredential or AzureNamedKeyCredential from azure.core.credentials,
+            an account shared access key, or an instance of a TokenCredentials class from azure.identity.
+            If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential
+            - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError.
+            If using an instance of AzureNamedKeyCredential, "name" should be the storage account name, and "key"
+            should be the storage account key.
+        :type credential:
+            ~azure.core.credentials.AzureNamedKeyCredential or
+            ~azure.core.credentials.AzureSasCredential or
+            ~azure.core.credentials.TokenCredential or
+            str or dict[str, str] or None
+        :returns: A share client.
+        :rtype: ~azure.storage.fileshare.ShareClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/file_samples_share.py
+                :start-after: [START create_share_client_from_conn_string]
+                :end-before: [END create_share_client_from_conn_string]
+                :language: python
+                :dedent: 8
+                :caption: Gets the share client from connection string.
+        """
+        account_url, secondary, credential = parse_connection_str(conn_str, credential, 'file')
+        if 'secondary_hostname' not in kwargs:
+            kwargs['secondary_hostname'] = secondary
+        return cls(
+            account_url, share_name=share_name, snapshot=snapshot, credential=credential, **kwargs)
+
+    def get_directory_client(self, directory_path: Optional[str] = None) -> ShareDirectoryClient:
+        """Get a client to interact with the specified directory.
+        The directory need not already exist.
+
+        :param str directory_path:
+            Path to the specified directory.
+        :returns: A Directory Client.
+        :rtype: ~azure.storage.fileshare.ShareDirectoryClient
+        """
+        _pipeline = Pipeline(
+            transport=TransportWrapper(self._pipeline._transport),  # pylint: disable=protected-access
+            policies=self._pipeline._impl_policies  # pylint: disable=protected-access
+        )
+
+        return ShareDirectoryClient(
+            self.url, share_name=self.share_name, directory_path=directory_path or "", snapshot=self.snapshot,
+            credential=self.credential, token_intent=self.file_request_intent, api_version=self.api_version,
+            _hosts=self._hosts, _configuration=self._config, _pipeline=_pipeline,
+            _location_mode=self._location_mode, allow_trailing_dot=self.allow_trailing_dot,
+            allow_source_trailing_dot=self.allow_source_trailing_dot)
+
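+    # Editor's note (illustration, not SDK source): the path is relative to the
+    # share root, e.g. share.get_directory_client("reports/2025"); the directory
+    # need not exist until create_directory() is called on the returned client.
+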
+    def get_file_client(self, file_path: str) -> ShareFileClient:
+        """Get a client to interact with the specified file.
+        The file need not already exist.
+
+        :param str file_path:
+            Path to the specified file.
+        :returns: A File Client.
+        :rtype: ~azure.storage.fileshare.ShareFileClient
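+
+        A minimal usage sketch, assuming ``share`` is an existing ``ShareClient``
+        (the file path is illustrative):
+
+        .. code-block:: python
+
+            file_client = share.get_file_client("logs/app.log")
+            # No request is sent until a service operation is invoked.
+            props = file_client.get_file_properties()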
+        """
+        _pipeline = Pipeline(
+            transport=TransportWrapper(self._pipeline._transport),  # pylint: disable=protected-access
+            policies=self._pipeline._impl_policies  # pylint: disable=protected-access
+        )
+
+        return ShareFileClient(
+            self.url, share_name=self.share_name, file_path=file_path, snapshot=self.snapshot,
+            credential=self.credential, token_intent=self.file_request_intent, api_version=self.api_version,
+            _hosts=self._hosts, _configuration=self._config,
+            _pipeline=_pipeline, _location_mode=self._location_mode, allow_trailing_dot=self.allow_trailing_dot,
+            allow_source_trailing_dot=self.allow_source_trailing_dot)
+
+    @distributed_trace
+    def acquire_lease(self, **kwargs: Any) -> ShareLeaseClient:
+        """Requests a new lease.
+
+        If the share does not have an active lease, the Share
+        Service creates a lease on the share and returns a new lease.
+
+        .. versionadded:: 12.5.0
+
+        :keyword int lease_duration:
+            Specifies the duration of the lease, in seconds, or negative one
+            (-1) for a lease that never expires. A non-infinite lease can be
+            between 15 and 60 seconds. A lease duration cannot be changed
+            using renew or change. Default is -1 (infinite lease).
+        :keyword str lease_id:
+            Proposed lease ID, in a GUID string format. The Share Service
+            returns 400 (Invalid request) if the proposed lease ID is not
+            in the correct format.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :returns: A ShareLeaseClient object.
+        :rtype: ~azure.storage.fileshare.ShareLeaseClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/file_samples_share.py
+                :start-after: [START acquire_and_release_lease_on_share]
+                :end-before: [END acquire_and_release_lease_on_share]
+                :language: python
+                :dedent: 8
+                :caption: Acquiring a lease on a share.
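+
+            A short sketch of the same flow, assuming ``share`` is an existing
+            ``ShareClient`` (the duration and metadata values are illustrative):
+
+            .. code-block:: python
+
+                lease = share.acquire_lease(lease_duration=60)
+                try:
+                    share.set_share_metadata({"owner": "ops"}, lease=lease)
+                finally:
+                    lease.release()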
+        """
+        kwargs['lease_duration'] = kwargs.pop('lease_duration', -1)
+        lease_id = kwargs.pop('lease_id', None)
+        lease = ShareLeaseClient(self, lease_id=lease_id)
+        lease.acquire(**kwargs)
+        return lease
+
+    @distributed_trace
+    def create_share(self, **kwargs: Any) -> Dict[str, Any]:
+        """Creates a new Share under the account. If a share with the
+        same name already exists, the operation fails.
+
+        :keyword metadata:
+            Name-value pairs associated with the share as metadata.
+        :paramtype metadata: Optional[dict[str, str]]
+        :keyword int quota:
+            The quota to be allotted.
+        :keyword access_tier:
+            Specifies the access tier of the share.
+            Possible values: 'TransactionOptimized', 'Hot', 'Cool', 'Premium'
+        :paramtype access_tier: str or ~azure.storage.fileshare.models.ShareAccessTier
+
+            .. versionadded:: 12.4.0
+
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :keyword protocols:
+            Protocols to enable on the share. Only one protocol can be enabled on the share.
+        :paramtype protocols: str or ~azure.storage.fileshare.ShareProtocols
+        :keyword root_squash:
+            Root squash to set on the share.
+            Only valid for NFS shares. Possible values include: 'NoRootSquash', 'RootSquash', 'AllSquash'.
+        :paramtype root_squash: str or ~azure.storage.fileshare.ShareRootSquash
+        :keyword bool paid_bursting_enabled: This property enables paid bursting.
+        :keyword int paid_bursting_bandwidth_mibps: The maximum throughput the file share can support in MiB/s.
+        :keyword int paid_bursting_iops: The maximum IOPS the file share can support.
+        :keyword int provisioned_iops: The provisioned IOPS of the share, stored on the share object.
+        :keyword int provisioned_bandwidth_mibps: The provisioned throughput of the share, stored on the share object.
+        :returns: Share-updated property dict (Etag and last modified).
+        :rtype: dict[str, Any]
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/file_samples_share.py
+                :start-after: [START create_share]
+                :end-before: [END create_share]
+                :language: python
+                :dedent: 8
+                :caption: Creates a file share.
+        """
+        metadata = kwargs.pop('metadata', None)
+        quota = kwargs.pop('quota', None)
+        access_tier = kwargs.pop('access_tier', None)
+        timeout = kwargs.pop('timeout', None)
+        root_squash = kwargs.pop('root_squash', None)
+        protocols = kwargs.pop('protocols', None)
+        paid_bursting_bandwidth_mibps = kwargs.pop('paid_bursting_bandwidth_mibps', None)
+        paid_bursting_iops = kwargs.pop('paid_bursting_iops', None)
+        share_provisioned_iops = kwargs.pop('provisioned_iops', None)
+        share_provisioned_bandwidth_mibps = kwargs.pop('provisioned_bandwidth_mibps', None)
+        if protocols and protocols not in ['NFS', 'SMB', ShareProtocols.SMB, ShareProtocols.NFS]:
+            raise ValueError("The enabled protocol must be set to either SMB or NFS.")
+        if root_squash and protocols not in ['NFS', ShareProtocols.NFS]:
+            raise ValueError("The 'root_squash' keyword can only be used on NFS enabled shares.")
+        headers = kwargs.pop('headers', {})
+        headers.update(add_metadata_headers(metadata))
+
+        try:
+            return cast(Dict[str, Any], self._client.share.create(
+                timeout=timeout,
+                metadata=metadata,
+                quota=quota,
+                access_tier=access_tier,
+                root_squash=root_squash,
+                enabled_protocols=protocols,
+                paid_bursting_max_bandwidth_mibps=paid_bursting_bandwidth_mibps,
+                paid_bursting_max_iops=paid_bursting_iops,
+                share_provisioned_iops=share_provisioned_iops,
+                share_provisioned_bandwidth_mibps=share_provisioned_bandwidth_mibps,
+                cls=return_response_headers,
+                headers=headers,
+                **kwargs))
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    @distributed_trace
+    def create_snapshot(self, **kwargs: Any) -> Dict[str, Any]:
+        """Creates a snapshot of the share.
+
+        A snapshot is a read-only version of a share that's taken at a point in time.
+        It can be read, copied, or deleted, but not modified. Snapshots provide a way
+        to back up a share as it appears at a moment in time.
+
+        A snapshot of a share has the same name as the base share from which the snapshot
+        is taken, with a DateTime value appended to indicate the time at which the
+        snapshot was taken.
+
+        :keyword metadata:
+            Name-value pairs associated with the share as metadata.
+        :paramtype metadata: Optional[dict[str, str]]
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :returns: Share-updated property dict (Snapshot ID, Etag, and last modified).
+        :rtype: dict[str, Any]
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/file_samples_share.py
+                :start-after: [START create_share_snapshot]
+                :end-before: [END create_share_snapshot]
+                :language: python
+                :dedent: 12
+                :caption: Creates a snapshot of the file share.
+        """
+        metadata = kwargs.pop('metadata', None)
+        timeout = kwargs.pop('timeout', None)
+        headers = kwargs.pop('headers', {})
+        headers.update(add_metadata_headers(metadata))
+        try:
+            return cast(Dict[str, Any], self._client.share.create_snapshot(
+                timeout=timeout,
+                cls=return_response_headers,
+                headers=headers,
+                **kwargs))
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    @distributed_trace
+    def delete_share(
+        self, delete_snapshots: Optional[Union[bool, Literal['include', 'include-leased']]] = False,
+        **kwargs: Any
+    ) -> None:
+        """Marks the specified share for deletion. The share is
+        later deleted during garbage collection.
+
+        :param delete_snapshots:
+            Indicates whether snapshots are to be deleted. If "True" or the enum "include" is
+            specified, snapshots will be deleted (but leased snapshots will not). To also delete
+            leased snapshots, specify the "include-leased" enum.
+        :type delete_snapshots:
+            Optional[Union[bool, Literal['include', 'include-leased']]]
+        :keyword lease:
+            Required if the share has an active lease. Value can be a ShareLeaseClient object
+            or the lease ID as a string.
+
+            .. versionadded:: 12.5.0
+
+            This keyword argument was introduced in API version '2020-08-04'.
+
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/file_samples_share.py
+                :start-after: [START delete_share]
+                :end-before: [END delete_share]
+                :language: python
+                :dedent: 12
+                :caption: Deletes the share and any snapshots.
+        """
+        access_conditions = get_access_conditions(kwargs.pop('lease', None))
+        timeout = kwargs.pop('timeout', None)
+        delete_include = None
+        if isinstance(delete_snapshots, bool) and delete_snapshots:
+            delete_include = DeleteSnapshotsOptionType.INCLUDE
+        elif delete_snapshots == 'include':
+            delete_include = DeleteSnapshotsOptionType.INCLUDE
+        elif delete_snapshots == 'include-leased':
+            delete_include = DeleteSnapshotsOptionType.INCLUDE_LEASED
+        try:
+            self._client.share.delete(
+                timeout=timeout,
+                sharesnapshot=self.snapshot,
+                lease_access_conditions=access_conditions,
+                delete_snapshots=delete_include,
+                **kwargs)
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    @distributed_trace
+    def get_share_properties(self, **kwargs: Any) -> "ShareProperties":
+        """Returns all user-defined metadata and system properties for the
+        specified share. The data returned does not include the share's
+        list of files or directories.
+
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :keyword lease:
+            Required if the share has an active lease. Value can be a ShareLeaseClient object
+            or the lease ID as a string.
+
+            .. versionadded:: 12.5.0
+
+            This keyword argument was introduced in API version '2020-08-04'.
+
+        :returns: The share properties.
+        :rtype: ~azure.storage.fileshare.ShareProperties
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/file_samples_hello_world.py
+                :start-after: [START get_share_properties]
+                :end-before: [END get_share_properties]
+                :language: python
+                :dedent: 12
+                :caption: Gets the share properties.
+        """
+        access_conditions = get_access_conditions(kwargs.pop('lease', None))
+        timeout = kwargs.pop('timeout', None)
+        try:
+            props = cast("ShareProperties", self._client.share.get_properties(
+                timeout=timeout,
+                sharesnapshot=self.snapshot,
+                cls=deserialize_share_properties,
+                lease_access_conditions=access_conditions,
+                **kwargs))
+        except HttpResponseError as error:
+            process_storage_error(error)
+        props.name = self.share_name
+        props.snapshot = self.snapshot
+        return props
+
+    @distributed_trace
+    def set_share_quota(self, quota: int, **kwargs: Any) -> Dict[str, Any]:
+        """Sets the quota for the share.
+
+        :param int quota:
+            Specifies the maximum size of the share, in gigabytes.
+            Must be greater than 0, and less than or equal to 5TB.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :keyword lease:
+            Required if the share has an active lease. Value can be a ShareLeaseClient object
+            or the lease ID as a string.
+
+            .. versionadded:: 12.5.0
+
+            This keyword argument was introduced in API version '2020-08-04'.
+
+        :returns: Share-updated property dict (Etag and last modified).
+        :rtype: dict[str, Any]
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/file_samples_share.py
+                :start-after: [START set_share_quota]
+                :end-before: [END set_share_quota]
+                :language: python
+                :dedent: 12
+                :caption: Sets the share quota.
+        """
+        access_conditions = get_access_conditions(kwargs.pop('lease', None))
+        timeout = kwargs.pop('timeout', None)
+        try:
+            return cast(Dict[str, Any], self._client.share.set_properties(
+                timeout=timeout,
+                quota=quota,
+                access_tier=None,
+                lease_access_conditions=access_conditions,
+                cls=return_response_headers,
+                **kwargs))
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    @distributed_trace
+    def set_share_properties(self, **kwargs: Any) -> Dict[str, Any]:
+        """Sets the share properties.
+
+        .. versionadded:: 12.4.0
+
+        :keyword access_tier:
+            Specifies the access tier of the share.
+            Possible values: 'TransactionOptimized', 'Hot', 'Cool', 'Premium'
+        :paramtype access_tier: str or ~azure.storage.fileshare.models.ShareAccessTier
+        :keyword int quota:
+            Specifies the maximum size of the share, in gigabytes.
+            Must be greater than 0, and less than or equal to 5TB.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :keyword root_squash:
+            Root squash to set on the share.
+            Only valid for NFS shares. Possible values include: 'NoRootSquash', 'RootSquash', 'AllSquash'.
+        :paramtype root_squash: str or ~azure.storage.fileshare.ShareRootSquash
+        :keyword lease:
+            Required if the share has an active lease. Value can be a ShareLeaseClient object
+            or the lease ID as a string.
+        :keyword bool paid_bursting_enabled: This property enables paid bursting.
+        :keyword int paid_bursting_bandwidth_mibps: The maximum throughput the file share can support in MiB/s.
+        :keyword int paid_bursting_iops: The maximum IOPS the file share can support.
+        :keyword int provisioned_iops: The provisioned IOPS of the share, stored on the share object.
+        :keyword int provisioned_bandwidth_mibps: The provisioned throughput of the share, stored on the share object.
+        :returns: Share-updated property dict (Etag and last modified).
+        :rtype: dict[str, Any]
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/file_samples_share.py
+                :start-after: [START set_share_properties]
+                :end-before: [END set_share_properties]
+                :language: python
+                :dedent: 12
+                :caption: Sets the share properties.
+        """
+        access_conditions = get_access_conditions(kwargs.pop('lease', None))
+        timeout = kwargs.pop('timeout', None)
+        access_tier = kwargs.pop('access_tier', None)
+        quota = kwargs.pop('quota', None)
+        root_squash = kwargs.pop('root_squash', None)
+        paid_bursting_bandwidth_mibps = kwargs.pop('paid_bursting_bandwidth_mibps', None)
+        paid_bursting_iops = kwargs.pop('paid_bursting_iops', None)
+        share_provisioned_iops = kwargs.pop('provisioned_iops', None)
+        share_provisioned_bandwidth_mibps = kwargs.pop('provisioned_bandwidth_mibps', None)
+        if all(parameter is None for parameter in [access_tier, quota, root_squash]):
+            raise ValueError("set_share_properties should be called with at least one parameter.")
+        try:
+            return cast(Dict[str, Any], self._client.share.set_properties(
+                timeout=timeout,
+                quota=quota,
+                access_tier=access_tier,
+                root_squash=root_squash,
+                lease_access_conditions=access_conditions,
+                paid_bursting_max_bandwidth_mibps=paid_bursting_bandwidth_mibps,
+                paid_bursting_max_iops=paid_bursting_iops,
+                share_provisioned_iops=share_provisioned_iops,
+                share_provisioned_bandwidth_mibps=share_provisioned_bandwidth_mibps,
+                cls=return_response_headers,
+                **kwargs))
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    @distributed_trace
+    def set_share_metadata(self, metadata: Dict[str, str], **kwargs: Any) -> Dict[str, Any]:
+        """Sets the metadata for the share.
+
+        Each call to this operation replaces all existing metadata
+        attached to the share. To remove all metadata from the share,
+        call this operation with no metadata dict.
+
+        :param metadata:
+            Name-value pairs associated with the share as metadata.
+        :type metadata: dict[str, str]
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :keyword lease:
+            Required if the share has an active lease. Value can be a ShareLeaseClient object
+            or the lease ID as a string.
+
+            .. versionadded:: 12.5.0
+
+            This keyword argument was introduced in API version '2020-08-04'.
+
+        :returns: Share-updated property dict (Etag and last modified).
+        :rtype: dict[str, Any]
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/file_samples_share.py
+                :start-after: [START set_share_metadata]
+                :end-before: [END set_share_metadata]
+                :language: python
+                :dedent: 12
+                :caption: Sets the share metadata.
+        """
+        access_conditions = get_access_conditions(kwargs.pop('lease', None))
+        timeout = kwargs.pop('timeout', None)
+        headers = kwargs.pop('headers', {})
+        headers.update(add_metadata_headers(metadata))
+        try:
+            return cast(Dict[str, Any], self._client.share.set_metadata(
+                timeout=timeout,
+                cls=return_response_headers,
+                headers=headers,
+                lease_access_conditions=access_conditions,
+                **kwargs))
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    @distributed_trace
+    def get_share_access_policy(self, **kwargs: Any) -> Dict[str, Any]:
+        """Gets the permissions for the share. The permissions
+        indicate whether files in a share may be accessed publicly.
+
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :keyword lease:
+            Required if the share has an active lease. Value can be a ShareLeaseClient object
+            or the lease ID as a string.
+
+            .. versionadded:: 12.5.0
+
+            This keyword argument was introduced in API version '2020-08-04'.
+
+        :returns: Access policy information in a dict.
+        :rtype: dict[str, Any]
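+
+        A minimal sketch, assuming ``share`` is an existing ``ShareClient`` (the
+        attribute access shown assumes the deserialized signed identifier model):
+
+        .. code-block:: python
+
+            policy = share.get_share_access_policy()
+            print(policy['public_access'])
+            for identifier in policy['signed_identifiers']:
+                print(identifier.id)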
+        """
+        access_conditions = get_access_conditions(kwargs.pop('lease', None))
+        timeout = kwargs.pop('timeout', None)
+        try:
+            response, identifiers = self._client.share.get_access_policy(
+                timeout=timeout,
+                cls=return_headers_and_deserialized,
+                lease_access_conditions=access_conditions,
+                **kwargs)
+        except HttpResponseError as error:
+            process_storage_error(error)
+        return {
+            'public_access': response.get('share_public_access'),
+            'signed_identifiers': identifiers or []
+        }
+
+    @distributed_trace
+    def set_share_access_policy(self, signed_identifiers: Dict[str, "AccessPolicy"], **kwargs: Any) -> Dict[str, Any]:
+        """Sets the permissions for the share, or stored access
+        policies that may be used with Shared Access Signatures. The permissions
+        indicate whether files in a share may be accessed publicly.
+
+        :param signed_identifiers:
+            A dictionary of access policies to associate with the share. The
+            dictionary may contain up to 5 elements. An empty dictionary
+            will clear the access policies set on the service.
+        :type signed_identifiers: dict[str, ~azure.storage.fileshare.AccessPolicy]
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :keyword lease:
+            Required if the share has an active lease. Value can be a ShareLeaseClient object
+            or the lease ID as a string.
+
+            .. versionadded:: 12.5.0
+
+            This keyword argument was introduced in API version '2020-08-04'.
+
+        :returns: Share-updated property dict (Etag and last modified).
+        :rtype: dict[str, Any]
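+
+        A minimal sketch, assuming ``share`` is an existing ``ShareClient``; the
+        policy name and expiry window are illustrative:
+
+        .. code-block:: python
+
+            from datetime import datetime, timedelta
+            from azure.storage.fileshare import AccessPolicy, ShareSasPermissions
+
+            policy = AccessPolicy(
+                permission=ShareSasPermissions(read=True),
+                expiry=datetime.utcnow() + timedelta(hours=1),
+                start=datetime.utcnow())
+            share.set_share_access_policy(signed_identifiers={'read-only': policy})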
+        """
+        access_conditions = get_access_conditions(kwargs.pop('lease', None))
+        timeout = kwargs.pop('timeout', None)
+        if len(signed_identifiers) > 5:
+            raise ValueError(
+                'Too many access policies provided. The server does not support setting '
+                'more than 5 access policies on a single resource.')
+        identifiers = []
+        for key, value in signed_identifiers.items():
+            if value:
+                value.start = serialize_iso(value.start)
+                value.expiry = serialize_iso(value.expiry)
+            identifiers.append(SignedIdentifier(id=key, access_policy=value))
+        try:
+            return cast(Dict[str, Any], self._client.share.set_access_policy(
+                share_acl=identifiers or None,
+                timeout=timeout,
+                cls=return_response_headers,
+                lease_access_conditions=access_conditions,
+                **kwargs))
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    @distributed_trace
+    def get_share_stats(self, **kwargs: Any) -> int:
+        """Gets the approximate size of the data stored on the share in bytes.
+
+        Note that this value may not include all recently created
+        or recently resized files.
+
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :keyword lease:
+            Required if the share has an active lease. Value can be a ShareLeaseClient object
+            or the lease ID as a string.
+
+            .. versionadded:: 12.5.0
+
+            This keyword argument was introduced in API version '2020-08-04'.
+
+        :return: The approximate size of the data (in bytes) stored on the share.
+        :rtype: int
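+
+        A minimal sketch, assuming ``share`` is an existing ``ShareClient``:
+
+        .. code-block:: python
+
+            usage_bytes = share.get_share_stats()
+            print(f"Approximate usage: {usage_bytes / (1024 ** 2):.1f} MiB")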
+        """
+        access_conditions = get_access_conditions(kwargs.pop('lease', None))
+        timeout = kwargs.pop('timeout', None)
+        try:
+            stats = cast(ShareStats, self._client.share.get_statistics(
+                timeout=timeout,
+                lease_access_conditions=access_conditions,
+                **kwargs))
+            return stats.share_usage_bytes
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    @distributed_trace
+    def list_directories_and_files(
+        self, directory_name: Optional[str] = None,
+        name_starts_with: Optional[str] = None,
+        marker: Optional[str] = None,
+        **kwargs: Any
+    ) -> ItemPaged[Union["DirectoryProperties", "FileProperties"]]:
+        """Lists the directories and files under the share.
+
+        :param str directory_name:
+            Name of a directory.
+        :param str name_starts_with:
+            Filters the results to return only directories whose names
+            begin with the specified prefix.
+        :param str marker:
+            An opaque continuation token. This value can be retrieved from the
+            next_marker field of a previous generator object. If specified,
+            this generator will begin returning results from this point.
+        :keyword List[str] include:
+            Include this parameter to specify one or more datasets to include in the response.
+            Possible str values are "timestamps", "Etag", "Attributes", "PermissionKey".
+
+            .. versionadded:: 12.6.0
+
+            This keyword argument was introduced in API version '2020-10-02'.
+
+        :keyword bool include_extended_info:
+            If set to true, the file ID will be included in listed results.
+
+            .. versionadded:: 12.6.0
+
+            This keyword argument was introduced in API version '2020-10-02'.
+
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :returns: An auto-paging iterable of dict-like DirectoryProperties and FileProperties
+        :rtype: ~azure.core.paging.ItemPaged[Union[DirectoryProperties, FileProperties]]
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/file_samples_share.py
+                :start-after: [START share_list_files_in_dir]
+                :end-before: [END share_list_files_in_dir]
+                :language: python
+                :dedent: 12
+                :caption: List directories and files in the share.
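+
+            A short iteration sketch, assuming ``share`` is an existing
+            ``ShareClient``:
+
+            .. code-block:: python
+
+                for item in share.list_directories_and_files():
+                    print(item.name)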
+        """
+        timeout = kwargs.pop('timeout', None)
+        directory = self.get_directory_client(directory_name)
+        kwargs.setdefault('merge_span', True)
+        return directory.list_directories_and_files(
+            name_starts_with=name_starts_with, marker=marker, timeout=timeout, **kwargs)
+
+    @distributed_trace
+    def create_permission_for_share(self, file_permission: str, **kwargs: Any) -> Optional[str]:
+        """Create a permission (a security descriptor) at the share level.
+
+        This 'permission' can be used for the files/directories in the share.
+        If a matching 'permission' already exists, its key is returned; otherwise,
+        a new permission is created at the share level and its key is returned.
+
+        :param str file_permission:
+            The file permission, as a portable SDDL string.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :keyword file_permission_format:
+            Specifies the format in which the permission is returned. If not specified, SDDL will be the default.
+        :paramtype file_permission_format: Literal['sddl', 'binary']
+        :returns: A file permission key
+        :rtype: str or None
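+
+        A minimal sketch, assuming ``share`` is an existing ``ShareClient``; the
+        SDDL string below is illustrative only:
+
+        .. code-block:: python
+
+            sddl = "O:SYG:SYD:(A;;FA;;;SY)"
+            permission_key = share.create_permission_for_share(sddl)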
+        """
+        timeout = kwargs.pop('timeout', None)
+        options = _create_permission_for_share_options(file_permission, timeout=timeout, **kwargs)
+        try:
+            return cast(Optional[str], self._client.share.create_permission(**options))
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    @distributed_trace
+    def get_permission_for_share(self, permission_key: str, **kwargs: Any) -> str:
+        """Get a permission (a security descriptor) for a given key.
+
+        This 'permission' can be used for the files/directories in the share.
+
+        :param str permission_key:
+            The key of the file permission to retrieve.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :keyword file_permission_format:
+            Specifies the format in which the permission is returned. If not specified, SDDL will be the default.
+        :paramtype file_permission_format: Literal['sddl', 'binary']
+        :returns: A file permission (a portable SDDL)
+        :rtype: str
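+
+        A minimal sketch, assuming ``share`` is an existing ``ShareClient`` and
+        ``permission_key`` was previously returned by :func:`create_permission_for_share`:
+
+        .. code-block:: python
+
+            sddl = share.get_permission_for_share(permission_key)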
+        """
+        timeout = kwargs.pop('timeout', None)
+        try:
+            return cast(str, self._client.share.get_permission(
+                file_permission_key=permission_key,
+                cls=deserialize_permission,
+                timeout=timeout,
+                **kwargs))
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    @distributed_trace
+    def create_directory(self, directory_name: str, **kwargs: Any) -> ShareDirectoryClient:
+        """Creates a directory in the share and returns a client to interact
+        with the directory.
+
+        :param str directory_name:
+            The name of the directory.
+        :keyword metadata:
+            Name-value pairs associated with the directory as metadata.
+        :paramtype metadata: Optional[dict[str, str]]
+        :keyword str owner:
+            NFS only. The owner of the directory.
+        :keyword str group:
+            NFS only. The owning group of the directory.
+        :keyword str file_mode:
+            NFS only. The file mode of the directory.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :returns: ShareDirectoryClient
+        :rtype: ~azure.storage.fileshare.ShareDirectoryClient
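+
+        A minimal sketch, assuming ``share`` is an existing ``ShareClient`` (the
+        directory name and metadata are illustrative):
+
+        .. code-block:: python
+
+            directory = share.create_directory("reports", metadata={"team": "data"})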
+        """
+        directory = self.get_directory_client(directory_name)
+        kwargs.setdefault('merge_span', True)
+        directory.create_directory(**kwargs)
+        return directory
+
+    @distributed_trace
+    def delete_directory(self, directory_name: str, **kwargs: Any) -> None:
+        """Marks the directory for deletion. The directory is
+        later deleted during garbage collection.
+
+        :param str directory_name:
+            The name of the directory.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :rtype: None
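+
+        A minimal sketch, assuming ``share`` is an existing ``ShareClient`` and
+        the directory name is illustrative:
+
+        .. code-block:: python
+
+            share.delete_directory("reports")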
+        """
+        directory = self.get_directory_client(directory_name)
+        directory.delete_directory(**kwargs)
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/fileshare/_share_client_helpers.py b/.venv/lib/python3.12/site-packages/azure/storage/fileshare/_share_client_helpers.py
new file mode 100644
index 00000000..7d3f041d
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/fileshare/_share_client_helpers.py
@@ -0,0 +1,75 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+
+from typing import (
+    Any, Dict, Optional, Tuple, Union,
+    TYPE_CHECKING
+)
+from urllib.parse import quote, unquote, urlparse
+
+from ._deserialize import deserialize_permission_key
+from ._generated.models import SharePermission
+from ._parser import _parse_snapshot
+from ._shared.base_client import parse_query
+
+if TYPE_CHECKING:
+    from urllib.parse import ParseResult
+
+
+def _parse_url(account_url: str, share_name: str) -> "ParseResult":
+    try:
+        if not account_url.lower().startswith('http'):
+            account_url = "https://" + account_url
+    except AttributeError as exc:
+        raise ValueError("Account URL must be a string.") from exc
+    parsed_url = urlparse(account_url.rstrip('/'))
+    if not share_name:
+        raise ValueError("Please specify a share name.")
+    if not parsed_url.netloc:
+        raise ValueError(f"Invalid URL: {account_url}")
+    return parsed_url
+
+
+def _format_url(scheme: str, hostname: str, share_name: Union[str, bytes], query_str: str) -> str:
+    if isinstance(share_name, str):
+        share_name = share_name.encode('UTF-8')
+    return f"{scheme}://{hostname}/{quote(share_name)}{query_str}"
+
+
+def _from_share_url(share_url: str, snapshot: Optional[Union[str, Dict[str, Any]]]) -> Tuple[str, str, Optional[str]]:
+    try:
+        if not share_url.lower().startswith('http'):
+            share_url = "https://" + share_url
+    except AttributeError as exc:
+        raise ValueError("Share URL must be a string.") from exc
+    parsed_url = urlparse(share_url.rstrip('/'))
+    if not (parsed_url.path and parsed_url.netloc):
+        raise ValueError(f"Invalid URL: {share_url}")
+
+    share_path = parsed_url.path.lstrip('/').split('/')
+    account_path = ""
+    if len(share_path) > 1:
+        account_path = "/" + "/".join(share_path[:-1])
+    account_url = f"{parsed_url.scheme}://{parsed_url.netloc.rstrip('/')}{account_path}?{parsed_url.query}"
+
+    share_name = unquote(share_path[-1])
+    path_snapshot, _ = parse_query(parsed_url.query)
+    path_snapshot = _parse_snapshot(snapshot, path_snapshot)
+
+    if not share_name:
+        raise ValueError("Invalid URL. Please provide a URL with a valid share name")
+
+    return account_url, share_name, path_snapshot
+
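+# Illustrative only (assumed values, not a real account): _from_share_url splits a
+# share URL into (account_url, share_name, snapshot), e.g.
+#   _from_share_url("https://myaccount.file.core.windows.net/myshare?sv=2025-01-05", None)
+#   returns ("https://myaccount.file.core.windows.net?sv=2025-01-05", "myshare", None)
+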
+
+def _create_permission_for_share_options(file_permission: str, **kwargs: Any) -> Dict[str, Any]:
+    options = {
+        'share_permission': SharePermission(permission=file_permission),
+        'cls': deserialize_permission_key,
+        'timeout': kwargs.pop('timeout', None),
+    }
+    options.update(kwargs)
+    return options
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/fileshare/_share_service_client.py b/.venv/lib/python3.12/site-packages/azure/storage/fileshare/_share_service_client.py
new file mode 100644
index 00000000..4ef878f9
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/fileshare/_share_service_client.py
@@ -0,0 +1,489 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+# pylint: disable=docstring-keyword-should-match-keyword-only
+
+import sys
+import functools
+from typing import (
+    Union, Optional, Any, Dict, List,
+    TYPE_CHECKING
+)
+from typing_extensions import Self
+
+from azure.core.exceptions import HttpResponseError
+from azure.core.paging import ItemPaged
+from azure.core.pipeline import Pipeline
+from azure.core.tracing.decorator import distributed_trace
+from ._generated import AzureFileStorage
+from ._generated.models import StorageServiceProperties
+from ._models import (
+    CorsRule,
+    ShareProperties,
+    SharePropertiesPaged,
+    service_properties_deserialize,
+)
+from ._serialize import get_api_version
+from ._share_client import ShareClient
+from ._share_service_client_helpers import _parse_url
+from ._shared.base_client import StorageAccountHostsMixin, TransportWrapper, parse_connection_str, parse_query
+from ._shared.response_handlers import process_storage_error
+
+if sys.version_info >= (3, 8):
+    from typing import Literal
+else:
+    from typing_extensions import Literal
+
+if TYPE_CHECKING:
+    from azure.core.credentials import AzureNamedKeyCredential, AzureSasCredential, TokenCredential
+    from ._models import Metrics, ShareProtocolSettings
+
+
+class ShareServiceClient(StorageAccountHostsMixin):
+    """A client to interact with the File Share Service at the account level.
+
+    This client provides operations to retrieve and configure the account properties
+    as well as list, create and delete shares within the account.
+    For operations relating to a specific share, a client for that entity
+    can also be retrieved using the :func:`get_share_client` function.
+
+    For more optional configuration, please click
+    `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+    #optional-configuration>`__.
+
+    :param str account_url:
+        The URL to the file share storage account. Any other entities included
+        in the URL path (e.g. share or file) will be discarded. This URL can be optionally
+        authenticated with a SAS token.
+    :param credential:
+        The credentials with which to authenticate. This is optional if the
+        account URL already has a SAS token. The value can be a SAS token string,
+        an instance of an AzureSasCredential or AzureNamedKeyCredential from azure.core.credentials,
+        an account shared access key, or an instance of a TokenCredential class from azure.identity.
+        If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential
+        - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError.
+        If using an instance of AzureNamedKeyCredential, "name" should be the storage account name, and "key"
+        should be the storage account key.
+    :type credential:
+        ~azure.core.credentials.AzureNamedKeyCredential or
+        ~azure.core.credentials.AzureSasCredential or
+        ~azure.core.credentials.TokenCredential or
+        str or dict[str, str] or None
+    :keyword token_intent:
+        Required when using `TokenCredential` for authentication and ignored for other forms of authentication.
+        Specifies the intent for all requests when using `TokenCredential` authentication. Possible values are:
+
+        backup - Specifies requests are intended for backup/admin type operations, meaning that all file/directory
+                 ACLs are bypassed and full permissions are granted. User must also have required RBAC permission.
+
+    :paramtype token_intent: Literal['backup']
+    :keyword bool allow_trailing_dot: If true, the trailing dot will not be trimmed from the target URI.
+    :keyword bool allow_source_trailing_dot: If true, the trailing dot will not be trimmed from the source URI.
+    :keyword str api_version:
+        The Storage API version to use for requests. Default value is the most recent service version that is
+        compatible with the current SDK. Setting to an older version may result in reduced feature compatibility.
+
+        .. versionadded:: 12.1.0
+
+    :keyword str secondary_hostname:
+        The hostname of the secondary endpoint.
+    :keyword int max_range_size: The maximum range size used for a file upload. Defaults to 4*1024*1024.
+
+    .. admonition:: Example:
+
+        .. literalinclude:: ../samples/file_samples_authentication.py
+            :start-after: [START create_share_service_client]
+            :end-before: [END create_share_service_client]
+            :language: python
+            :dedent: 8
+            :caption: Create the share service client with url and credential.
+    """
+    def __init__(
+        self, account_url: str,
+        credential: Optional[Union[str, Dict[str, str], "AzureNamedKeyCredential", "AzureSasCredential", "TokenCredential"]] = None,  # pylint: disable=line-too-long
+        *,
+        token_intent: Optional[Literal['backup']] = None,
+        **kwargs: Any
+    ) -> None:
+        if hasattr(credential, 'get_token') and not token_intent:
+            raise ValueError("'token_intent' keyword is required when 'credential' is an TokenCredential.")
+        parsed_url = _parse_url(account_url=account_url)
+        _, sas_token = parse_query(parsed_url.query)
+        if not sas_token and not credential:
+            raise ValueError(
+                'You need to provide either an account shared key or SAS token when creating a storage service.')
+
+        self._query_str, credential = self._format_query_string(sas_token, credential)
+        super(ShareServiceClient, self).__init__(
+            parsed_url, service='file-share', credential=credential, **kwargs)
+        self.allow_trailing_dot = kwargs.pop('allow_trailing_dot', None)
+        self.allow_source_trailing_dot = kwargs.pop('allow_source_trailing_dot', None)
+        self.file_request_intent = token_intent
+        self._client = AzureFileStorage(url=self.url, base_url=self.url, pipeline=self._pipeline,
+                                        allow_trailing_dot=self.allow_trailing_dot,
+                                        allow_source_trailing_dot=self.allow_source_trailing_dot,
+                                        file_request_intent=self.file_request_intent)
+        self._client._config.version = get_api_version(kwargs)  # type: ignore [assignment]
+
+    def _format_url(self, hostname: str) -> str:
+        """Format the endpoint URL according to the current location mode hostname.
+
+        :param str hostname:
+            The hostname of the current location mode.
+        :returns: A formatted endpoint URL including current location mode hostname.
+        :rtype: str
+        """
+        return f"{self.scheme}://{hostname}/{self._query_str}"
+
+    @classmethod
+    def from_connection_string(
+        cls, conn_str: str,
+        credential: Optional[Union[str, Dict[str, str], "AzureNamedKeyCredential", "AzureSasCredential", "TokenCredential"]] = None,  # pylint: disable=line-too-long
+        **kwargs: Any
+    ) -> Self:
+        """Create ShareServiceClient from a Connection String.
+
+        :param str conn_str:
+            A connection string to an Azure Storage account.
+        :param credential:
+            The credentials with which to authenticate. This is optional if the
+            account URL already has a SAS token. The value can be a SAS token string,
+            an instance of an AzureSasCredential or AzureNamedKeyCredential from azure.core.credentials,
+            an account shared access key, or an instance of a TokenCredential class from azure.identity.
+            If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential
+            - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError.
+            If using an instance of AzureNamedKeyCredential, "name" should be the storage account name, and "key"
+            should be the storage account key.
+        :type credential:
+            ~azure.core.credentials.AzureNamedKeyCredential or
+            ~azure.core.credentials.AzureSasCredential or
+            ~azure.core.credentials.TokenCredential or
+            str or dict[str, str] or None
+        :returns: A File Share service client.
+        :rtype: ~azure.storage.fileshare.ShareServiceClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/file_samples_authentication.py
+                :start-after: [START create_share_service_client_from_conn_string]
+                :end-before: [END create_share_service_client_from_conn_string]
+                :language: python
+                :dedent: 8
+                :caption: Create the share service client with connection string.
+        """
+        account_url, secondary, credential = parse_connection_str(conn_str, credential, 'file')
+        if 'secondary_hostname' not in kwargs:
+            kwargs['secondary_hostname'] = secondary
+        return cls(account_url, credential=credential, **kwargs)
+
+    @distributed_trace
+    def get_service_properties(self, **kwargs: Any) -> Dict[str, Any]:
+        """Gets the properties of a storage account's File Share service, including
+        Azure Storage Analytics.
+
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :returns: A dictionary containing file service properties such as
+            analytics logging, hour/minute metrics, cors rules, etc.
+        :rtype: Dict[str, Any]
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/file_samples_service.py
+                :start-after: [START get_service_properties]
+                :end-before: [END get_service_properties]
+                :language: python
+                :dedent: 8
+                :caption: Get file share service properties.
+        """
+        timeout = kwargs.pop('timeout', None)
+        try:
+            service_props = self._client.service.get_properties(timeout=timeout, **kwargs)
+            return service_properties_deserialize(service_props)
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    @distributed_trace
+    def set_service_properties(
+        self, hour_metrics: Optional["Metrics"] = None,
+        minute_metrics: Optional["Metrics"] = None,
+        cors: Optional[List[CorsRule]] = None,
+        protocol: Optional["ShareProtocolSettings"] = None,
+        **kwargs: Any
+    ) -> None:
+        """Sets the properties of a storage account's File Share service, including
+        Azure Storage Analytics. If an element (e.g. hour_metrics) is left as None, the
+        existing settings on the service for that functionality are preserved.
+
+        :param hour_metrics:
+            The hour metrics settings provide a summary of request
+            statistics grouped by API in hourly aggregates for files.
+        :type hour_metrics: ~azure.storage.fileshare.Metrics
+        :param minute_metrics:
+            The minute metrics settings provide request statistics
+            for each minute for files.
+        :type minute_metrics: ~azure.storage.fileshare.Metrics
+        :param cors:
+            You can include up to five CorsRule elements in the
+            list. If an empty list is specified, all CORS rules will be deleted,
+            and CORS will be disabled for the service.
+        :type cors: list[~azure.storage.fileshare.CorsRule]
+        :param protocol:
+            Sets the protocol settings.
+        :type protocol: ~azure.storage.fileshare.ShareProtocolSettings
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :rtype: None
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/file_samples_service.py
+                :start-after: [START set_service_properties]
+                :end-before: [END set_service_properties]
+                :language: python
+                :dedent: 8
+                :caption: Sets file share service properties.
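+
+            A short construction sketch, assuming ``service`` is an existing
+            ``ShareServiceClient`` (the origin and method values are illustrative):
+
+            .. code-block:: python
+
+                from azure.storage.fileshare import CorsRule
+
+                cors_rule = CorsRule(['www.example.com'], ['GET', 'PUT'])
+                service.set_service_properties(cors=[cors_rule])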
+        """
+        timeout = kwargs.pop('timeout', None)
+        props = StorageServiceProperties(
+            hour_metrics=hour_metrics,
+            minute_metrics=minute_metrics,
+            cors=CorsRule._to_generated(cors),  # pylint: disable=protected-access
+            protocol=protocol
+        )
+        try:
+            self._client.service.set_properties(storage_service_properties=props, timeout=timeout, **kwargs)
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    @distributed_trace
+    def list_shares(
+        self, name_starts_with: Optional[str] = None,
+        include_metadata: Optional[bool] = False,
+        include_snapshots: Optional[bool] = False,
+        **kwargs: Any
+    ) -> ItemPaged[ShareProperties]:
+        """Returns auto-paging iterable of dict-like ShareProperties under the specified account.
+        The generator will lazily follow the continuation tokens returned by
+        the service and stop when all shares have been returned.
+
+        :param str name_starts_with:
+            Filters the results to return only shares whose names
+            begin with the specified name_starts_with.
+        :param bool include_metadata:
+            Specifies that share metadata be returned in the response.
+        :param bool include_snapshots:
+            Specifies that share snapshots be returned in the response.
+        :keyword bool include_deleted:
+            Specifies that deleted shares be returned in the response.
+            This is only supported for accounts with share soft delete enabled.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :returns: An iterable (auto-paging) of ShareProperties.
+        :rtype: ~azure.core.paging.ItemPaged[~azure.storage.fileshare.ShareProperties]
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/file_samples_service.py
+                :start-after: [START fsc_list_shares]
+                :end-before: [END fsc_list_shares]
+                :language: python
+                :dedent: 12
+                :caption: List shares in the file share service.
+        """
+        timeout = kwargs.pop('timeout', None)
+        include = []
+        include_deleted = kwargs.pop('include_deleted', None)
+        if include_deleted:
+            include.append("deleted")
+        if include_metadata:
+            include.append('metadata')
+        if include_snapshots:
+            include.append('snapshots')
+
+        results_per_page = kwargs.pop('results_per_page', None)
+        command = functools.partial(
+            self._client.service.list_shares_segment,
+            include=include,
+            timeout=timeout,
+            **kwargs)
+        return ItemPaged(
+            command, prefix=name_starts_with, results_per_page=results_per_page,
+            page_iterator_class=SharePropertiesPaged)
+
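+    # --- Editorial example (not part of the upstream module): a usage
+    # sketch, assuming a ShareServiceClient named `service`; the name prefix
+    # is illustrative.
+    #
+    #     for share in service.list_shares(name_starts_with="logs",
+    #                                      include_metadata=True):
+    #         print(share.name, share.metadata)
+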
+    @distributed_trace
+    def create_share(self, share_name: str, **kwargs: Any) -> ShareClient:
+        """Creates a new share under the specified account. If the share
+        with the same name already exists, the operation fails. Returns a client with
+        which to interact with the newly created share.
+
+        :param str share_name: The name of the share to create.
+        :keyword dict[str, str] metadata:
+            A dict with name-value pairs to associate with the
+            share as metadata. Example: {'Category': 'test'}
+        :keyword int quota:
+            The quota of the share, in GiB.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :keyword int provisioned_iops: The provisioned IOPS of the share, stored on the share object.
+        :keyword int provisioned_bandwidth_mibps: The provisioned throughput of the share, stored on the share object.
+        :return: A ShareClient for the newly created Share.
+        :rtype: ~azure.storage.fileshare.ShareClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/file_samples_service.py
+                :start-after: [START fsc_create_shares]
+                :end-before: [END fsc_create_shares]
+                :language: python
+                :dedent: 8
+                :caption: Create a share in the file share service.
+        """
+        metadata = kwargs.pop('metadata', None)
+        quota = kwargs.pop('quota', None)
+        timeout = kwargs.pop('timeout', None)
+        provisioned_iops = kwargs.pop('provisioned_iops', None)
+        provisioned_bandwidth_mibps = kwargs.pop('provisioned_bandwidth_mibps', None)
+        share = self.get_share_client(share_name)
+        kwargs.setdefault('merge_span', True)
+        share.create_share(
+            metadata=metadata,
+            quota=quota,
+            timeout=timeout,
+            provisioned_iops=provisioned_iops,
+            provisioned_bandwidth_mibps=provisioned_bandwidth_mibps,
+            **kwargs
+        )
+        return share
+
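+    # --- Editorial example (not part of the upstream module): a usage
+    # sketch, assuming a ShareServiceClient named `service`; the share name
+    # and quota are illustrative.
+    #
+    #     share_client = service.create_share("myshare", quota=1)
+    #     print(share_client.share_name)  # -> "myshare"
+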
+    @distributed_trace
+    def delete_share(
+        self, share_name: Union[ShareProperties, str],
+        delete_snapshots: Optional[bool] = False,
+        **kwargs: Any
+    ) -> None:
+        """Marks the specified share for deletion. The share is
+        later deleted during garbage collection.
+
+        :param share_name:
+            The share to delete. This can either be the name of the share,
+            or an instance of ShareProperties.
+        :type share_name: str or ~azure.storage.fileshare.ShareProperties
+        :param bool delete_snapshots:
+            Indicates if snapshots are to be deleted.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :rtype: None
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/file_samples_service.py
+                :start-after: [START fsc_delete_shares]
+                :end-before: [END fsc_delete_shares]
+                :language: python
+                :dedent: 12
+                :caption: Delete a share in the file share service.
+        """
+        timeout = kwargs.pop('timeout', None)
+        share = self.get_share_client(share_name)
+        kwargs.setdefault('merge_span', True)
+        share.delete_share(
+            delete_snapshots=delete_snapshots, timeout=timeout, **kwargs)
+
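+    # --- Editorial example (not part of the upstream module): a usage
+    # sketch, assuming a ShareServiceClient named `service`. Passing
+    # delete_snapshots=True removes any snapshots along with the share.
+    #
+    #     service.delete_share("myshare", delete_snapshots=True)
+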
+    @distributed_trace
+    def undelete_share(self, deleted_share_name: str, deleted_share_version: str, **kwargs: Any) -> ShareClient:
+        """Restores soft-deleted share.
+
+        Operation will only be successful if used within the specified number of days
+        set in the delete retention policy.
+
+        .. versionadded:: 12.2.0
+
+            This operation was introduced in API version '2019-12-12'.
+
+        :param str deleted_share_name:
+            Specifies the name of the deleted share to restore.
+        :param str deleted_share_version:
+            Specifies the version of the deleted share to restore.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :return: A ShareClient for the undeleted Share.
+        :rtype: ~azure.storage.fileshare.ShareClient
+        """
+        share = self.get_share_client(deleted_share_name)
+
+        try:
+            share._client.share.restore(deleted_share_name=deleted_share_name,  # pylint: disable=protected-access
+                                        deleted_share_version=deleted_share_version,
+                                        timeout=kwargs.pop('timeout', None), **kwargs)
+            return share
+        except HttpResponseError as error:
+            process_storage_error(error)
+
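+    # --- Editorial example (not part of the upstream module): a usage
+    # sketch, assuming share soft delete is enabled on the account and that
+    # `deleted`/`version` are populated on ShareProperties when listing with
+    # include_deleted=True.
+    #
+    #     for share in service.list_shares(include_deleted=True):
+    #         if share.deleted:
+    #             service.undelete_share(share.name, share.version)
+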
+    def get_share_client(
+        self, share: Union[ShareProperties, str],
+        snapshot: Optional[Union[Dict[str, Any], str]] = None
+    ) -> ShareClient:
+        """Get a client to interact with the specified share.
+        The share need not already exist.
+
+        :param share:
+            The share. This can either be the name of the share,
+            or an instance of ShareProperties.
+        :type share: str or ~azure.storage.fileshare.ShareProperties
+        :param str snapshot:
+            An optional share snapshot on which to operate. This can be the snapshot ID string
+            or the response returned from :func:`create_snapshot`.
+        :returns: A ShareClient.
+        :rtype: ~azure.storage.fileshare.ShareClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/file_samples_service.py
+                :start-after: [START get_share_client]
+                :end-before: [END get_share_client]
+                :language: python
+                :dedent: 8
+                :caption: Gets the share client.
+        """
+        if isinstance(share, ShareProperties):
+            share_name = share.name
+        else:
+            share_name = share
+
+        _pipeline = Pipeline(
+            transport=TransportWrapper(self._pipeline._transport), # pylint: disable=protected-access
+            policies=self._pipeline._impl_policies # pylint: disable=protected-access
+        )
+        return ShareClient(
+            self.url, share_name=share_name, snapshot=snapshot, credential=self.credential,
+            api_version=self.api_version, _hosts=self._hosts,
+            _configuration=self._config, _pipeline=_pipeline, _location_mode=self._location_mode,
+            allow_trailing_dot=self.allow_trailing_dot, allow_source_trailing_dot=self.allow_source_trailing_dot,
+            token_intent=self.file_request_intent)
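+
+    # --- Editorial example (not part of the upstream module): a usage
+    # sketch. get_share_client is a purely local operation; no request is
+    # made until a method on the returned ShareClient is called.
+    #
+    #     share_client = service.get_share_client("myshare")
+    #     props = share_client.get_share_properties()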
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/fileshare/_share_service_client_helpers.py b/.venv/lib/python3.12/site-packages/azure/storage/fileshare/_share_service_client_helpers.py
new file mode 100644
index 00000000..9a6ebe8e
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/fileshare/_share_service_client_helpers.py
@@ -0,0 +1,23 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+
+from typing import TYPE_CHECKING
+from urllib.parse import urlparse
+
+if TYPE_CHECKING:
+    from urllib.parse import ParseResult
+
+
+def _parse_url(account_url: str) -> "ParseResult":
+    try:
+        if not account_url.lower().startswith("http"):
+            account_url = "https://" + account_url
+    except AttributeError as exc:
+        raise ValueError("Account URL must be a string.") from exc
+    parsed_url = urlparse(account_url.rstrip('/'))
+    if not parsed_url.netloc:
+        raise ValueError(f"Invalid URL: {account_url}")
+    return parsed_url
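+
+
+# --- Editorial example (not part of the upstream module): a usage sketch.
+# The helper defaults a bare account host to https and validates the result;
+# the account name below is illustrative.
+#
+#     _parse_url("myaccount.file.core.windows.net").netloc
+#     # -> 'myaccount.file.core.windows.net' (scheme defaulted to https)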
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/fileshare/_shared/__init__.py b/.venv/lib/python3.12/site-packages/azure/storage/fileshare/_shared/__init__.py
new file mode 100644
index 00000000..a8b1a27d
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/fileshare/_shared/__init__.py
@@ -0,0 +1,54 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+
+import base64
+import hashlib
+import hmac
+
+try:
+    from urllib.parse import quote, unquote
+except ImportError:
+    from urllib import quote, unquote  # type: ignore  # Python 2: quote/unquote live in urllib, not urllib2
+
+
+def url_quote(url):
+    return quote(url)
+
+
+def url_unquote(url):
+    return unquote(url)
+
+
+def encode_base64(data):
+    if isinstance(data, str):
+        data = data.encode('utf-8')
+    encoded = base64.b64encode(data)
+    return encoded.decode('utf-8')
+
+
+def decode_base64_to_bytes(data):
+    if isinstance(data, str):
+        data = data.encode('utf-8')
+    return base64.b64decode(data)
+
+
+def decode_base64_to_text(data):
+    decoded_bytes = decode_base64_to_bytes(data)
+    return decoded_bytes.decode('utf-8')
+
+
+def sign_string(key, string_to_sign, key_is_base64=True):
+    if key_is_base64:
+        key = decode_base64_to_bytes(key)
+    else:
+        if isinstance(key, str):
+            key = key.encode('utf-8')
+    if isinstance(string_to_sign, str):
+        string_to_sign = string_to_sign.encode('utf-8')
+    signed_hmac_sha256 = hmac.HMAC(key, string_to_sign, hashlib.sha256)
+    digest = signed_hmac_sha256.digest()
+    encoded_digest = encode_base64(digest)
+    return encoded_digest
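+
+
+# --- Editorial example (not part of the upstream module): a usage sketch.
+# Storage account keys are distributed base64-encoded, so sign_string
+# decodes the key before computing the HMAC-SHA256 digest; the key below is
+# illustrative, not a real credential.
+#
+#     fake_key = encode_base64(b"not-a-real-key")
+#     signature = sign_string(fake_key, "GET\n\n")  # base64 HMAC-SHA256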
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/fileshare/_shared/authentication.py b/.venv/lib/python3.12/site-packages/azure/storage/fileshare/_shared/authentication.py
new file mode 100644
index 00000000..44c563d8
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/fileshare/_shared/authentication.py
@@ -0,0 +1,244 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+
+import logging
+import re
+from typing import List, Tuple
+from urllib.parse import unquote, urlparse
+from functools import cmp_to_key
+
+try:
+    from yarl import URL
+except ImportError:
+    pass
+
+try:
+    from azure.core.pipeline.transport import AioHttpTransport  # pylint: disable=non-abstract-transport-import
+except ImportError:
+    AioHttpTransport = None
+
+from azure.core.exceptions import ClientAuthenticationError
+from azure.core.pipeline.policies import SansIOHTTPPolicy
+
+from . import sign_string
+
+logger = logging.getLogger(__name__)
+
+table_lv0 = [
+    0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+    0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+    0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x71c, 0x0, 0x71f, 0x721, 0x723, 0x725,
+    0x0, 0x0, 0x0, 0x72d, 0x803, 0x0, 0x0, 0x733, 0x0, 0xd03, 0xd1a, 0xd1c, 0xd1e,
+    0xd20, 0xd22, 0xd24, 0xd26, 0xd28, 0xd2a, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+    0xe02, 0xe09, 0xe0a, 0xe1a, 0xe21, 0xe23, 0xe25, 0xe2c, 0xe32, 0xe35, 0xe36, 0xe48, 0xe51,
+    0xe70, 0xe7c, 0xe7e, 0xe89, 0xe8a, 0xe91, 0xe99, 0xe9f, 0xea2, 0xea4, 0xea6, 0xea7, 0xea9,
+    0x0, 0x0, 0x0, 0x743, 0x744, 0x748, 0xe02, 0xe09, 0xe0a, 0xe1a, 0xe21, 0xe23, 0xe25,
+    0xe2c, 0xe32, 0xe35, 0xe36, 0xe48, 0xe51, 0xe70, 0xe7c, 0xe7e, 0xe89, 0xe8a, 0xe91, 0xe99,
+    0xe9f, 0xea2, 0xea4, 0xea6, 0xea7, 0xea9, 0x0, 0x74c, 0x0, 0x750, 0x0,
+]
+
+table_lv4 = [
+    0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+    0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+    0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x8012, 0x0, 0x0, 0x0, 0x0, 0x0, 0x8212, 0x0, 0x0,
+    0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+    0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+    0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+    0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+    0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+]
+
+def compare(lhs: str, rhs: str) -> int:  # pylint:disable=too-many-return-statements
+    tables = [table_lv0, table_lv4]
+    curr_level, i, j, n = 0, 0, 0, len(tables)
+    lhs_len = len(lhs)
+    rhs_len = len(rhs)
+    while curr_level < n:
+        if curr_level == (n - 1) and i != j:
+            if i > j:
+                return -1
+            if i < j:
+                return 1
+            return 0
+
+        w1 = tables[curr_level][ord(lhs[i])] if i < lhs_len else 0x1
+        w2 = tables[curr_level][ord(rhs[j])] if j < rhs_len else 0x1
+
+        if w1 == 0x1 and w2 == 0x1:
+            i = 0
+            j = 0
+            curr_level += 1
+        elif w1 == w2:
+            i += 1
+            j += 1
+        elif w1 == 0:
+            i += 1
+        elif w2 == 0:
+            j += 1
+        else:
+            if w1 < w2:
+                return -1
+            if w1 > w2:
+                return 1
+            return 0
+    return 0
+
+
+# wraps a given exception with the desired exception type
+def _wrap_exception(ex, desired_type):
+    msg = ""
+    if ex.args:
+        msg = ex.args[0]
+    return desired_type(msg)
+
+# This method attempts to emulate the sorting done by the service
+def _storage_header_sort(input_headers: List[Tuple[str, str]]) -> List[Tuple[str, str]]:
+
+    # Build dict of tuples and list of keys
+    header_dict = {}
+    header_keys = []
+    for k, v in input_headers:
+        header_dict[k] = v
+        header_keys.append(k)
+
+    try:
+        header_keys = sorted(header_keys, key=cmp_to_key(compare))
+    except ValueError as exc:
+        raise ValueError("Illegal character encountered when sorting headers.") from exc
+
+    # Build list of sorted tuples
+    sorted_headers = []
+    for key in header_keys:
+        sorted_headers.append((key, header_dict.pop(key)))
+    return sorted_headers
+
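+# --- Editorial example (not part of the upstream module): a usage sketch.
+# The service-compatible ordering can differ from a plain lexicographic
+# sort for some characters; typical x-ms-* headers sort as expected.
+#
+#     _storage_header_sort([("x-ms-version", "2025-01-05"),
+#                           ("x-ms-date", "Mon, 01 Jan 2024 00:00:00 GMT")])
+#     # -> [("x-ms-date", ...), ("x-ms-version", ...)]
+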
+
+class AzureSigningError(ClientAuthenticationError):
+    """
+    Represents a fatal error when attempting to sign a request.
+    In general, the cause of this exception is user error. For example, the given account key is not valid.
+    Please visit https://learn.microsoft.com/azure/storage/common/storage-create-storage-account for more info.
+    """
+
+
+class SharedKeyCredentialPolicy(SansIOHTTPPolicy):
+
+    def __init__(self, account_name, account_key):
+        self.account_name = account_name
+        self.account_key = account_key
+        super(SharedKeyCredentialPolicy, self).__init__()
+
+    @staticmethod
+    def _get_headers(request, headers_to_sign):
+        headers = dict((name.lower(), value) for name, value in request.http_request.headers.items() if value)
+        if 'content-length' in headers and headers['content-length'] == '0':
+            del headers['content-length']
+        return '\n'.join(headers.get(x, '') for x in headers_to_sign) + '\n'
+
+    @staticmethod
+    def _get_verb(request):
+        return request.http_request.method + '\n'
+
+    def _get_canonicalized_resource(self, request):
+        uri_path = urlparse(request.http_request.url).path
+        try:
+            if isinstance(request.context.transport, AioHttpTransport) or \
+                    isinstance(getattr(request.context.transport, "_transport", None), AioHttpTransport) or \
+                    isinstance(getattr(getattr(request.context.transport, "_transport", None), "_transport", None),
+                               AioHttpTransport):
+                uri_path = URL(uri_path)
+                return '/' + self.account_name + str(uri_path)
+        except TypeError:
+            pass
+        return '/' + self.account_name + uri_path
+
+    @staticmethod
+    def _get_canonicalized_headers(request):
+        string_to_sign = ''
+        x_ms_headers = []
+        for name, value in request.http_request.headers.items():
+            if name.startswith('x-ms-'):
+                x_ms_headers.append((name.lower(), value))
+        x_ms_headers = _storage_header_sort(x_ms_headers)
+        for name, value in x_ms_headers:
+            if value is not None:
+                string_to_sign += ''.join([name, ':', value, '\n'])
+        return string_to_sign
+
+    @staticmethod
+    def _get_canonicalized_resource_query(request):
+        sorted_queries = list(request.http_request.query.items())
+        sorted_queries.sort()
+
+        string_to_sign = ''
+        for name, value in sorted_queries:
+            if value is not None:
+                string_to_sign += '\n' + name.lower() + ':' + unquote(value)
+
+        return string_to_sign
+
+    def _add_authorization_header(self, request, string_to_sign):
+        try:
+            signature = sign_string(self.account_key, string_to_sign)
+            auth_string = 'SharedKey ' + self.account_name + ':' + signature
+            request.http_request.headers['Authorization'] = auth_string
+        except Exception as ex:
+            # Wrap any error that occurred as a signing error.
+            # Doing so will clarify/locate the source of the problem.
+            raise _wrap_exception(ex, AzureSigningError) from ex
+
+    def on_request(self, request):
+        string_to_sign = \
+            self._get_verb(request) + \
+            self._get_headers(
+                request,
+                [
+                    'content-encoding', 'content-language', 'content-length',
+                    'content-md5', 'content-type', 'date', 'if-modified-since',
+                    'if-match', 'if-none-match', 'if-unmodified-since', 'byte_range'
+                ]
+            ) + \
+            self._get_canonicalized_headers(request) + \
+            self._get_canonicalized_resource(request) + \
+            self._get_canonicalized_resource_query(request)
+
+        self._add_authorization_header(request, string_to_sign)
+        # logger.debug("String_to_sign=%s", string_to_sign)
+
+
+class StorageHttpChallenge(object):
+    def __init__(self, challenge):
+        """ Parses an HTTP WWW-Authentication Bearer challenge from the Storage service. """
+        if not challenge:
+            raise ValueError("Challenge cannot be empty")
+
+        self._parameters = {}
+        self.scheme, trimmed_challenge = challenge.strip().split(" ", 1)
+
+        # name=value pairs, either comma- or space-separated, with values
+        # possibly enclosed in quotes
+        for item in re.split('[, ]', trimmed_challenge):
+            comps = item.split("=")
+            if len(comps) == 2:
+                key = comps[0].strip(' "')
+                value = comps[1].strip(' "')
+                if key:
+                    self._parameters[key] = value
+
+        # Extract and verify required parameters
+        self.authorization_uri = self._parameters.get('authorization_uri')
+        if not self.authorization_uri:
+            raise ValueError("Authorization Uri not found")
+
+        self.resource_id = self._parameters.get('resource_id')
+        if not self.resource_id:
+            raise ValueError("Resource id not found")
+
+        uri_path = urlparse(self.authorization_uri).path.lstrip("/")
+        self.tenant_id = uri_path.split("/")[0]
+
+    def get_value(self, key):
+        return self._parameters.get(key)
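+
+
+# --- Editorial example (not part of the upstream module): a usage sketch.
+# The tenant and URIs below are illustrative, shaped like a real challenge.
+#
+#     challenge = StorageHttpChallenge(
+#         'Bearer authorization_uri=https://login.microsoftonline.com/'
+#         'common/oauth2/authorize resource_id=https://storage.azure.com')
+#     challenge.tenant_id  # -> 'common'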
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/fileshare/_shared/base_client.py b/.venv/lib/python3.12/site-packages/azure/storage/fileshare/_shared/base_client.py
new file mode 100644
index 00000000..9dc8d2ec
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/fileshare/_shared/base_client.py
@@ -0,0 +1,458 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+import logging
+import uuid
+from typing import (
+    Any,
+    cast,
+    Dict,
+    Iterator,
+    Optional,
+    Tuple,
+    TYPE_CHECKING,
+    Union,
+)
+from urllib.parse import parse_qs, quote
+
+from azure.core.credentials import AzureSasCredential, AzureNamedKeyCredential, TokenCredential
+from azure.core.exceptions import HttpResponseError
+from azure.core.pipeline import Pipeline
+from azure.core.pipeline.transport import HttpTransport, RequestsTransport  # pylint: disable=non-abstract-transport-import, no-name-in-module
+from azure.core.pipeline.policies import (
+    AzureSasCredentialPolicy,
+    ContentDecodePolicy,
+    DistributedTracingPolicy,
+    HttpLoggingPolicy,
+    ProxyPolicy,
+    RedirectPolicy,
+    UserAgentPolicy,
+)
+
+from .authentication import SharedKeyCredentialPolicy
+from .constants import CONNECTION_TIMEOUT, DEFAULT_OAUTH_SCOPE, READ_TIMEOUT, SERVICE_HOST_BASE, STORAGE_OAUTH_SCOPE
+from .models import LocationMode, StorageConfiguration
+from .policies import (
+    ExponentialRetry,
+    QueueMessagePolicy,
+    StorageBearerTokenCredentialPolicy,
+    StorageContentValidation,
+    StorageHeadersPolicy,
+    StorageHosts,
+    StorageLoggingPolicy,
+    StorageRequestHook,
+    StorageResponseHook,
+)
+from .request_handlers import serialize_batch_body, _get_batch_request_delimiter
+from .response_handlers import PartialBatchErrorException, process_storage_error
+from .shared_access_signature import QueryStringConstants
+from .._version import VERSION
+from .._shared_access_signature import _is_credential_sastoken
+
+if TYPE_CHECKING:
+    from azure.core.credentials_async import AsyncTokenCredential
+    from azure.core.pipeline.transport import HttpRequest, HttpResponse  # pylint: disable=C4756
+
+_LOGGER = logging.getLogger(__name__)
+_SERVICE_PARAMS = {
+    "blob": {"primary": "BLOBENDPOINT", "secondary": "BLOBSECONDARYENDPOINT"},
+    "queue": {"primary": "QUEUEENDPOINT", "secondary": "QUEUESECONDARYENDPOINT"},
+    "file": {"primary": "FILEENDPOINT", "secondary": "FILESECONDARYENDPOINT"},
+    "dfs": {"primary": "BLOBENDPOINT", "secondary": "BLOBENDPOINT"},
+}
+
+
+class StorageAccountHostsMixin(object):
+    _client: Any
+    def __init__(
+        self,
+        parsed_url: Any,
+        service: str,
+        credential: Optional[Union[str, Dict[str, str], AzureNamedKeyCredential, AzureSasCredential, "AsyncTokenCredential", TokenCredential]] = None,  # pylint: disable=line-too-long
+        **kwargs: Any
+    ) -> None:
+        self._location_mode = kwargs.get("_location_mode", LocationMode.PRIMARY)
+        self._hosts = kwargs.get("_hosts")
+        self.scheme = parsed_url.scheme
+        self._is_localhost = False
+
+        if service not in ["blob", "queue", "file-share", "dfs"]:
+            raise ValueError(f"Invalid service: {service}")
+        service_name = service.split('-')[0]
+        account = parsed_url.netloc.split(f".{service_name}.core.")
+
+        self.account_name = account[0] if len(account) > 1 else None
+        if not self.account_name and (parsed_url.netloc.startswith("localhost")
+                or parsed_url.netloc.startswith("127.0.0.1")):
+            self._is_localhost = True
+            self.account_name = parsed_url.path.strip("/")
+
+        self.credential = _format_shared_key_credential(self.account_name, credential)
+        if self.scheme.lower() != "https" and hasattr(self.credential, "get_token"):
+            raise ValueError("Token credential is only supported with HTTPS.")
+
+        secondary_hostname = None
+        if hasattr(self.credential, "account_name"):
+            self.account_name = self.credential.account_name
+            secondary_hostname = f"{self.credential.account_name}-secondary.{service_name}.{SERVICE_HOST_BASE}"
+
+        if not self._hosts:
+            if len(account) > 1:
+                secondary_hostname = parsed_url.netloc.replace(account[0], account[0] + "-secondary")
+            if kwargs.get("secondary_hostname"):
+                secondary_hostname = kwargs["secondary_hostname"]
+            primary_hostname = (parsed_url.netloc + parsed_url.path).rstrip('/')
+            self._hosts = {LocationMode.PRIMARY: primary_hostname, LocationMode.SECONDARY: secondary_hostname}
+
+        self._sdk_moniker = f"storage-{service}/{VERSION}"
+        self._config, self._pipeline = self._create_pipeline(self.credential, sdk_moniker=self._sdk_moniker, **kwargs)
+
+    def __enter__(self):
+        self._client.__enter__()
+        return self
+
+    def __exit__(self, *args):
+        self._client.__exit__(*args)
+
+    def close(self):
+        """ This method is to close the sockets opened by the client.
+        It need not be used when using with a context manager.
+        """
+        self._client.close()
+
+    @property
+    def url(self):
+        """The full endpoint URL to this entity, including SAS token if used.
+
+        This could be either the primary endpoint
+        or the secondary endpoint, depending on the current :func:`location_mode`.
+        :returns: The full endpoint URL to this entity, including SAS token if used.
+        :rtype: str
+        """
+        return self._format_url(self._hosts[self._location_mode])
+
+    @property
+    def primary_endpoint(self):
+        """The full primary endpoint URL.
+
+        :rtype: str
+        """
+        return self._format_url(self._hosts[LocationMode.PRIMARY])
+
+    @property
+    def primary_hostname(self):
+        """The hostname of the primary endpoint.
+
+        :rtype: str
+        """
+        return self._hosts[LocationMode.PRIMARY]
+
+    @property
+    def secondary_endpoint(self):
+        """The full secondary endpoint URL if configured.
+
+        If not available, a ValueError will be raised. To explicitly specify a secondary hostname, use the optional
+        `secondary_hostname` keyword argument on instantiation.
+
+        :rtype: str
+        :raise ValueError:
+        """
+        if not self._hosts[LocationMode.SECONDARY]:
+            raise ValueError("No secondary host configured.")
+        return self._format_url(self._hosts[LocationMode.SECONDARY])
+
+    @property
+    def secondary_hostname(self):
+        """The hostname of the secondary endpoint.
+
+        If not available, this will be None. To explicitly specify a secondary hostname, use the optional
+        `secondary_hostname` keyword argument on instantiation.
+
+        :rtype: Optional[str]
+        """
+        return self._hosts[LocationMode.SECONDARY]
+
+    @property
+    def location_mode(self):
+        """The location mode that the client is currently using.
+
+        By default this will be "primary". Options include "primary" and "secondary".
+
+        :rtype: str
+        """
+
+        return self._location_mode
+
+    @location_mode.setter
+    def location_mode(self, value):
+        if self._hosts.get(value):
+            self._location_mode = value
+            self._client._config.url = self.url  # pylint: disable=protected-access
+        else:
+            raise ValueError(f"No host URL for location mode: {value}")
+
+    @property
+    def api_version(self):
+        """The version of the Storage API used for requests.
+
+        :rtype: str
+        """
+        return self._client._config.version  # pylint: disable=protected-access
+
+    def _format_query_string(
+        self, sas_token: Optional[str],
+        credential: Optional[Union[str, Dict[str, str], "AzureNamedKeyCredential", "AzureSasCredential", TokenCredential]],  # pylint: disable=line-too-long
+        snapshot: Optional[str] = None,
+        share_snapshot: Optional[str] = None
+    ) -> Tuple[str, Optional[Union[str, Dict[str, str], "AzureNamedKeyCredential", "AzureSasCredential", TokenCredential]]]:  # pylint: disable=line-too-long
+        query_str = "?"
+        if snapshot:
+            query_str += f"snapshot={snapshot}&"
+        if share_snapshot:
+            query_str += f"sharesnapshot={share_snapshot}&"
+        if sas_token and isinstance(credential, AzureSasCredential):
+            raise ValueError(
+                "You cannot use AzureSasCredential when the resource URI also contains a Shared Access Signature.")
+        if _is_credential_sastoken(credential):
+            credential = cast(str, credential)
+            query_str += credential.lstrip("?")
+            credential = None
+        elif sas_token:
+            query_str += sas_token
+        return query_str.rstrip("?&"), credential
+
+    def _create_pipeline(
+        self, credential: Optional[Union[str, Dict[str, str], AzureNamedKeyCredential, AzureSasCredential, TokenCredential]] = None,  # pylint: disable=line-too-long
+        **kwargs: Any
+    ) -> Tuple[StorageConfiguration, Pipeline]:
+        self._credential_policy: Any = None
+        if hasattr(credential, "get_token"):
+            if kwargs.get('audience'):
+                audience = str(kwargs.pop('audience')).rstrip('/') + DEFAULT_OAUTH_SCOPE
+            else:
+                audience = STORAGE_OAUTH_SCOPE
+            self._credential_policy = StorageBearerTokenCredentialPolicy(cast(TokenCredential, credential), audience)
+        elif isinstance(credential, SharedKeyCredentialPolicy):
+            self._credential_policy = credential
+        elif isinstance(credential, AzureSasCredential):
+            self._credential_policy = AzureSasCredentialPolicy(credential)
+        elif credential is not None:
+            raise TypeError(f"Unsupported credential: {type(credential)}")
+
+        config = kwargs.get("_configuration") or create_configuration(**kwargs)
+        if kwargs.get("_pipeline"):
+            return config, kwargs["_pipeline"]
+        transport = kwargs.get("transport")
+        kwargs.setdefault("connection_timeout", CONNECTION_TIMEOUT)
+        kwargs.setdefault("read_timeout", READ_TIMEOUT)
+        if not transport:
+            transport = RequestsTransport(**kwargs)
+        policies = [
+            QueueMessagePolicy(),
+            config.proxy_policy,
+            config.user_agent_policy,
+            StorageContentValidation(),
+            ContentDecodePolicy(response_encoding="utf-8"),
+            RedirectPolicy(**kwargs),
+            StorageHosts(hosts=self._hosts, **kwargs),
+            config.retry_policy,
+            config.headers_policy,
+            StorageRequestHook(**kwargs),
+            self._credential_policy,
+            config.logging_policy,
+            StorageResponseHook(**kwargs),
+            DistributedTracingPolicy(**kwargs),
+            HttpLoggingPolicy(**kwargs)
+        ]
+        if kwargs.get("_additional_pipeline_policies"):
+            policies = policies + kwargs.get("_additional_pipeline_policies")  # type: ignore
+        config.transport = transport  # type: ignore
+        return config, Pipeline(transport, policies=policies)
+
+    def _batch_send(
+        self,
+        *reqs: "HttpRequest",
+        **kwargs: Any
+    ) -> Iterator["HttpResponse"]:
+        """Given a series of request, do a Storage batch call.
+
+        :param HttpRequest reqs: A collection of HttpRequest objects.
+        :returns: An iterator of HttpResponse objects.
+        :rtype: Iterator[HttpResponse]
+        """
+        # Pop it here, so requests doesn't feel bad about additional kwarg
+        raise_on_any_failure = kwargs.pop("raise_on_any_failure", True)
+        batch_id = str(uuid.uuid1())
+
+        request = self._client._client.post(  # pylint: disable=protected-access
+            url=(
+                f'{self.scheme}://{self.primary_hostname}/'
+                f"{kwargs.pop('path', '')}?{kwargs.pop('restype', '')}"
+                f"comp=batch{kwargs.pop('sas', '')}{kwargs.pop('timeout', '')}"
+            ),
+            headers={
+                'x-ms-version': self.api_version,
+                "Content-Type": "multipart/mixed; boundary=" + _get_batch_request_delimiter(batch_id, False, False)
+            }
+        )
+
+        policies = [StorageHeadersPolicy()]
+        if self._credential_policy:
+            policies.append(self._credential_policy)
+
+        request.set_multipart_mixed(
+            *reqs,
+            policies=policies,
+            enforce_https=False
+        )
+
+        Pipeline._prepare_multipart_mixed_request(request)  # pylint: disable=protected-access
+        body = serialize_batch_body(request.multipart_mixed_info[0], batch_id)
+        request.set_bytes_body(body)
+
+        temp = request.multipart_mixed_info
+        request.multipart_mixed_info = None
+        pipeline_response = self._pipeline.run(
+            request, **kwargs
+        )
+        response = pipeline_response.http_response
+        request.multipart_mixed_info = temp
+
+        try:
+            if response.status_code not in [202]:
+                raise HttpResponseError(response=response)
+            parts = response.parts()
+            if raise_on_any_failure:
+                parts = list(response.parts())
+                if any(p for p in parts if not 200 <= p.status_code < 300):
+                    error = PartialBatchErrorException(
+                        message="There is a partial failure in the batch operation.",
+                        response=response, parts=parts
+                    )
+                    raise error
+                return iter(parts)
+            return parts  # type: ignore [no-any-return]
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+
+class TransportWrapper(HttpTransport):
+    """Wrapper class that ensures that an inner client created
+    by a `get_client` method does not close the outer transport for the parent
+    when used in a context manager.
+    """
+    def __init__(self, transport):
+        self._transport = transport
+
+    def send(self, request, **kwargs):
+        return self._transport.send(request, **kwargs)
+
+    def open(self):
+        pass
+
+    def close(self):
+        pass
+
+    def __enter__(self):
+        pass
+
+    def __exit__(self, *args):
+        pass
+
+
+def _format_shared_key_credential(
+    account_name: Optional[str],
+    credential: Optional[Union[str, Dict[str, str], AzureNamedKeyCredential, AzureSasCredential, "AsyncTokenCredential", TokenCredential]] = None  # pylint: disable=line-too-long
+) -> Any:
+    if isinstance(credential, str):
+        if not account_name:
+            raise ValueError("Unable to determine account name for shared key credential.")
+        credential = {"account_name": account_name, "account_key": credential}
+    if isinstance(credential, dict):
+        if "account_name" not in credential:
+            raise ValueError("Shared key credential missing 'account_name")
+        if "account_key" not in credential:
+            raise ValueError("Shared key credential missing 'account_key")
+        return SharedKeyCredentialPolicy(**credential)
+    if isinstance(credential, AzureNamedKeyCredential):
+        return SharedKeyCredentialPolicy(credential.named_key.name, credential.named_key.key)
+    return credential
+
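+# --- Editorial example (not part of the upstream module): a usage sketch.
+# A raw key string (or dict) is normalized into a SharedKeyCredentialPolicy;
+# the key below is illustrative.
+#
+#     policy = _format_shared_key_credential("myaccount", "<base64-key>")
+#     isinstance(policy, SharedKeyCredentialPolicy)  # -> True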
+
+def parse_connection_str(
+    conn_str: str,
+    credential: Optional[Union[str, Dict[str, str], AzureNamedKeyCredential, AzureSasCredential, TokenCredential]],
+    service: str
+) -> Tuple[str, Optional[str], Optional[Union[str, Dict[str, str], AzureNamedKeyCredential, AzureSasCredential, TokenCredential]]]:  # pylint: disable=line-too-long
+    conn_str = conn_str.rstrip(";")
+    conn_settings_list = [s.split("=", 1) for s in conn_str.split(";")]
+    if any(len(tup) != 2 for tup in conn_settings_list):
+        raise ValueError("Connection string is either blank or malformed.")
+    conn_settings = dict((key.upper(), val) for key, val in conn_settings_list)
+    endpoints = _SERVICE_PARAMS[service]
+    primary = None
+    secondary = None
+    if not credential:
+        try:
+            credential = {"account_name": conn_settings["ACCOUNTNAME"], "account_key": conn_settings["ACCOUNTKEY"]}
+        except KeyError:
+            credential = conn_settings.get("SHAREDACCESSSIGNATURE")
+    if endpoints["primary"] in conn_settings:
+        primary = conn_settings[endpoints["primary"]]
+        if endpoints["secondary"] in conn_settings:
+            secondary = conn_settings[endpoints["secondary"]]
+    else:
+        if endpoints["secondary"] in conn_settings:
+            raise ValueError("Connection string specifies only secondary endpoint.")
+        try:
+            primary = (
+                f"{conn_settings['DEFAULTENDPOINTSPROTOCOL']}://"
+                f"{conn_settings['ACCOUNTNAME']}.{service}.{conn_settings['ENDPOINTSUFFIX']}"
+            )
+            secondary = (
+                f"{conn_settings['ACCOUNTNAME']}-secondary."
+                f"{service}.{conn_settings['ENDPOINTSUFFIX']}"
+            )
+        except KeyError:
+            pass
+
+    if not primary:
+        try:
+            primary = (
+                f"https://{conn_settings['ACCOUNTNAME']}."
+                f"{service}.{conn_settings.get('ENDPOINTSUFFIX', SERVICE_HOST_BASE)}"
+            )
+        except KeyError as exc:
+            raise ValueError("Connection string missing required connection details.") from exc
+    if service == "dfs":
+        primary = primary.replace(".blob.", ".dfs.")
+        if secondary:
+            secondary = secondary.replace(".blob.", ".dfs.")
+    return primary, secondary, credential
+
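+# --- Editorial example (not part of the upstream module): a usage sketch.
+# The connection string below is illustrative (key redacted).
+#
+#     conn = ("DefaultEndpointsProtocol=https;AccountName=myaccount;"
+#             "AccountKey=<key>;EndpointSuffix=core.windows.net")
+#     primary, secondary, cred = parse_connection_str(conn, None, "file")
+#     # primary -> 'https://myaccount.file.core.windows.net'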
+
+def create_configuration(**kwargs: Any) -> StorageConfiguration:
+    # Backwards compatibility if someone is not passing sdk_moniker
+    if not kwargs.get("sdk_moniker"):
+        kwargs["sdk_moniker"] = f"storage-{kwargs.pop('storage_sdk')}/{VERSION}"
+    config = StorageConfiguration(**kwargs)
+    config.headers_policy = StorageHeadersPolicy(**kwargs)
+    config.user_agent_policy = UserAgentPolicy(**kwargs)
+    config.retry_policy = kwargs.get("retry_policy") or ExponentialRetry(**kwargs)
+    config.logging_policy = StorageLoggingPolicy(**kwargs)
+    config.proxy_policy = ProxyPolicy(**kwargs)
+    return config
+
+
+def parse_query(query_str: str) -> Tuple[Optional[str], Optional[str]]:
+    sas_values = QueryStringConstants.to_list()
+    parsed_query = {k: v[0] for k, v in parse_qs(query_str).items()}
+    sas_params = [f"{k}={quote(v, safe='')}" for k, v in parsed_query.items() if k in sas_values]
+    sas_token = None
+    if sas_params:
+        sas_token = "&".join(sas_params)
+
+    snapshot = parsed_query.get("snapshot") or parsed_query.get("sharesnapshot")
+    return snapshot, sas_token
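+
+
+# --- Editorial example (not part of the upstream module): a usage sketch.
+# The SAS parameters below are illustrative, not a valid token.
+#
+#     snapshot, sas = parse_query("sharesnapshot=2024-01-01&sv=2025-01-05&sig=abc")
+#     # snapshot -> '2024-01-01'; sas -> 'sv=2025-01-05&sig=abc'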
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/fileshare/_shared/base_client_async.py b/.venv/lib/python3.12/site-packages/azure/storage/fileshare/_shared/base_client_async.py
new file mode 100644
index 00000000..6186b29d
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/fileshare/_shared/base_client_async.py
@@ -0,0 +1,280 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+# mypy: disable-error-code="attr-defined"
+
+import logging
+from typing import Any, cast, Dict, Optional, Tuple, TYPE_CHECKING, Union
+
+from azure.core.async_paging import AsyncList
+from azure.core.credentials import AzureNamedKeyCredential, AzureSasCredential
+from azure.core.credentials_async import AsyncTokenCredential
+from azure.core.exceptions import HttpResponseError
+from azure.core.pipeline import AsyncPipeline
+from azure.core.pipeline.policies import (
+    AsyncRedirectPolicy,
+    AzureSasCredentialPolicy,
+    ContentDecodePolicy,
+    DistributedTracingPolicy,
+    HttpLoggingPolicy,
+)
+from azure.core.pipeline.transport import AsyncHttpTransport
+
+from .authentication import SharedKeyCredentialPolicy
+from .base_client import create_configuration
+from .constants import CONNECTION_TIMEOUT, DEFAULT_OAUTH_SCOPE, READ_TIMEOUT, SERVICE_HOST_BASE, STORAGE_OAUTH_SCOPE
+from .models import StorageConfiguration
+from .policies import (
+    QueueMessagePolicy,
+    StorageContentValidation,
+    StorageHeadersPolicy,
+    StorageHosts,
+    StorageRequestHook,
+)
+from .policies_async import AsyncStorageBearerTokenCredentialPolicy, AsyncStorageResponseHook
+from .response_handlers import PartialBatchErrorException, process_storage_error
+from .._shared_access_signature import _is_credential_sastoken
+
+if TYPE_CHECKING:
+    from azure.core.pipeline.transport import HttpRequest, HttpResponse  # pylint: disable=C4756
+_LOGGER = logging.getLogger(__name__)
+
+_SERVICE_PARAMS = {
+    "blob": {"primary": "BLOBENDPOINT", "secondary": "BLOBSECONDARYENDPOINT"},
+    "queue": {"primary": "QUEUEENDPOINT", "secondary": "QUEUESECONDARYENDPOINT"},
+    "file": {"primary": "FILEENDPOINT", "secondary": "FILESECONDARYENDPOINT"},
+    "dfs": {"primary": "BLOBENDPOINT", "secondary": "BLOBENDPOINT"},
+}
+
+
+class AsyncStorageAccountHostsMixin(object):
+
+    def __enter__(self):
+        raise TypeError("Async client only supports 'async with'.")
+
+    def __exit__(self, *args):
+        pass
+
+    async def __aenter__(self):
+        await self._client.__aenter__()
+        return self
+
+    async def __aexit__(self, *args):
+        await self._client.__aexit__(*args)
+
+    async def close(self):
+        """ This method is to close the sockets opened by the client.
+        It need not be used when using with a context manager.
+        """
+        await self._client.close()
+
+    def _format_query_string(
+        self, sas_token: Optional[str],
+        credential: Optional[Union[str, Dict[str, str], "AzureNamedKeyCredential", "AzureSasCredential", AsyncTokenCredential]],  # pylint: disable=line-too-long
+        snapshot: Optional[str] = None,
+        share_snapshot: Optional[str] = None
+    ) -> Tuple[str, Optional[Union[str, Dict[str, str], "AzureNamedKeyCredential", "AzureSasCredential", AsyncTokenCredential]]]:  # pylint: disable=line-too-long
+        query_str = "?"
+        if snapshot:
+            query_str += f"snapshot={snapshot}&"
+        if share_snapshot:
+            query_str += f"sharesnapshot={share_snapshot}&"
+        if sas_token and isinstance(credential, AzureSasCredential):
+            raise ValueError(
+                "You cannot use AzureSasCredential when the resource URI also contains a Shared Access Signature.")
+        if _is_credential_sastoken(credential):
+            query_str += credential.lstrip("?")  # type: ignore [union-attr]
+            credential = None
+        elif sas_token:
+            query_str += sas_token
+        return query_str.rstrip("?&"), credential
+
+    def _create_pipeline(
+        self, credential: Optional[Union[str, Dict[str, str], AzureNamedKeyCredential, AzureSasCredential, AsyncTokenCredential]] = None, # pylint: disable=line-too-long
+        **kwargs: Any
+    ) -> Tuple[StorageConfiguration, AsyncPipeline]:
+        self._credential_policy: Optional[
+            Union[AsyncStorageBearerTokenCredentialPolicy,
+            SharedKeyCredentialPolicy,
+            AzureSasCredentialPolicy]] = None
+        if hasattr(credential, 'get_token'):
+            if kwargs.get('audience'):
+                audience = str(kwargs.pop('audience')).rstrip('/') + DEFAULT_OAUTH_SCOPE
+            else:
+                audience = STORAGE_OAUTH_SCOPE
+            self._credential_policy = AsyncStorageBearerTokenCredentialPolicy(
+                                        cast(AsyncTokenCredential, credential), audience)
+        elif isinstance(credential, SharedKeyCredentialPolicy):
+            self._credential_policy = credential
+        elif isinstance(credential, AzureSasCredential):
+            self._credential_policy = AzureSasCredentialPolicy(credential)
+        elif credential is not None:
+            raise TypeError(f"Unsupported credential: {type(credential)}")
+        config = kwargs.get('_configuration') or create_configuration(**kwargs)
+        if kwargs.get('_pipeline'):
+            return config, kwargs['_pipeline']
+        transport = kwargs.get('transport')
+        kwargs.setdefault("connection_timeout", CONNECTION_TIMEOUT)
+        kwargs.setdefault("read_timeout", READ_TIMEOUT)
+        if not transport:
+            try:
+                from azure.core.pipeline.transport import AioHttpTransport  # pylint: disable=non-abstract-transport-import
+            except ImportError as exc:
+                raise ImportError("Unable to create async transport. Please check aiohttp is installed.") from exc
+            transport = AioHttpTransport(**kwargs)
+        hosts = self._hosts
+        policies = [
+            QueueMessagePolicy(),
+            config.proxy_policy,
+            config.user_agent_policy,
+            StorageContentValidation(),
+            ContentDecodePolicy(response_encoding="utf-8"),
+            AsyncRedirectPolicy(**kwargs),
+            StorageHosts(hosts=hosts, **kwargs),
+            config.retry_policy,
+            config.headers_policy,
+            StorageRequestHook(**kwargs),
+            self._credential_policy,
+            config.logging_policy,
+            AsyncStorageResponseHook(**kwargs),
+            DistributedTracingPolicy(**kwargs),
+            HttpLoggingPolicy(**kwargs),
+        ]
+        if kwargs.get("_additional_pipeline_policies"):
+            policies = policies + kwargs.get("_additional_pipeline_policies")  #type: ignore
+        config.transport = transport #type: ignore
+        return config, AsyncPipeline(transport, policies=policies) #type: ignore
+
+    async def _batch_send(
+        self,
+        *reqs: "HttpRequest",
+        **kwargs: Any
+    ) -> AsyncList["HttpResponse"]:
+        """Given a series of request, do a Storage batch call.
+
+        :param HttpRequest reqs: A collection of HttpRequest objects.
+        :returns: An AsyncList of HttpResponse objects.
+        :rtype: AsyncList[HttpResponse]
+        """
+        # Pop it here, so requests doesn't feel bad about additional kwarg
+        raise_on_any_failure = kwargs.pop("raise_on_any_failure", True)
+        request = self._client._client.post(  # pylint: disable=protected-access
+            url=(
+                f'{self.scheme}://{self.primary_hostname}/'
+                f"{kwargs.pop('path', '')}?{kwargs.pop('restype', '')}"
+                f"comp=batch{kwargs.pop('sas', '')}{kwargs.pop('timeout', '')}"
+            ),
+            headers={
+                'x-ms-version': self.api_version
+            }
+        )
+
+        policies = [StorageHeadersPolicy()]
+        if self._credential_policy:
+            policies.append(self._credential_policy)  # type: ignore
+
+        request.set_multipart_mixed(
+            *reqs,
+            policies=policies,
+            enforce_https=False
+        )
+
+        pipeline_response = await self._pipeline.run(
+            request, **kwargs
+        )
+        response = pipeline_response.http_response
+
+        try:
+            if response.status_code not in [202]:
+                raise HttpResponseError(response=response)
+            parts = response.parts()  # returns an AsyncIterator
+            if raise_on_any_failure:
+                parts_list = []
+                async for part in parts:
+                    parts_list.append(part)
+                if any(p for p in parts_list if not 200 <= p.status_code < 300):
+                    error = PartialBatchErrorException(
+                        message="There is a partial failure in the batch operation.",
+                        response=response, parts=parts_list
+                    )
+                    raise error
+                return AsyncList(parts_list)
+            return parts  # type: ignore [no-any-return]
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+def parse_connection_str(
+    conn_str: str,
+    credential: Optional[Union[str, Dict[str, str], AzureNamedKeyCredential, AzureSasCredential, AsyncTokenCredential]],
+    service: str
+) -> Tuple[str, Optional[str], Optional[Union[str, Dict[str, str], AzureNamedKeyCredential, AzureSasCredential, AsyncTokenCredential]]]: # pylint: disable=line-too-long
+    conn_str = conn_str.rstrip(";")
+    conn_settings_list = [s.split("=", 1) for s in conn_str.split(";")]
+    if any(len(tup) != 2 for tup in conn_settings_list):
+        raise ValueError("Connection string is either blank or malformed.")
+    conn_settings = dict((key.upper(), val) for key, val in conn_settings_list)
+    endpoints = _SERVICE_PARAMS[service]
+    primary = None
+    secondary = None
+    if not credential:
+        try:
+            credential = {"account_name": conn_settings["ACCOUNTNAME"], "account_key": conn_settings["ACCOUNTKEY"]}
+        except KeyError:
+            credential = conn_settings.get("SHAREDACCESSSIGNATURE")
+    if endpoints["primary"] in conn_settings:
+        primary = conn_settings[endpoints["primary"]]
+        if endpoints["secondary"] in conn_settings:
+            secondary = conn_settings[endpoints["secondary"]]
+    else:
+        if endpoints["secondary"] in conn_settings:
+            raise ValueError("Connection string specifies only secondary endpoint.")
+        try:
+            primary = (
+                f"{conn_settings['DEFAULTENDPOINTSPROTOCOL']}://"
+                f"{conn_settings['ACCOUNTNAME']}.{service}.{conn_settings['ENDPOINTSUFFIX']}"
+            )
+            secondary = (
+                f"{conn_settings['ACCOUNTNAME']}-secondary."
+                f"{service}.{conn_settings['ENDPOINTSUFFIX']}"
+            )
+        except KeyError:
+            pass
+
+    if not primary:
+        try:
+            primary = (
+                f"https://{conn_settings['ACCOUNTNAME']}."
+                f"{service}.{conn_settings.get('ENDPOINTSUFFIX', SERVICE_HOST_BASE)}"
+            )
+        except KeyError as exc:
+            raise ValueError("Connection string missing required connection details.") from exc
+    if service == "dfs":
+        primary = primary.replace(".blob.", ".dfs.")
+        if secondary:
+            secondary = secondary.replace(".blob.", ".dfs.")
+    return primary, secondary, credential
+
+class AsyncTransportWrapper(AsyncHttpTransport):
+    """Wrapper class that ensures that an inner client created
+    by a `get_client` method does not close the outer transport for the parent
+    when used in a context manager.
+    """
+    def __init__(self, async_transport):
+        self._transport = async_transport
+
+    async def send(self, request, **kwargs):
+        return await self._transport.send(request, **kwargs)
+
+    async def open(self):
+        pass
+
+    async def close(self):
+        pass
+
+    async def __aenter__(self):
+        pass
+
+    async def __aexit__(self, *args):
+        pass
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/fileshare/_shared/constants.py b/.venv/lib/python3.12/site-packages/azure/storage/fileshare/_shared/constants.py
new file mode 100644
index 00000000..0b4b029a
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/fileshare/_shared/constants.py
@@ -0,0 +1,19 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+
+from .._serialize import _SUPPORTED_API_VERSIONS
+
+
+X_MS_VERSION = _SUPPORTED_API_VERSIONS[-1]
+
+# Default socket timeouts, in seconds
+CONNECTION_TIMEOUT = 20
+READ_TIMEOUT = 60
+
+DEFAULT_OAUTH_SCOPE = "/.default"
+STORAGE_OAUTH_SCOPE = "https://storage.azure.com/.default"
+
+SERVICE_HOST_BASE = 'core.windows.net'
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/fileshare/_shared/models.py b/.venv/lib/python3.12/site-packages/azure/storage/fileshare/_shared/models.py
new file mode 100644
index 00000000..403e6b8b
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/fileshare/_shared/models.py
@@ -0,0 +1,585 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+# pylint: disable=too-many-instance-attributes
+from enum import Enum
+from typing import Optional
+
+from azure.core import CaseInsensitiveEnumMeta
+from azure.core.configuration import Configuration
+from azure.core.pipeline.policies import UserAgentPolicy
+
+
+def get_enum_value(value):
+    if value is None or value in ["None", ""]:
+        return None
+    try:
+        return value.value
+    except AttributeError:
+        return value
+
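+# --- Editorial example (not part of the upstream module): a usage sketch.
+#
+#     get_enum_value(StorageErrorCode.RESOURCE_NOT_FOUND)  # -> 'ResourceNotFound'
+#     get_enum_value("")                                   # -> None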
+
+class StorageErrorCode(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+
+    # Generic storage values
+    ACCOUNT_ALREADY_EXISTS = "AccountAlreadyExists"
+    ACCOUNT_BEING_CREATED = "AccountBeingCreated"
+    ACCOUNT_IS_DISABLED = "AccountIsDisabled"
+    AUTHENTICATION_FAILED = "AuthenticationFailed"
+    AUTHORIZATION_FAILURE = "AuthorizationFailure"
+    NO_AUTHENTICATION_INFORMATION = "NoAuthenticationInformation"
+    CONDITION_HEADERS_NOT_SUPPORTED = "ConditionHeadersNotSupported"
+    CONDITION_NOT_MET = "ConditionNotMet"
+    EMPTY_METADATA_KEY = "EmptyMetadataKey"
+    INSUFFICIENT_ACCOUNT_PERMISSIONS = "InsufficientAccountPermissions"
+    INTERNAL_ERROR = "InternalError"
+    INVALID_AUTHENTICATION_INFO = "InvalidAuthenticationInfo"
+    INVALID_HEADER_VALUE = "InvalidHeaderValue"
+    INVALID_HTTP_VERB = "InvalidHttpVerb"
+    INVALID_INPUT = "InvalidInput"
+    INVALID_MD5 = "InvalidMd5"
+    INVALID_METADATA = "InvalidMetadata"
+    INVALID_QUERY_PARAMETER_VALUE = "InvalidQueryParameterValue"
+    INVALID_RANGE = "InvalidRange"
+    INVALID_RESOURCE_NAME = "InvalidResourceName"
+    INVALID_URI = "InvalidUri"
+    INVALID_XML_DOCUMENT = "InvalidXmlDocument"
+    INVALID_XML_NODE_VALUE = "InvalidXmlNodeValue"
+    MD5_MISMATCH = "Md5Mismatch"
+    METADATA_TOO_LARGE = "MetadataTooLarge"
+    MISSING_CONTENT_LENGTH_HEADER = "MissingContentLengthHeader"
+    MISSING_REQUIRED_QUERY_PARAMETER = "MissingRequiredQueryParameter"
+    MISSING_REQUIRED_HEADER = "MissingRequiredHeader"
+    MISSING_REQUIRED_XML_NODE = "MissingRequiredXmlNode"
+    MULTIPLE_CONDITION_HEADERS_NOT_SUPPORTED = "MultipleConditionHeadersNotSupported"
+    OPERATION_TIMED_OUT = "OperationTimedOut"
+    OUT_OF_RANGE_INPUT = "OutOfRangeInput"
+    OUT_OF_RANGE_QUERY_PARAMETER_VALUE = "OutOfRangeQueryParameterValue"
+    REQUEST_BODY_TOO_LARGE = "RequestBodyTooLarge"
+    RESOURCE_TYPE_MISMATCH = "ResourceTypeMismatch"
+    REQUEST_URL_FAILED_TO_PARSE = "RequestUrlFailedToParse"
+    RESOURCE_ALREADY_EXISTS = "ResourceAlreadyExists"
+    RESOURCE_NOT_FOUND = "ResourceNotFound"
+    SERVER_BUSY = "ServerBusy"
+    UNSUPPORTED_HEADER = "UnsupportedHeader"
+    UNSUPPORTED_XML_NODE = "UnsupportedXmlNode"
+    UNSUPPORTED_QUERY_PARAMETER = "UnsupportedQueryParameter"
+    UNSUPPORTED_HTTP_VERB = "UnsupportedHttpVerb"
+
+    # Blob values
+    APPEND_POSITION_CONDITION_NOT_MET = "AppendPositionConditionNotMet"
+    BLOB_ACCESS_TIER_NOT_SUPPORTED_FOR_ACCOUNT_TYPE = "BlobAccessTierNotSupportedForAccountType"
+    BLOB_ALREADY_EXISTS = "BlobAlreadyExists"
+    BLOB_NOT_FOUND = "BlobNotFound"
+    BLOB_OVERWRITTEN = "BlobOverwritten"
+    BLOB_TIER_INADEQUATE_FOR_CONTENT_LENGTH = "BlobTierInadequateForContentLength"
+    BLOCK_COUNT_EXCEEDS_LIMIT = "BlockCountExceedsLimit"
+    BLOCK_LIST_TOO_LONG = "BlockListTooLong"
+    CANNOT_CHANGE_TO_LOWER_TIER = "CannotChangeToLowerTier"
+    CANNOT_VERIFY_COPY_SOURCE = "CannotVerifyCopySource"
+    CONTAINER_ALREADY_EXISTS = "ContainerAlreadyExists"
+    CONTAINER_BEING_DELETED = "ContainerBeingDeleted"
+    CONTAINER_DISABLED = "ContainerDisabled"
+    CONTAINER_NOT_FOUND = "ContainerNotFound"
+    CONTENT_LENGTH_LARGER_THAN_TIER_LIMIT = "ContentLengthLargerThanTierLimit"
+    COPY_ACROSS_ACCOUNTS_NOT_SUPPORTED = "CopyAcrossAccountsNotSupported"
+    COPY_ID_MISMATCH = "CopyIdMismatch"
+    FEATURE_VERSION_MISMATCH = "FeatureVersionMismatch"
+    INCREMENTAL_COPY_BLOB_MISMATCH = "IncrementalCopyBlobMismatch"
+    INCREMENTAL_COPY_OF_EARLIER_VERSION_SNAPSHOT_NOT_ALLOWED = "IncrementalCopyOfEarlierVersionSnapshotNotAllowed"
+    #: Deprecated: Please use INCREMENTAL_COPY_OF_EARLIER_VERSION_SNAPSHOT_NOT_ALLOWED instead.
+    INCREMENTAL_COPY_OF_ERALIER_VERSION_SNAPSHOT_NOT_ALLOWED = "IncrementalCopyOfEarlierVersionSnapshotNotAllowed"
+    INCREMENTAL_COPY_SOURCE_MUST_BE_SNAPSHOT = "IncrementalCopySourceMustBeSnapshot"
+    INFINITE_LEASE_DURATION_REQUIRED = "InfiniteLeaseDurationRequired"
+    INVALID_BLOB_OR_BLOCK = "InvalidBlobOrBlock"
+    INVALID_BLOB_TIER = "InvalidBlobTier"
+    INVALID_BLOB_TYPE = "InvalidBlobType"
+    INVALID_BLOCK_ID = "InvalidBlockId"
+    INVALID_BLOCK_LIST = "InvalidBlockList"
+    INVALID_OPERATION = "InvalidOperation"
+    INVALID_PAGE_RANGE = "InvalidPageRange"
+    INVALID_SOURCE_BLOB_TYPE = "InvalidSourceBlobType"
+    INVALID_SOURCE_BLOB_URL = "InvalidSourceBlobUrl"
+    INVALID_VERSION_FOR_PAGE_BLOB_OPERATION = "InvalidVersionForPageBlobOperation"
+    LEASE_ALREADY_PRESENT = "LeaseAlreadyPresent"
+    LEASE_ALREADY_BROKEN = "LeaseAlreadyBroken"
+    LEASE_ID_MISMATCH_WITH_BLOB_OPERATION = "LeaseIdMismatchWithBlobOperation"
+    LEASE_ID_MISMATCH_WITH_CONTAINER_OPERATION = "LeaseIdMismatchWithContainerOperation"
+    LEASE_ID_MISMATCH_WITH_LEASE_OPERATION = "LeaseIdMismatchWithLeaseOperation"
+    LEASE_ID_MISSING = "LeaseIdMissing"
+    LEASE_IS_BREAKING_AND_CANNOT_BE_ACQUIRED = "LeaseIsBreakingAndCannotBeAcquired"
+    LEASE_IS_BREAKING_AND_CANNOT_BE_CHANGED = "LeaseIsBreakingAndCannotBeChanged"
+    LEASE_IS_BROKEN_AND_CANNOT_BE_RENEWED = "LeaseIsBrokenAndCannotBeRenewed"
+    LEASE_LOST = "LeaseLost"
+    LEASE_NOT_PRESENT_WITH_BLOB_OPERATION = "LeaseNotPresentWithBlobOperation"
+    LEASE_NOT_PRESENT_WITH_CONTAINER_OPERATION = "LeaseNotPresentWithContainerOperation"
+    LEASE_NOT_PRESENT_WITH_LEASE_OPERATION = "LeaseNotPresentWithLeaseOperation"
+    MAX_BLOB_SIZE_CONDITION_NOT_MET = "MaxBlobSizeConditionNotMet"
+    NO_PENDING_COPY_OPERATION = "NoPendingCopyOperation"
+    OPERATION_NOT_ALLOWED_ON_INCREMENTAL_COPY_BLOB = "OperationNotAllowedOnIncrementalCopyBlob"
+    PENDING_COPY_OPERATION = "PendingCopyOperation"
+    PREVIOUS_SNAPSHOT_CANNOT_BE_NEWER = "PreviousSnapshotCannotBeNewer"
+    PREVIOUS_SNAPSHOT_NOT_FOUND = "PreviousSnapshotNotFound"
+    PREVIOUS_SNAPSHOT_OPERATION_NOT_SUPPORTED = "PreviousSnapshotOperationNotSupported"
+    SEQUENCE_NUMBER_CONDITION_NOT_MET = "SequenceNumberConditionNotMet"
+    SEQUENCE_NUMBER_INCREMENT_TOO_LARGE = "SequenceNumberIncrementTooLarge"
+    SNAPSHOT_COUNT_EXCEEDED = "SnapshotCountExceeded"
+    SNAPSHOT_OPERATION_RATE_EXCEEDED = "SnapshotOperationRateExceeded"
+    #: Deprecated: Please use SNAPSHOT_OPERATION_RATE_EXCEEDED instead.
+    SNAPHOT_OPERATION_RATE_EXCEEDED = "SnapshotOperationRateExceeded"
+    SNAPSHOTS_PRESENT = "SnapshotsPresent"
+    SOURCE_CONDITION_NOT_MET = "SourceConditionNotMet"
+    SYSTEM_IN_USE = "SystemInUse"
+    TARGET_CONDITION_NOT_MET = "TargetConditionNotMet"
+    UNAUTHORIZED_BLOB_OVERWRITE = "UnauthorizedBlobOverwrite"
+    BLOB_BEING_REHYDRATED = "BlobBeingRehydrated"
+    BLOB_ARCHIVED = "BlobArchived"
+    BLOB_NOT_ARCHIVED = "BlobNotArchived"
+
+    # Queue values
+    INVALID_MARKER = "InvalidMarker"
+    MESSAGE_NOT_FOUND = "MessageNotFound"
+    MESSAGE_TOO_LARGE = "MessageTooLarge"
+    POP_RECEIPT_MISMATCH = "PopReceiptMismatch"
+    QUEUE_ALREADY_EXISTS = "QueueAlreadyExists"
+    QUEUE_BEING_DELETED = "QueueBeingDeleted"
+    QUEUE_DISABLED = "QueueDisabled"
+    QUEUE_NOT_EMPTY = "QueueNotEmpty"
+    QUEUE_NOT_FOUND = "QueueNotFound"
+
+    # File values
+    CANNOT_DELETE_FILE_OR_DIRECTORY = "CannotDeleteFileOrDirectory"
+    CLIENT_CACHE_FLUSH_DELAY = "ClientCacheFlushDelay"
+    DELETE_PENDING = "DeletePending"
+    DIRECTORY_NOT_EMPTY = "DirectoryNotEmpty"
+    FILE_LOCK_CONFLICT = "FileLockConflict"
+    FILE_SHARE_PROVISIONED_BANDWIDTH_DOWNGRADE_NOT_ALLOWED = "FileShareProvisionedBandwidthDowngradeNotAllowed"
+    FILE_SHARE_PROVISIONED_IOPS_DOWNGRADE_NOT_ALLOWED = "FileShareProvisionedIopsDowngradeNotAllowed"
+    INVALID_FILE_OR_DIRECTORY_PATH_NAME = "InvalidFileOrDirectoryPathName"
+    PARENT_NOT_FOUND = "ParentNotFound"
+    READ_ONLY_ATTRIBUTE = "ReadOnlyAttribute"
+    SHARE_ALREADY_EXISTS = "ShareAlreadyExists"
+    SHARE_BEING_DELETED = "ShareBeingDeleted"
+    SHARE_DISABLED = "ShareDisabled"
+    SHARE_NOT_FOUND = "ShareNotFound"
+    SHARING_VIOLATION = "SharingViolation"
+    SHARE_SNAPSHOT_IN_PROGRESS = "ShareSnapshotInProgress"
+    SHARE_SNAPSHOT_COUNT_EXCEEDED = "ShareSnapshotCountExceeded"
+    SHARE_SNAPSHOT_OPERATION_NOT_SUPPORTED = "ShareSnapshotOperationNotSupported"
+    SHARE_HAS_SNAPSHOTS = "ShareHasSnapshots"
+    CONTAINER_QUOTA_DOWNGRADE_NOT_ALLOWED = "ContainerQuotaDowngradeNotAllowed"
+
+    # DataLake values
+    CONTENT_LENGTH_MUST_BE_ZERO = 'ContentLengthMustBeZero'
+    PATH_ALREADY_EXISTS = 'PathAlreadyExists'
+    INVALID_FLUSH_POSITION = 'InvalidFlushPosition'
+    INVALID_PROPERTY_NAME = 'InvalidPropertyName'
+    INVALID_SOURCE_URI = 'InvalidSourceUri'
+    UNSUPPORTED_REST_VERSION = 'UnsupportedRestVersion'
+    FILE_SYSTEM_NOT_FOUND = 'FilesystemNotFound'
+    PATH_NOT_FOUND = 'PathNotFound'
+    RENAME_DESTINATION_PARENT_PATH_NOT_FOUND = 'RenameDestinationParentPathNotFound'
+    SOURCE_PATH_NOT_FOUND = 'SourcePathNotFound'
+    DESTINATION_PATH_IS_BEING_DELETED = 'DestinationPathIsBeingDeleted'
+    FILE_SYSTEM_ALREADY_EXISTS = 'FilesystemAlreadyExists'
+    FILE_SYSTEM_BEING_DELETED = 'FilesystemBeingDeleted'
+    INVALID_DESTINATION_PATH = 'InvalidDestinationPath'
+    INVALID_RENAME_SOURCE_PATH = 'InvalidRenameSourcePath'
+    INVALID_SOURCE_OR_DESTINATION_RESOURCE_TYPE = 'InvalidSourceOrDestinationResourceType'
+    LEASE_IS_ALREADY_BROKEN = 'LeaseIsAlreadyBroken'
+    LEASE_NAME_MISMATCH = 'LeaseNameMismatch'
+    PATH_CONFLICT = 'PathConflict'
+    SOURCE_PATH_IS_BEING_DELETED = 'SourcePathIsBeingDeleted'
+
+
+class DictMixin(object):
+
+    def __setitem__(self, key, item):
+        self.__dict__[key] = item
+
+    def __getitem__(self, key):
+        return self.__dict__[key]
+
+    def __repr__(self):
+        return str(self)
+
+    def __len__(self):
+        return len(self.keys())
+
+    def __delitem__(self, key):
+        self.__dict__[key] = None
+
+    # Compare objects by comparing all attributes.
+    def __eq__(self, other):
+        if isinstance(other, self.__class__):
+            return self.__dict__ == other.__dict__
+        return False
+
+    # Compare objects by comparing all attributes.
+    def __ne__(self, other):
+        return not self.__eq__(other)
+
+    def __str__(self):
+        return str({k: v for k, v in self.__dict__.items() if not k.startswith('_')})
+
+    def __contains__(self, key):
+        return key in self.__dict__
+
+    def has_key(self, k):
+        return k in self.__dict__
+
+    def update(self, *args, **kwargs):
+        return self.__dict__.update(*args, **kwargs)
+
+    def keys(self):
+        return [k for k in self.__dict__ if not k.startswith('_')]
+
+    def values(self):
+        return [v for k, v in self.__dict__.items() if not k.startswith('_')]
+
+    def items(self):
+        return [(k, v) for k, v in self.__dict__.items() if not k.startswith('_')]
+
+    def get(self, key, default=None):
+        if key in self.__dict__:
+            return self.__dict__[key]
+        return default
+
+
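+# Editor's illustration, not part of the vendored source: DictMixin layers a
+# read/write dict interface over a class's public attributes, while
+# underscore-prefixed attributes stay hidden. `_SampleProps` is a hypothetical
+# model used only for this sketch.
+def _editor_example_dictmixin():
+    class _SampleProps(DictMixin):
+        def __init__(self):
+            self.name = "share1"
+            self.quota = 5
+            self._internal = "hidden"  # excluded from keys()/items()
+
+    props = _SampleProps()
+    assert props["name"] == "share1"
+    assert sorted(props.keys()) == ["name", "quota"]
+    assert props.get("missing") is None
+
+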
+class LocationMode(object):
+    """
+    Specifies the location the request should be sent to. This mode only applies
+    for RA-GRS accounts which allow secondary read access. All other account types
+    must use PRIMARY.
+    """
+
+    PRIMARY = 'primary'  #: Requests should be sent to the primary location.
+    SECONDARY = 'secondary'  #: Requests should be sent to the secondary location, if possible.
+
+
+class ResourceTypes(object):
+    """
+    Specifies the resource types that are accessible with the account SAS.
+
+    :param bool service:
+        Access to service-level APIs (e.g., Get/Set Service Properties,
+        Get Service Stats, List Containers/Queues/Shares)
+    :param bool container:
+        Access to container-level APIs (e.g., Create/Delete Container,
+        Create/Delete Queue, Create/Delete Share,
+        List Blobs/Files and Directories)
+    :param bool object:
+        Access to object-level APIs for blobs, queue messages, and
+        files (e.g. Put Blob, Query Entity, Get Messages, Create File, etc.)
+    """
+
+    service: bool = False
+    container: bool = False
+    object: bool = False
+    _str: str
+
+    def __init__(
+        self,
+        service: bool = False,
+        container: bool = False,
+        object: bool = False  # pylint: disable=redefined-builtin
+    ) -> None:
+        self.service = service
+        self.container = container
+        self.object = object
+        self._str = (('s' if self.service else '') +
+                ('c' if self.container else '') +
+                ('o' if self.object else ''))
+
+    def __str__(self):
+        return self._str
+
+    @classmethod
+    def from_string(cls, string):
+        """Create a ResourceTypes from a string.
+
+        To specify service, container, or object you need only to
+        include the first letter of the word in the string. E.g. for service and
+        container, you would provide a string "sc".
+
+        :param str string: Specify service, container, or object in
+            the string with the first letter of the word.
+        :return: A ResourceTypes object
+        :rtype: ~azure.storage.fileshare.ResourceTypes
+        """
+        res_service = 's' in string
+        res_container = 'c' in string
+        res_object = 'o' in string
+
+        parsed = cls(res_service, res_container, res_object)
+        parsed._str = string
+        return parsed
+
+
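+# Editor's illustration, not part of the vendored source: the string form and
+# from_string are inverses over the documented letters.
+def _editor_example_resource_types():
+    assert str(ResourceTypes(service=True, container=True)) == "sc"
+    parsed = ResourceTypes.from_string("sco")
+    assert parsed.service and parsed.container and parsed.object
+    assert str(parsed) == "sco"
+
+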
+class AccountSasPermissions(object):
+    """
+    Permissions class to be used with the generate_account_sas
+    function and for the AccessPolicies used with set_*_acl. There are two types of
+    SAS which may be used to grant resource access. One is to grant access to a
+    specific resource (resource-specific). Another is to grant access to the
+    entire service for a specific account and allow certain operations based on
+    perms found here.
+
+    :param bool read:
+        Valid for all signed resources types (Service, Container, and Object).
+        Permits read permissions to the specified resource type.
+    :param bool write:
+        Valid for all signed resources types (Service, Container, and Object).
+        Permits write permissions to the specified resource type.
+    :param bool delete:
+        Valid for Container and Object resource types, except for queue messages.
+    :param bool delete_previous_version:
+        Delete the previous blob version for the versioning enabled storage account.
+    :param bool list:
+        Valid for Service and Container resource types only.
+    :param bool add:
+        Valid for the following Object resource types only: queue messages, and append blobs.
+    :param bool create:
+        Valid for the following Object resource types only: blobs and files.
+        Users can create new blobs or files, but may not overwrite existing
+        blobs or files.
+    :param bool update:
+        Valid for the following Object resource types only: queue messages.
+    :param bool process:
+        Valid for the following Object resource type only: queue messages.
+    :keyword bool tag:
+        To enable set or get tags on the blobs in the container.
+    :keyword bool filter_by_tags:
+        To enable get blobs by tags, this should be used together with list permission.
+    :keyword bool set_immutability_policy:
+        To enable operations related to set/delete immutability policy.
+        To get immutability policy, you just need read permission.
+    :keyword bool permanent_delete:
+        To enable permanent delete of blobs.
+        Valid for the Object resource type of Blob only.
+    """
+
+    read: bool = False
+    write: bool = False
+    delete: bool = False
+    delete_previous_version: bool = False
+    list: bool = False
+    add: bool = False
+    create: bool = False
+    update: bool = False
+    process: bool = False
+    tag: bool = False
+    filter_by_tags: bool = False
+    set_immutability_policy: bool = False
+    permanent_delete: bool = False
+
+    def __init__(
+        self,
+        read: bool = False,
+        write: bool = False,
+        delete: bool = False,
+        list: bool = False,  # pylint: disable=redefined-builtin
+        add: bool = False,
+        create: bool = False,
+        update: bool = False,
+        process: bool = False,
+        delete_previous_version: bool = False,
+        **kwargs
+    ) -> None:
+        self.read = read
+        self.write = write
+        self.delete = delete
+        self.delete_previous_version = delete_previous_version
+        self.permanent_delete = kwargs.pop('permanent_delete', False)
+        self.list = list
+        self.add = add
+        self.create = create
+        self.update = update
+        self.process = process
+        self.tag = kwargs.pop('tag', False)
+        self.filter_by_tags = kwargs.pop('filter_by_tags', False)
+        self.set_immutability_policy = kwargs.pop('set_immutability_policy', False)
+        self._str = (('r' if self.read else '') +
+                     ('w' if self.write else '') +
+                     ('d' if self.delete else '') +
+                     ('x' if self.delete_previous_version else '') +
+                     ('y' if self.permanent_delete else '') +
+                     ('l' if self.list else '') +
+                     ('a' if self.add else '') +
+                     ('c' if self.create else '') +
+                     ('u' if self.update else '') +
+                     ('p' if self.process else '') +
+                     ('f' if self.filter_by_tags else '') +
+                     ('t' if self.tag else '') +
+                     ('i' if self.set_immutability_policy else '')
+                     )
+
+    def __str__(self):
+        return self._str
+
+    @classmethod
+    def from_string(cls, permission):
+        """Create AccountSasPermissions from a string.
+
+        To specify read, write, delete, etc. permissions you need only to
+        include the first letter of the word in the string. E.g. for read and write
+        permissions you would provide a string "rw".
+
+        :param str permission: Specify permissions in
+            the string with the first letter of the word.
+        :return: An AccountSasPermissions object
+        :rtype: ~azure.storage.fileshare.AccountSasPermissions
+        """
+        p_read = 'r' in permission
+        p_write = 'w' in permission
+        p_delete = 'd' in permission
+        p_delete_previous_version = 'x' in permission
+        p_permanent_delete = 'y' in permission
+        p_list = 'l' in permission
+        p_add = 'a' in permission
+        p_create = 'c' in permission
+        p_update = 'u' in permission
+        p_process = 'p' in permission
+        p_tag = 't' in permission
+        p_filter_by_tags = 'f' in permission
+        p_set_immutability_policy = 'i' in permission
+        parsed = cls(read=p_read, write=p_write, delete=p_delete, delete_previous_version=p_delete_previous_version,
+                     list=p_list, add=p_add, create=p_create, update=p_update, process=p_process, tag=p_tag,
+                     filter_by_tags=p_filter_by_tags, set_immutability_policy=p_set_immutability_policy,
+                     permanent_delete=p_permanent_delete)
+
+        return parsed
+
+
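+# Editor's illustration, not part of the vendored source: permission flags
+# serialize to the SAS permission string in a fixed order, and from_string
+# round-trips them.
+def _editor_example_account_sas_permissions():
+    perms = AccountSasPermissions(read=True, write=True, list=True)
+    assert str(perms) == "rwl"
+    parsed = AccountSasPermissions.from_string("rwl")
+    assert parsed.read and parsed.write and parsed.list
+    assert not parsed.delete
+
+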
+class Services(object):
+    """Specifies the services accessible with the account SAS.
+
+    :keyword bool blob:
+        Access for the `~azure.storage.blob.BlobServiceClient`. Default is False.
+    :keyword bool queue:
+        Access for the `~azure.storage.queue.QueueServiceClient`. Default is False.
+    :keyword bool fileshare:
+        Access for the `~azure.storage.fileshare.ShareServiceClient`. Default is False.
+    """
+
+    def __init__(
+        self, *,
+        blob: bool = False,
+        queue: bool = False,
+        fileshare: bool = False
+    ) -> None:
+        self.blob = blob
+        self.queue = queue
+        self.fileshare = fileshare
+        self._str = (('b' if self.blob else '') +
+                ('q' if self.queue else '') +
+                ('f' if self.fileshare else ''))
+
+    def __str__(self):
+        return self._str
+
+    @classmethod
+    def from_string(cls, string):
+        """Create Services from a string.
+
+        To specify blob, queue, or file you need only to
+        include the first letter of the word in the string. E.g. for blob and queue
+        you would provide a string "bq".
+
+        :param str string: Specify blob, queue, or file in
+            the string with the first letter of the word.
+        :return: A Services object
+        :rtype: ~azure.storage.fileshare.Services
+        """
+        res_blob = 'b' in string
+        res_queue = 'q' in string
+        res_file = 'f' in string
+
+        parsed = cls(blob=res_blob, queue=res_queue, fileshare=res_file)
+        parsed._str = string
+        return parsed
+
+
+class UserDelegationKey(object):
+    """
+    Represents a user delegation key, provided to the user by Azure Storage
+    based on their Azure Active Directory access token.
+
+    The fields are saved as simple strings since the user does not have to interact with this object;
+    to generate an identity SAS, the user can simply pass it to the right API.
+    """
+
+    signed_oid: Optional[str] = None
+    """Object ID of this token."""
+    signed_tid: Optional[str] = None
+    """Tenant ID of the tenant that issued this token."""
+    signed_start: Optional[str] = None
+    """The datetime this token becomes valid."""
+    signed_expiry: Optional[str] = None
+    """The datetime this token expires."""
+    signed_service: Optional[str] = None
+    """What service this key is valid for."""
+    signed_version: Optional[str] = None
+    """The version identifier of the REST service that created this token."""
+    value: Optional[str] = None
+    """The user delegation key."""
+
+    def __init__(self):
+        self.signed_oid = None
+        self.signed_tid = None
+        self.signed_start = None
+        self.signed_expiry = None
+        self.signed_service = None
+        self.signed_version = None
+        self.value = None
+
+
+class StorageConfiguration(Configuration):
+    """
+    Specifies the configurable values used in Azure Storage.
+
+    :param int max_single_put_size: If the blob size is less than or equal to max_single_put_size, then the blob will
+        be uploaded with only one HTTP PUT request. If the blob size is larger than max_single_put_size,
+        the blob will be uploaded in chunks. Defaults to 64*1024*1024, or 64MB.
+    :param int copy_polling_interval: The interval in seconds for polling copy operations.
+    :param int max_block_size: The maximum chunk size for uploading a block blob in chunks.
+        Defaults to 4*1024*1024, or 4MB.
+    :param int min_large_block_upload_threshold: The minimum chunk size required to use the memory efficient
+        algorithm when uploading a block blob.
+    :param bool use_byte_buffer: Use a byte buffer for block blob uploads. Defaults to False.
+    :param int max_page_size: The maximum chunk size for uploading a page blob. Defaults to 4*1024*1024, or 4MB.
+    :param int min_large_chunk_upload_threshold: The max size for a single put operation.
+    :param int max_single_get_size: The maximum size for a blob to be downloaded in a single call;
+        any remainder is downloaded in chunks (potentially in parallel). Defaults to 32*1024*1024, or 32MB.
+    :param int max_chunk_get_size: The maximum chunk size used for downloading a blob. Defaults to 4*1024*1024,
+        or 4MB.
+    :param int max_range_size: The max range size for file upload.
+
+    """
+
+    max_single_put_size: int
+    copy_polling_interval: int
+    max_block_size: int
+    min_large_block_upload_threshold: int
+    use_byte_buffer: bool
+    max_page_size: int
+    min_large_chunk_upload_threshold: int
+    max_single_get_size: int
+    max_chunk_get_size: int
+    max_range_size: int
+    user_agent_policy: UserAgentPolicy
+
+    def __init__(self, **kwargs):
+        super(StorageConfiguration, self).__init__(**kwargs)
+        self.max_single_put_size = kwargs.pop('max_single_put_size', 64 * 1024 * 1024)
+        self.copy_polling_interval = 15
+        self.max_block_size = kwargs.pop('max_block_size', 4 * 1024 * 1024)
+        self.min_large_block_upload_threshold = kwargs.get('min_large_block_upload_threshold', 4 * 1024 * 1024 + 1)
+        self.use_byte_buffer = kwargs.pop('use_byte_buffer', False)
+        self.max_page_size = kwargs.pop('max_page_size', 4 * 1024 * 1024)
+        self.min_large_chunk_upload_threshold = kwargs.pop('min_large_chunk_upload_threshold', 100 * 1024 * 1024 + 1)
+        self.max_single_get_size = kwargs.pop('max_single_get_size', 32 * 1024 * 1024)
+        self.max_chunk_get_size = kwargs.pop('max_chunk_get_size', 4 * 1024 * 1024)
+        self.max_range_size = kwargs.pop('max_range_size', 4 * 1024 * 1024)
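+
+
+# Editor's illustration, not part of the vendored source: sizes are taken from
+# kwargs with the defaults documented above; remaining kwargs flow through to
+# azure.core's Configuration.
+def _editor_example_storage_configuration():
+    config = StorageConfiguration(max_single_put_size=8 * 1024 * 1024)
+    assert config.max_single_put_size == 8 * 1024 * 1024
+    assert config.max_block_size == 4 * 1024 * 1024  # default
+    assert config.copy_polling_interval == 15  # fixed, not a kwarg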
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/fileshare/_shared/parser.py b/.venv/lib/python3.12/site-packages/azure/storage/fileshare/_shared/parser.py
new file mode 100644
index 00000000..112c1984
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/fileshare/_shared/parser.py
@@ -0,0 +1,53 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+
+from datetime import datetime, timezone
+from typing import Optional
+
+EPOCH_AS_FILETIME = 116444736000000000  # January 1, 1970 as MS filetime
+HUNDREDS_OF_NANOSECONDS = 10000000
+
+
+def _to_utc_datetime(value: datetime) -> str:
+    return value.strftime('%Y-%m-%dT%H:%M:%SZ')
+
+
+def _rfc_1123_to_datetime(rfc_1123: str) -> Optional[datetime]:
+    """Converts an RFC 1123 date string to a UTC datetime.
+
+    :param str rfc_1123: The time and date in RFC 1123 format.
+    :returns: The time and date in UTC datetime format, or None if the input is empty.
+    :rtype: Optional[datetime]
+    """
+    if not rfc_1123:
+        return None
+
+    return datetime.strptime(rfc_1123, "%a, %d %b %Y %H:%M:%S %Z")
+
+
+def _filetime_to_datetime(filetime: str) -> Optional[datetime]:
+    """Converts an MS filetime string to a UTC datetime. "0" indicates None.
+    If parsing MS Filetime fails, tries RFC 1123 as backup.
+
+    :param str filetime: The time and date in MS filetime format.
+    :returns: The time and date in UTC datetime format, or None for "0" or empty input.
+    :rtype: Optional[datetime]
+    """
+    if not filetime:
+        return None
+
+    # Try to convert to MS Filetime
+    try:
+        temp_filetime = int(filetime)
+        if temp_filetime == 0:
+            return None
+
+        return datetime.fromtimestamp((temp_filetime - EPOCH_AS_FILETIME) / HUNDREDS_OF_NANOSECONDS, tz=timezone.utc)
+    except ValueError:
+        pass
+
+    # Try RFC 1123 as backup
+    return _rfc_1123_to_datetime(filetime)
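+
+
+# Editor's illustration, not part of the vendored source: EPOCH_AS_FILETIME is
+# the Unix epoch in 100-ns FILETIME ticks, so that exact tick count maps back to
+# 1970-01-01 UTC; "0" means None; non-numeric input falls back to RFC 1123.
+def _editor_example_filetime():
+    assert _filetime_to_datetime(str(EPOCH_AS_FILETIME)) == datetime(1970, 1, 1, tzinfo=timezone.utc)
+    assert _filetime_to_datetime("0") is None
+    assert _filetime_to_datetime("Thu, 01 Jan 1970 00:00:00 GMT") == datetime(1970, 1, 1)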
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/fileshare/_shared/policies.py b/.venv/lib/python3.12/site-packages/azure/storage/fileshare/_shared/policies.py
new file mode 100644
index 00000000..ee75cd5a
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/fileshare/_shared/policies.py
@@ -0,0 +1,694 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+
+import base64
+import hashlib
+import logging
+import random
+import re
+import uuid
+from io import SEEK_SET, UnsupportedOperation
+from time import time
+from typing import Any, Dict, Optional, TYPE_CHECKING
+from urllib.parse import (
+    parse_qsl,
+    urlencode,
+    urlparse,
+    urlunparse,
+)
+from wsgiref.handlers import format_date_time
+
+from azure.core.exceptions import AzureError, ServiceRequestError, ServiceResponseError
+from azure.core.pipeline.policies import (
+    BearerTokenCredentialPolicy,
+    HeadersPolicy,
+    HTTPPolicy,
+    NetworkTraceLoggingPolicy,
+    RequestHistory,
+    SansIOHTTPPolicy
+)
+
+from .authentication import AzureSigningError, StorageHttpChallenge
+from .constants import DEFAULT_OAUTH_SCOPE
+from .models import LocationMode
+
+if TYPE_CHECKING:
+    from azure.core.credentials import TokenCredential
+    from azure.core.pipeline.transport import (  # pylint: disable=non-abstract-transport-import
+        PipelineRequest,
+        PipelineResponse
+    )
+
+
+_LOGGER = logging.getLogger(__name__)
+
+
+def encode_base64(data):
+    if isinstance(data, str):
+        data = data.encode('utf-8')
+    encoded = base64.b64encode(data)
+    return encoded.decode('utf-8')
+
+
+# Are we out of retries?
+def is_exhausted(settings):
+    retry_counts = (settings['total'], settings['connect'], settings['read'], settings['status'])
+    retry_counts = list(filter(None, retry_counts))
+    if not retry_counts:
+        return False
+    return min(retry_counts) < 0
+
+
+def retry_hook(settings, **kwargs):
+    if settings['hook']:
+        settings['hook'](retry_count=settings['count'] - 1, location_mode=settings['mode'], **kwargs)
+
+
+# Is this method/status code retryable? (Based on allowlists and control
+# variables such as the number of total retries to allow, whether to
+# respect the Retry-After header, whether this header is present, and
+# whether the returned status code is on the list of status codes to
+# be retried upon in the presence of the aforementioned header)
+def is_retry(response, mode):
+    status = response.http_response.status_code
+    if 300 <= status < 500:
+        # An exception occurred, but in most cases it was expected. Examples could
+        # include a 409 Conflict or 412 Precondition Failed.
+        if status == 404 and mode == LocationMode.SECONDARY:
+            # Response code 404 should be retried if secondary was used.
+            return True
+        if status == 408:
+            # Response code 408 is a timeout and should be retried.
+            return True
+        return False
+    if status >= 500:
+        # Response codes above 500 with the exception of 501 Not Implemented and
+        # 505 Version Not Supported indicate a server issue and should be retried.
+        if status in [501, 505]:
+            return False
+        return True
+    return False
+
+
+def is_checksum_retry(response):
+    # retry if invalid content md5
+    if response.context.get('validate_content', False) and response.http_response.headers.get('content-md5'):
+        computed_md5 = response.http_request.headers.get('content-md5', None) or \
+                            encode_base64(StorageContentValidation.get_content_md5(response.http_response.body()))
+        if response.http_response.headers['content-md5'] != computed_md5:
+            return True
+    return False
+
+
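+# Editor's illustration, not part of the vendored source: how is_retry
+# classifies status codes. The stub classes are hypothetical and carry only the
+# attributes the function reads.
+def _editor_example_is_retry():
+    class _StubHTTP:
+        def __init__(self, status_code):
+            self.status_code = status_code
+
+    class _StubPipeline:
+        def __init__(self, status_code):
+            self.http_response = _StubHTTP(status_code)
+
+    assert is_retry(_StubPipeline(500), LocationMode.PRIMARY)    # server error
+    assert is_retry(_StubPipeline(408), LocationMode.PRIMARY)    # timeout
+    assert is_retry(_StubPipeline(404), LocationMode.SECONDARY)  # possibly stale secondary
+    assert not is_retry(_StubPipeline(404), LocationMode.PRIMARY)
+    assert not is_retry(_StubPipeline(501), LocationMode.PRIMARY)
+
+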
+def urljoin(base_url, stub_url):
+    parsed = urlparse(base_url)
+    parsed = parsed._replace(path=parsed.path + '/' + stub_url)
+    return parsed.geturl()
+
+
+class QueueMessagePolicy(SansIOHTTPPolicy):
+
+    def on_request(self, request):
+        message_id = request.context.options.pop('queue_message_id', None)
+        if message_id:
+            request.http_request.url = urljoin(
+                request.http_request.url,
+                message_id)
+
+
+class StorageHeadersPolicy(HeadersPolicy):
+    request_id_header_name = 'x-ms-client-request-id'
+
+    def on_request(self, request: "PipelineRequest") -> None:
+        super(StorageHeadersPolicy, self).on_request(request)
+        current_time = format_date_time(time())
+        request.http_request.headers['x-ms-date'] = current_time
+
+        custom_id = request.context.options.pop('client_request_id', None)
+        request.http_request.headers['x-ms-client-request-id'] = custom_id or str(uuid.uuid1())
+
+    # def on_response(self, request, response):
+    #     # raise exception if the echoed client request id from the service is not identical to the one we sent
+    #     if self.request_id_header_name in response.http_response.headers:
+
+    #         client_request_id = request.http_request.headers.get(self.request_id_header_name)
+
+    #         if response.http_response.headers[self.request_id_header_name] != client_request_id:
+    #             raise AzureError(
+    #                 "Echoed client request ID: {} does not match sent client request ID: {}.  "
+    #                 "Service request ID: {}".format(
+    #                     response.http_response.headers[self.request_id_header_name], client_request_id,
+    #                     response.http_response.headers['x-ms-request-id']),
+    #                 response=response.http_response
+    #             )
+
+
+class StorageHosts(SansIOHTTPPolicy):
+
+    def __init__(self, hosts=None, **kwargs):  # pylint: disable=unused-argument
+        self.hosts = hosts
+        super(StorageHosts, self).__init__()
+
+    def on_request(self, request: "PipelineRequest") -> None:
+        request.context.options['hosts'] = self.hosts
+        parsed_url = urlparse(request.http_request.url)
+
+        # Detect what location mode we're currently requesting with
+        location_mode = LocationMode.PRIMARY
+        for key, value in self.hosts.items():
+            if parsed_url.netloc == value:
+                location_mode = key
+
+        # See if a specific location mode has been specified, and if so, redirect
+        use_location = request.context.options.pop('use_location', None)
+        if use_location:
+            # Lock retries to the specific location
+            request.context.options['retry_to_secondary'] = False
+            if use_location not in self.hosts:
+                raise ValueError(f"Attempting to use undefined host location {use_location}")
+            if use_location != location_mode:
+                # Update request URL to use the specified location
+                updated = parsed_url._replace(netloc=self.hosts[use_location])
+                request.http_request.url = updated.geturl()
+                location_mode = use_location
+
+        request.context.options['location_mode'] = location_mode
+
+
+class StorageLoggingPolicy(NetworkTraceLoggingPolicy):
+    """A policy that logs HTTP request and response to the DEBUG logger.
+
+    This accepts both global configuration and a per-request override via "logging_enable".
+    """
+
+    def __init__(self, logging_enable: bool = False, **kwargs) -> None:
+        self.logging_body = kwargs.pop("logging_body", False)
+        super(StorageLoggingPolicy, self).__init__(logging_enable=logging_enable, **kwargs)
+
+    def on_request(self, request: "PipelineRequest") -> None:
+        http_request = request.http_request
+        options = request.context.options
+        self.logging_body = self.logging_body or options.pop("logging_body", False)
+        if options.pop("logging_enable", self.enable_http_logger):
+            request.context["logging_enable"] = True
+            if not _LOGGER.isEnabledFor(logging.DEBUG):
+                return
+
+            try:
+                log_url = http_request.url
+                query_params = http_request.query
+                if 'sig' in query_params:
+                    log_url = log_url.replace(query_params['sig'], "sig=*****")
+                _LOGGER.debug("Request URL: %r", log_url)
+                _LOGGER.debug("Request method: %r", http_request.method)
+                _LOGGER.debug("Request headers:")
+                for header, value in http_request.headers.items():
+                    if header.lower() == 'authorization':
+                        value = '*****'
+                    elif header.lower() == 'x-ms-copy-source' and 'sig' in value:
+                        # take the url apart and scrub away the signed signature
+                        scheme, netloc, path, params, query, fragment = urlparse(value)
+                        parsed_qs = dict(parse_qsl(query))
+                        parsed_qs['sig'] = '*****'
+
+                        # the SAS needs to be put back together
+                        value = urlunparse((scheme, netloc, path, params, urlencode(parsed_qs), fragment))
+
+                    _LOGGER.debug("    %r: %r", header, value)
+                _LOGGER.debug("Request body:")
+
+                if self.logging_body:
+                    _LOGGER.debug(str(http_request.body))
+                else:
+                    # We don't want to log the binary data of a file upload.
+                    _LOGGER.debug("Hidden body, please use logging_body to show body")
+            except Exception as err:  # pylint: disable=broad-except
+                _LOGGER.debug("Failed to log request: %r", err)
+
+    def on_response(self, request: "PipelineRequest", response: "PipelineResponse") -> None:
+        if response.context.pop("logging_enable", self.enable_http_logger):
+            if not _LOGGER.isEnabledFor(logging.DEBUG):
+                return
+
+            try:
+                _LOGGER.debug("Response status: %r", response.http_response.status_code)
+                _LOGGER.debug("Response headers:")
+                for res_header, value in response.http_response.headers.items():
+                    _LOGGER.debug("    %r: %r", res_header, value)
+
+                # We don't want to log binary data if the response is a file.
+                _LOGGER.debug("Response content:")
+                pattern = re.compile(r'attachment; ?filename=["\w.]+', re.IGNORECASE)
+                header = response.http_response.headers.get('content-disposition')
+                resp_content_type = response.http_response.headers.get("content-type", "")
+
+                if header and pattern.match(header):
+                    filename = header.partition('=')[2]
+                    _LOGGER.debug("File attachments: %s", filename)
+                elif resp_content_type.endswith("octet-stream"):
+                    _LOGGER.debug("Body contains binary data.")
+                elif resp_content_type.startswith("image"):
+                    _LOGGER.debug("Body contains image data.")
+
+                if self.logging_body and resp_content_type.startswith("text"):
+                    _LOGGER.debug(response.http_response.text())
+                elif self.logging_body:
+                    try:
+                        _LOGGER.debug(response.http_response.body())
+                    except ValueError:
+                        _LOGGER.debug("Body is streamable")
+
+            except Exception as err:  # pylint: disable=broad-except
+                _LOGGER.debug("Failed to log response: %s", repr(err))
+
+
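+# Editor's note: an illustrative sketch, not part of the vendored source. To see
+# this policy's output, attach a DEBUG handler to the "azure.storage" logger and
+# opt in via logging_enable/logging_body (assumed here to be passed as client or
+# per-operation kwargs, as elsewhere in the SDK).
+def _editor_example_enable_storage_logging():
+    import sys
+    logger = logging.getLogger("azure.storage")
+    logger.setLevel(logging.DEBUG)
+    logger.addHandler(logging.StreamHandler(sys.stdout))
+    # e.g. ShareServiceClient(..., logging_enable=True, logging_body=True)
+
+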
+class StorageRequestHook(SansIOHTTPPolicy):
+
+    def __init__(self, **kwargs):
+        self._request_callback = kwargs.get('raw_request_hook')
+        super(StorageRequestHook, self).__init__()
+
+    def on_request(self, request: "PipelineRequest") -> None:
+        request_callback = request.context.options.pop('raw_request_hook', self._request_callback)
+        if request_callback:
+            request_callback(request)
+
+
+class StorageResponseHook(HTTPPolicy):
+
+    def __init__(self, **kwargs):
+        self._response_callback = kwargs.get('raw_response_hook')
+        super(StorageResponseHook, self).__init__()
+
+    def send(self, request: "PipelineRequest") -> "PipelineResponse":
+        # Values could be 0
+        data_stream_total = request.context.get('data_stream_total')
+        if data_stream_total is None:
+            data_stream_total = request.context.options.pop('data_stream_total', None)
+        download_stream_current = request.context.get('download_stream_current')
+        if download_stream_current is None:
+            download_stream_current = request.context.options.pop('download_stream_current', None)
+        upload_stream_current = request.context.get('upload_stream_current')
+        if upload_stream_current is None:
+            upload_stream_current = request.context.options.pop('upload_stream_current', None)
+
+        response_callback = request.context.get('response_callback') or \
+            request.context.options.pop('raw_response_hook', self._response_callback)
+
+        response = self.next.send(request)
+
+        will_retry = is_retry(response, request.context.options.get('mode')) or is_checksum_retry(response)
+        # Auth error could come from Bearer challenge, in which case this request will be made again
+        is_auth_error = response.http_response.status_code == 401
+        should_update_counts = not (will_retry or is_auth_error)
+
+        if should_update_counts and download_stream_current is not None:
+            download_stream_current += int(response.http_response.headers.get('Content-Length', 0))
+            if data_stream_total is None:
+                content_range = response.http_response.headers.get('Content-Range')
+                if content_range:
+                    data_stream_total = int(content_range.split(' ', 1)[1].split('/', 1)[1])
+                else:
+                    data_stream_total = download_stream_current
+        elif should_update_counts and upload_stream_current is not None:
+            upload_stream_current += int(response.http_request.headers.get('Content-Length', 0))
+        for pipeline_obj in [request, response]:
+            if hasattr(pipeline_obj, 'context'):
+                pipeline_obj.context['data_stream_total'] = data_stream_total
+                pipeline_obj.context['download_stream_current'] = download_stream_current
+                pipeline_obj.context['upload_stream_current'] = upload_stream_current
+        if response_callback:
+            response_callback(response)
+            request.context['response_callback'] = response_callback
+        return response
+
+
+class StorageContentValidation(SansIOHTTPPolicy):
+    """A simple policy that sends the given headers
+    with the request.
+
+    This will overwrite any headers already defined in the request.
+    """
+    header_name = 'Content-MD5'
+
+    def __init__(self, **kwargs: Any) -> None:  # pylint: disable=unused-argument
+        super(StorageContentValidation, self).__init__()
+
+    @staticmethod
+    def get_content_md5(data):
+        # Since HTTP does not differentiate between no content and empty content,
+        # we have to perform a None check.
+        data = data or b""
+        md5 = hashlib.md5() # nosec
+        if isinstance(data, bytes):
+            md5.update(data)
+        elif hasattr(data, 'read'):
+            pos = 0
+            try:
+                pos = data.tell()
+            except:  # pylint: disable=bare-except
+                pass
+            for chunk in iter(lambda: data.read(4096), b""):
+                md5.update(chunk)
+            try:
+                data.seek(pos, SEEK_SET)
+            except (AttributeError, IOError) as exc:
+                raise ValueError("Data should be bytes or a seekable file-like object.") from exc
+        else:
+            raise ValueError("Data should be bytes or a seekable file-like object.")
+
+        return md5.digest()
+
+    def on_request(self, request: "PipelineRequest") -> None:
+        validate_content = request.context.options.pop('validate_content', False)
+        if validate_content and request.http_request.method != 'GET':
+            computed_md5 = encode_base64(StorageContentValidation.get_content_md5(request.http_request.data))
+            request.http_request.headers[self.header_name] = computed_md5
+            request.context['validate_content_md5'] = computed_md5
+        request.context['validate_content'] = validate_content
+
+    def on_response(self, request: "PipelineRequest", response: "PipelineResponse") -> None:
+        if response.context.get('validate_content', False) and response.http_response.headers.get('content-md5'):
+            computed_md5 = request.context.get('validate_content_md5') or \
+                encode_base64(StorageContentValidation.get_content_md5(response.http_response.body()))
+            if response.http_response.headers['content-md5'] != computed_md5:
+                raise AzureError((
+                    f"MD5 mismatch. Expected value is '{response.http_response.headers['content-md5']}', "
+                    f"computed value is '{computed_md5}'."),
+                    response=response.http_response
+                )
+
+
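+# Editor's illustration, not part of the vendored source: the MD5 helper accepts
+# bytes or a seekable stream and rewinds the stream when done; encode_base64
+# turns the digest into the header value.
+def _editor_example_content_md5():
+    from io import BytesIO
+
+    digest = StorageContentValidation.get_content_md5(b"hello")
+    stream = BytesIO(b"hello")
+    assert StorageContentValidation.get_content_md5(stream) == digest
+    assert stream.tell() == 0  # position restored
+    assert encode_base64(digest) == "XUFAKrxLKna5cZ2REBfFkg=="
+
+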
+class StorageRetryPolicy(HTTPPolicy):
+    """
+    The base class for Exponential and Linear retries containing shared code.
+    """
+
+    total_retries: int
+    """The max number of retries."""
+    connect_retries: int
+    """The max number of connect retries."""
+    read_retries: int
+    """The max number of read retries."""
+    status_retries: int
+    """The max number of status retries."""
+    retry_to_secondary: bool
+    """Whether the secondary endpoint should be retried."""
+
+    def __init__(self, **kwargs: Any) -> None:
+        self.total_retries = kwargs.pop('retry_total', 10)
+        self.connect_retries = kwargs.pop('retry_connect', 3)
+        self.read_retries = kwargs.pop('retry_read', 3)
+        self.status_retries = kwargs.pop('retry_status', 3)
+        self.retry_to_secondary = kwargs.pop('retry_to_secondary', False)
+        super(StorageRetryPolicy, self).__init__()
+
+    def _set_next_host_location(self, settings: Dict[str, Any], request: "PipelineRequest") -> None:
+        """
+        A function which sets the next host location on the request, if applicable.
+
+        :param Dict[str, Any] settings: The configurable values pertaining to the next host location.
+        :param PipelineRequest request: A pipeline request object.
+        """
+        if settings['hosts'] and all(settings['hosts'].values()):
+            url = urlparse(request.url)
+            # If there's more than one possible location, retry to the alternative
+            if settings['mode'] == LocationMode.PRIMARY:
+                settings['mode'] = LocationMode.SECONDARY
+            else:
+                settings['mode'] = LocationMode.PRIMARY
+            updated = url._replace(netloc=settings['hosts'].get(settings['mode']))
+            request.url = updated.geturl()
+
+    def configure_retries(self, request: "PipelineRequest") -> Dict[str, Any]:
+        body_position = None
+        if hasattr(request.http_request.body, 'read'):
+            try:
+                body_position = request.http_request.body.tell()
+            except (AttributeError, UnsupportedOperation):
+                # if body position cannot be obtained, then retries will not work
+                pass
+        options = request.context.options
+        return {
+            'total': options.pop("retry_total", self.total_retries),
+            'connect': options.pop("retry_connect", self.connect_retries),
+            'read': options.pop("retry_read", self.read_retries),
+            'status': options.pop("retry_status", self.status_retries),
+            'retry_secondary': options.pop("retry_to_secondary", self.retry_to_secondary),
+            'mode': options.pop("location_mode", LocationMode.PRIMARY),
+            'hosts': options.pop("hosts", None),
+            'hook': options.pop("retry_hook", None),
+            'body_position': body_position,
+            'count': 0,
+            'history': []
+        }
+
+    def get_backoff_time(self, settings: Dict[str, Any]) -> float:  # pylint: disable=unused-argument
+        """ Formula for computing the current backoff.
+        Should be overridden by the child class.
+
+        :param Dict[str, Any] settings: The configurable values pertaining to the backoff time.
+        :returns: The backoff time.
+        :rtype: float
+        """
+        return 0
+
+    def sleep(self, settings, transport):
+        backoff = self.get_backoff_time(settings)
+        if not backoff or backoff < 0:
+            return
+        transport.sleep(backoff)
+
+    def increment(
+        self, settings: Dict[str, Any],
+        request: "PipelineRequest",
+        response: Optional["PipelineResponse"] = None,
+        error: Optional[AzureError] = None
+    ) -> bool:
+        """Increment the retry counters.
+
+        :param Dict[str, Any] settings: The configurable values pertaining to the increment operation.
+        :param PipelineRequest request: A pipeline request object.
+        :param Optional[PipelineResponse] response: A pipeline response object.
+        :param Optional[AzureError] error: An error encountered during the request, or
+            None if the response was received successfully.
+        :returns: Whether the retry attempts are exhausted.
+        :rtype: bool
+        """
+        settings['total'] -= 1
+
+        if error and isinstance(error, ServiceRequestError):
+            # Errors when we're fairly sure that the server did not receive the
+            # request, so it should be safe to retry.
+            settings['connect'] -= 1
+            settings['history'].append(RequestHistory(request, error=error))
+
+        elif error and isinstance(error, ServiceResponseError):
+            # Errors that occur after the request has been started, so we should
+            # assume that the server began processing it.
+            settings['read'] -= 1
+            settings['history'].append(RequestHistory(request, error=error))
+
+        else:
+            # Incrementing because of a server error like a 500 in
+            # status_forcelist and the given method is in the allowlist
+            if response:
+                settings['status'] -= 1
+                settings['history'].append(RequestHistory(request, http_response=response))
+
+        if not is_exhausted(settings):
+            if request.method not in ['PUT'] and settings['retry_secondary']:
+                self._set_next_host_location(settings, request)
+
+            # rewind the request body if it is a stream
+            if request.body and hasattr(request.body, 'read'):
+                # if no position was saved, then retry will not work
+                if settings['body_position'] is None:
+                    return False
+                try:
+                    # attempt to rewind the body to the initial position
+                    request.body.seek(settings['body_position'], SEEK_SET)
+                except (UnsupportedOperation, ValueError):
+                    # if body is not seekable, then retry would not work
+                    return False
+            settings['count'] += 1
+            return True
+        return False
+
+    def send(self, request):
+        retries_remaining = True
+        response = None
+        retry_settings = self.configure_retries(request)
+        while retries_remaining:
+            try:
+                response = self.next.send(request)
+                if is_retry(response, retry_settings['mode']) or is_checksum_retry(response):
+                    retries_remaining = self.increment(
+                        retry_settings,
+                        request=request.http_request,
+                        response=response.http_response)
+                    if retries_remaining:
+                        retry_hook(
+                            retry_settings,
+                            request=request.http_request,
+                            response=response.http_response,
+                            error=None)
+                        self.sleep(retry_settings, request.context.transport)
+                        continue
+                break
+            except AzureError as err:
+                if isinstance(err, AzureSigningError):
+                    raise
+                retries_remaining = self.increment(
+                    retry_settings, request=request.http_request, error=err)
+                if retries_remaining:
+                    retry_hook(
+                        retry_settings,
+                        request=request.http_request,
+                        response=None,
+                        error=err)
+                    self.sleep(retry_settings, request.context.transport)
+                    continue
+                raise err
+        if retry_settings['history']:
+            response.context['history'] = retry_settings['history']
+        response.http_response.location_mode = retry_settings['mode']
+        return response
+
+
+class ExponentialRetry(StorageRetryPolicy):
+    """Exponential retry."""
+
+    initial_backoff: int
+    """The initial backoff interval, in seconds, for the first retry."""
+    increment_base: int
+    """The base, in seconds, to increment the initial_backoff by after the
+    first retry."""
+    random_jitter_range: int
+    """A number in seconds which indicates a range to jitter/randomize for the back-off interval."""
+
+    def __init__(
+        self, initial_backoff: int = 15,
+        increment_base: int = 3,
+        retry_total: int = 3,
+        retry_to_secondary: bool = False,
+        random_jitter_range: int = 3,
+        **kwargs: Any
+    ) -> None:
+        """
+        Constructs an Exponential retry object. The initial_backoff is used for
+        the first retry. Subsequent retries are retried after initial_backoff +
+        increment_base^retry_count seconds.
+
+        :param int initial_backoff:
+            The initial backoff interval, in seconds, for the first retry.
+        :param int increment_base:
+            The base, in seconds, to increment the initial_backoff by after the
+            first retry.
+        :param int retry_total:
+            The maximum number of retry attempts.
+        :param bool retry_to_secondary:
+            Whether the request should be retried to secondary, if able. This should
+            only be enabled if RA-GRS accounts are used and potentially stale data
+            can be handled.
+        :param int random_jitter_range:
+            A number in seconds which indicates a range to jitter/randomize for the back-off interval.
+            For example, a random_jitter_range of 3 causes the back-off interval x to vary between x-3 and x+3.
+        """
+        self.initial_backoff = initial_backoff
+        self.increment_base = increment_base
+        self.random_jitter_range = random_jitter_range
+        super(ExponentialRetry, self).__init__(
+            retry_total=retry_total, retry_to_secondary=retry_to_secondary, **kwargs)
+
+    def get_backoff_time(self, settings: Dict[str, Any]) -> float:
+        """
+        Calculates how long to sleep before retrying.
+
+        :param Dict[str, Any] settings: The configurable values pertaining to the backoff time.
+        :returns:
+            A float indicating how long to wait before retrying the request,
+            or None to indicate no retry should be performed.
+        :rtype: float
+        """
+        random_generator = random.Random()
+        backoff = self.initial_backoff + (0 if settings['count'] == 0 else pow(self.increment_base, settings['count']))
+        random_range_start = backoff - self.random_jitter_range if backoff > self.random_jitter_range else 0
+        random_range_end = backoff + self.random_jitter_range
+        return random_generator.uniform(random_range_start, random_range_end)
+
+
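+# Editor's illustration, not part of the vendored source: with jitter disabled,
+# the defaults sleep 15, 18, 24, 42, ... seconds across successive retries
+# (initial_backoff + increment_base**count after the first).
+def _editor_example_exponential_backoff():
+    policy = ExponentialRetry(initial_backoff=15, increment_base=3, random_jitter_range=0)
+    assert [policy.get_backoff_time({'count': n}) for n in range(4)] == [15, 18, 24, 42]
+
+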
+class LinearRetry(StorageRetryPolicy):
+    """Linear retry."""
+
+    backoff: int
+    """The backoff interval, in seconds, between retries."""
+    random_jitter_range: int
+    """A number in seconds which indicates a range to jitter/randomize for the back-off interval."""
+
+    def __init__(
+        self, backoff: int = 15,
+        retry_total: int = 3,
+        retry_to_secondary: bool = False,
+        random_jitter_range: int = 3,
+        **kwargs: Any
+    ) -> None:
+        """
+        Constructs a Linear retry object.
+
+        :param int backoff:
+            The backoff interval, in seconds, between retries.
+        :param int retry_total:
+            The maximum number of retry attempts.
+        :param bool retry_to_secondary:
+            Whether the request should be retried to secondary, if able. This should
+            only be enabled if RA-GRS accounts are used and potentially stale data
+            can be handled.
+        :param int random_jitter_range:
+            A number in seconds which indicates a range to jitter/randomize for the back-off interval.
+            For example, a random_jitter_range of 3 causes the back-off interval x to vary between x-3 and x+3.
+        """
+        self.backoff = backoff
+        self.random_jitter_range = random_jitter_range
+        super(LinearRetry, self).__init__(
+            retry_total=retry_total, retry_to_secondary=retry_to_secondary, **kwargs)
+
+    def get_backoff_time(self, settings: Dict[str, Any]) -> float:
+        """
+        Calculates how long to sleep before retrying.
+
+        :param Dict[str, Any] settings: The configurable values pertaining to the backoff time.
+        :returns:
+            A float indicating how long to wait before retrying the request,
+            or None to indicate no retry should be performed.
+        :rtype: float
+        """
+        random_generator = random.Random()
+        # the backoff interval normally does not change, however there is the possibility
+        # that it was modified by accessing the property directly after initializing the object
+        random_range_start = self.backoff - self.random_jitter_range \
+            if self.backoff > self.random_jitter_range else 0
+        random_range_end = self.backoff + self.random_jitter_range
+        return random_generator.uniform(random_range_start, random_range_end)
+
+
+class StorageBearerTokenCredentialPolicy(BearerTokenCredentialPolicy):
+    """ Custom Bearer token credential policy for following Storage Bearer challenges """
+
+    def __init__(self, credential: "TokenCredential", audience: str, **kwargs: Any) -> None:
+        super(StorageBearerTokenCredentialPolicy, self).__init__(credential, audience, **kwargs)
+
+    def on_challenge(self, request: "PipelineRequest", response: "PipelineResponse") -> bool:
+        try:
+            auth_header = response.http_response.headers.get("WWW-Authenticate")
+            challenge = StorageHttpChallenge(auth_header)
+        except ValueError:
+            return False
+
+        scope = challenge.resource_id + DEFAULT_OAUTH_SCOPE
+        self.authorize_request(request, scope, tenant_id=challenge.tenant_id)
+
+        return True
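A minimal usage sketch of the exponential backoff contract above, assuming this file is the fileshare package's _shared/policies.py; the hand-built settings dict stands in for what StorageRetryPolicy.configure_retries would normally supply:

from azure.storage.fileshare._shared.policies import ExponentialRetry

# Jitter disabled so the sequence is deterministic:
# 15, 15+3^1=18, 15+3^2=24, 15+3^3=42 seconds.
policy = ExponentialRetry(initial_backoff=15, increment_base=3, random_jitter_range=0)
for count in range(4):
    print(count, policy.get_backoff_time({'count': count}))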
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/fileshare/_shared/policies_async.py b/.venv/lib/python3.12/site-packages/azure/storage/fileshare/_shared/policies_async.py
new file mode 100644
index 00000000..1c030a82
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/fileshare/_shared/policies_async.py
@@ -0,0 +1,296 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+# pylint: disable=invalid-overridden-method
+
+import asyncio
+import logging
+import random
+from typing import Any, Dict, TYPE_CHECKING
+
+from azure.core.exceptions import AzureError, StreamClosedError, StreamConsumedError
+from azure.core.pipeline.policies import AsyncBearerTokenCredentialPolicy, AsyncHTTPPolicy
+
+from .authentication import AzureSigningError, StorageHttpChallenge
+from .constants import DEFAULT_OAUTH_SCOPE
+from .policies import encode_base64, is_retry, StorageContentValidation, StorageRetryPolicy
+
+if TYPE_CHECKING:
+    from azure.core.credentials_async import AsyncTokenCredential
+    from azure.core.pipeline.transport import (  # pylint: disable=non-abstract-transport-import
+        PipelineRequest,
+        PipelineResponse
+    )
+
+
+_LOGGER = logging.getLogger(__name__)
+
+
+async def retry_hook(settings, **kwargs):
+    if settings['hook']:
+        if asyncio.iscoroutine(settings['hook']):
+            await settings['hook'](
+                retry_count=settings['count'] - 1,
+                location_mode=settings['mode'],
+                **kwargs)
+        else:
+            settings['hook'](
+                retry_count=settings['count'] - 1,
+                location_mode=settings['mode'],
+                **kwargs)
+
+
+async def is_checksum_retry(response):
+    # retry if invalid content md5
+    if response.context.get('validate_content', False) and response.http_response.headers.get('content-md5'):
+        try:
+            await response.http_response.load_body()  # Load the body in memory and close the socket
+        except (StreamClosedError, StreamConsumedError):
+            pass
+        computed_md5 = response.http_request.headers.get('content-md5', None) or \
+                            encode_base64(StorageContentValidation.get_content_md5(response.http_response.body()))
+        if response.http_response.headers['content-md5'] != computed_md5:
+            return True
+    return False
+
+
+class AsyncStorageResponseHook(AsyncHTTPPolicy):
+
+    def __init__(self, **kwargs):
+        self._response_callback = kwargs.get('raw_response_hook')
+        super(AsyncStorageResponseHook, self).__init__()
+
+    async def send(self, request: "PipelineRequest") -> "PipelineResponse":
+        # Values could be 0
+        data_stream_total = request.context.get('data_stream_total')
+        if data_stream_total is None:
+            data_stream_total = request.context.options.pop('data_stream_total', None)
+        download_stream_current = request.context.get('download_stream_current')
+        if download_stream_current is None:
+            download_stream_current = request.context.options.pop('download_stream_current', None)
+        upload_stream_current = request.context.get('upload_stream_current')
+        if upload_stream_current is None:
+            upload_stream_current = request.context.options.pop('upload_stream_current', None)
+
+        response_callback = request.context.get('response_callback') or \
+            request.context.options.pop('raw_response_hook', self._response_callback)
+
+        response = await self.next.send(request)
+
+        will_retry = is_retry(response, request.context.options.get('mode')) or await is_checksum_retry(response)
+        # Auth error could come from Bearer challenge, in which case this request will be made again
+        is_auth_error = response.http_response.status_code == 401
+        should_update_counts = not (will_retry or is_auth_error)
+
+        if should_update_counts and download_stream_current is not None:
+            download_stream_current += int(response.http_response.headers.get('Content-Length', 0))
+            if data_stream_total is None:
+                content_range = response.http_response.headers.get('Content-Range')
+                if content_range:
+                    data_stream_total = int(content_range.split(' ', 1)[1].split('/', 1)[1])
+                else:
+                    data_stream_total = download_stream_current
+        elif should_update_counts and upload_stream_current is not None:
+            upload_stream_current += int(response.http_request.headers.get('Content-Length', 0))
+        for pipeline_obj in [request, response]:
+            if hasattr(pipeline_obj, 'context'):
+                pipeline_obj.context['data_stream_total'] = data_stream_total
+                pipeline_obj.context['download_stream_current'] = download_stream_current
+                pipeline_obj.context['upload_stream_current'] = upload_stream_current
+        if response_callback:
+            if asyncio.iscoroutine(response_callback):
+                await response_callback(response) # type: ignore
+            else:
+                response_callback(response)
+            request.context['response_callback'] = response_callback
+        return response
+
+class AsyncStorageRetryPolicy(StorageRetryPolicy):
+    """
+    The base class for Exponential and Linear retries containing shared code.
+    """
+
+    async def sleep(self, settings, transport):
+        backoff = self.get_backoff_time(settings)
+        if not backoff or backoff < 0:
+            return
+        await transport.sleep(backoff)
+
+    async def send(self, request):
+        retries_remaining = True
+        response = None
+        retry_settings = self.configure_retries(request)
+        while retries_remaining:
+            try:
+                response = await self.next.send(request)
+                if is_retry(response, retry_settings['mode']) or await is_checksum_retry(response):
+                    retries_remaining = self.increment(
+                        retry_settings,
+                        request=request.http_request,
+                        response=response.http_response)
+                    if retries_remaining:
+                        await retry_hook(
+                            retry_settings,
+                            request=request.http_request,
+                            response=response.http_response,
+                            error=None)
+                        await self.sleep(retry_settings, request.context.transport)
+                        continue
+                break
+            except AzureError as err:
+                if isinstance(err, AzureSigningError):
+                    raise
+                retries_remaining = self.increment(
+                    retry_settings, request=request.http_request, error=err)
+                if retries_remaining:
+                    await retry_hook(
+                        retry_settings,
+                        request=request.http_request,
+                        response=None,
+                        error=err)
+                    await self.sleep(retry_settings, request.context.transport)
+                    continue
+                raise err
+        if retry_settings['history']:
+            response.context['history'] = retry_settings['history']
+        response.http_response.location_mode = retry_settings['mode']
+        return response
+
+
+class ExponentialRetry(AsyncStorageRetryPolicy):
+    """Exponential retry."""
+
+    initial_backoff: int
+    """The initial backoff interval, in seconds, for the first retry."""
+    increment_base: int
+    """The base, in seconds, to increment the initial_backoff by after the
+    first retry."""
+    random_jitter_range: int
+    """A number in seconds which indicates a range to jitter/randomize for the back-off interval."""
+
+    def __init__(
+        self,
+        initial_backoff: int = 15,
+        increment_base: int = 3,
+        retry_total: int = 3,
+        retry_to_secondary: bool = False,
+        random_jitter_range: int = 3, **kwargs: Any
+    ) -> None:
+        """
+        Constructs an Exponential retry object. The initial_backoff is used for
+        the first retry. Subsequent retries are retried after initial_backoff +
+        increment_base^retry_count seconds. For example, by default the first retry
+        occurs after 15 seconds, the second after (15+3^1) = 18 seconds, and the
+        third after (15+3^2) = 24 seconds.
+
+        :param int initial_backoff:
+            The initial backoff interval, in seconds, for the first retry.
+        :param int increment_base:
+            The base, in seconds, to increment the initial_backoff by after the
+            first retry.
+        :param int retry_total:
+            The maximum number of retry attempts.
+        :param bool retry_to_secondary:
+            Whether the request should be retried to secondary, if able. This should
+            only be enabled if RA-GRS accounts are used and potentially stale data
+            can be handled.
+        :param int random_jitter_range:
+            A number in seconds which indicates a range to jitter/randomize for the back-off interval.
+            For example, a random_jitter_range of 3 results in a back-off interval x that varies between x-3 and x+3.
+        """
+        self.initial_backoff = initial_backoff
+        self.increment_base = increment_base
+        self.random_jitter_range = random_jitter_range
+        super(ExponentialRetry, self).__init__(
+            retry_total=retry_total, retry_to_secondary=retry_to_secondary, **kwargs)
+
+    def get_backoff_time(self, settings: Dict[str, Any]) -> float:
+        """
+        Calculates how long to sleep before retrying.
+
+        :param Dict[str, Any] settings: The configurable values pertaining to the backoff time.
+        :returns:
+            A float indicating how long to wait before retrying the request,
+            or None to indicate no retry should be performed.
+        :rtype: float
+        """
+        random_generator = random.Random()
+        backoff = self.initial_backoff + (0 if settings['count'] == 0 else pow(self.increment_base, settings['count']))
+        random_range_start = backoff - self.random_jitter_range if backoff > self.random_jitter_range else 0
+        random_range_end = backoff + self.random_jitter_range
+        return random_generator.uniform(random_range_start, random_range_end)
+
+
+class LinearRetry(AsyncStorageRetryPolicy):
+    """Linear retry."""
+
+    backoff: int
+    """The backoff interval, in seconds, between retries."""
+    random_jitter_range: int
+    """A number in seconds which indicates a range to jitter/randomize for the back-off interval."""
+
+    def __init__(
+        self, backoff: int = 15,
+        retry_total: int = 3,
+        retry_to_secondary: bool = False,
+        random_jitter_range: int = 3,
+        **kwargs: Any
+    ) -> None:
+        """
+        Constructs a Linear retry object.
+
+        :param int backoff:
+            The backoff interval, in seconds, between retries.
+        :param int retry_total:
+            The maximum number of retry attempts.
+        :param bool retry_to_secondary:
+            Whether the request should be retried to secondary, if able. This should
+            only be enabled if RA-GRS accounts are used and potentially stale data
+            can be handled.
+        :param int random_jitter_range:
+            A number in seconds which indicates a range to jitter/randomize for the back-off interval.
+            For example, a random_jitter_range of 3 results in a back-off interval x that varies between x-3 and x+3.
+        """
+        self.backoff = backoff
+        self.random_jitter_range = random_jitter_range
+        super(LinearRetry, self).__init__(
+            retry_total=retry_total, retry_to_secondary=retry_to_secondary, **kwargs)
+
+    def get_backoff_time(self, settings: Dict[str, Any]) -> float:
+        """
+        Calculates how long to sleep before retrying.
+
+        :param Dict[str, Any] settings: The configurable values pertaining to the backoff time.
+        :returns:
+            A float indicating how long to wait before retrying the request,
+            or None to indicate no retry should be performed.
+        :rtype: float
+        """
+        random_generator = random.Random()
+        # the backoff interval normally does not change, however there is the possibility
+        # that it was modified by accessing the property directly after initializing the object
+        random_range_start = self.backoff - self.random_jitter_range \
+            if self.backoff > self.random_jitter_range else 0
+        random_range_end = self.backoff + self.random_jitter_range
+        return random_generator.uniform(random_range_start, random_range_end)
+
+
+class AsyncStorageBearerTokenCredentialPolicy(AsyncBearerTokenCredentialPolicy):
+    """ Custom Bearer token credential policy for following Storage Bearer challenges """
+
+    def __init__(self, credential: "AsyncTokenCredential", audience: str, **kwargs: Any) -> None:
+        super(AsyncStorageBearerTokenCredentialPolicy, self).__init__(credential, audience, **kwargs)
+
+    async def on_challenge(self, request: "PipelineRequest", response: "PipelineResponse") -> bool:
+        try:
+            auth_header = response.http_response.headers.get("WWW-Authenticate")
+            challenge = StorageHttpChallenge(auth_header)
+        except ValueError:
+            return False
+
+        scope = challenge.resource_id + DEFAULT_OAUTH_SCOPE
+        await self.authorize_request(request, scope, tenant_id=challenge.tenant_id)
+
+        return True
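A hedged sketch of the md5 comparison behind is_checksum_retry, using only the helpers this file already imports from .policies (the vendored import path is an assumption):

from azure.storage.fileshare._shared.policies import (
    StorageContentValidation,
    encode_base64,
)

body = b"hello world"
# The value the content-md5 response header must match for the
# response to be accepted rather than retried.
expected = encode_base64(StorageContentValidation.get_content_md5(body))
print(expected)  # XrY7u+Ae7tCTyyK7j1rNww==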
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/fileshare/_shared/request_handlers.py b/.venv/lib/python3.12/site-packages/azure/storage/fileshare/_shared/request_handlers.py
new file mode 100644
index 00000000..54927cc7
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/fileshare/_shared/request_handlers.py
@@ -0,0 +1,270 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+
+import logging
+import stat
+from io import (SEEK_END, SEEK_SET, UnsupportedOperation)
+from os import fstat
+from typing import Dict, Optional
+
+import isodate
+
+
+_LOGGER = logging.getLogger(__name__)
+
+_REQUEST_DELIMITER_PREFIX = "batch_"
+_HTTP1_1_IDENTIFIER = "HTTP/1.1"
+_HTTP_LINE_ENDING = "\r\n"
+
+
+def serialize_iso(attr):
+    """Serialize Datetime object into ISO-8601 formatted string.
+
+    :param Datetime attr: Object to be serialized.
+    :rtype: str
+    :raises: ValueError if format invalid.
+    """
+    if not attr:
+        return None
+    if isinstance(attr, str):
+        attr = isodate.parse_datetime(attr)
+    try:
+        utc = attr.utctimetuple()
+        if utc.tm_year > 9999 or utc.tm_year < 1:
+            raise OverflowError("Hit max or min date")
+
+        date = f"{utc.tm_year:04}-{utc.tm_mon:02}-{utc.tm_mday:02}T{utc.tm_hour:02}:{utc.tm_min:02}:{utc.tm_sec:02}"
+        return date + 'Z'
+    except (ValueError, OverflowError) as err:
+        raise ValueError("Unable to serialize datetime object.") from err
+    except AttributeError as err:
+        raise TypeError("ISO-8601 object must be valid datetime object.") from err
+
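A short sketch of serialize_iso's output for a timezone-aware datetime (import path assumed, as in the other examples):

from datetime import datetime, timezone
from azure.storage.fileshare._shared.request_handlers import serialize_iso

print(serialize_iso(datetime(2025, 3, 28, 21, 52, 21, tzinfo=timezone.utc)))
# 2025-03-28T21:52:21Z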
+def get_length(data):
+    length = None
+    # Check if object implements the __len__ method, covers most input cases such as bytearray.
+    try:
+        length = len(data)
+    except:  # pylint: disable=bare-except
+        pass
+
+    if not length:
+        # Check if the stream is a file-like stream object.
+        # If so, calculate the size using the file descriptor.
+        try:
+            fileno = data.fileno()
+        except (AttributeError, UnsupportedOperation):
+            pass
+        else:
+            try:
+                mode = fstat(fileno).st_mode
+                if stat.S_ISREG(mode) or stat.S_ISLNK(mode):
+                    # st_size is only meaningful for regular files or symlinks;
+                    # other types, e.g. sockets, may return misleading sizes like 0
+                    return fstat(fileno).st_size
+            except OSError:
+                # Not a valid fileno; it is possible requests returned
+                # a socket number instead
+                pass
+
+        # If the stream is seekable and tell() is implemented, calculate the stream size.
+        try:
+            current_position = data.tell()
+            data.seek(0, SEEK_END)
+            length = data.tell() - current_position
+            data.seek(current_position, SEEK_SET)
+        except (AttributeError, OSError, UnsupportedOperation):
+            pass
+
+    return length
+
+
+def read_length(data):
+    try:
+        if hasattr(data, 'read'):
+            read_data = b''
+            for chunk in iter(lambda: data.read(4096), b""):
+                read_data += chunk
+            return len(read_data), read_data
+        if hasattr(data, '__iter__'):
+            read_data = b''
+            for chunk in data:
+                read_data += chunk
+            return len(read_data), read_data
+    except:  # pylint: disable=bare-except
+        pass
+    raise ValueError("Unable to calculate content length, please specify.")
+
+
+def validate_and_format_range_headers(
+        start_range, end_range, start_range_required=True,
+        end_range_required=True, check_content_md5=False, align_to_page=False):
+    # If end range is provided, start range must be provided
+    if (start_range_required or end_range is not None) and start_range is None:
+        raise ValueError("start_range value cannot be None.")
+    if end_range_required and end_range is None:
+        raise ValueError("end_range value cannot be None.")
+
+    # Page ranges must be 512 aligned
+    if align_to_page:
+        if start_range is not None and start_range % 512 != 0:
+            raise ValueError(f"Invalid page blob start_range: {start_range}. "
+                             "The size must be aligned to a 512-byte boundary.")
+        if end_range is not None and end_range % 512 != 511:
+            raise ValueError(f"Invalid page blob end_range: {end_range}. "
+                             "The size must be aligned to a 512-byte boundary.")
+
+    # Format based on whether end_range is present
+    range_header = None
+    if end_range is not None:
+        range_header = f'bytes={start_range}-{end_range}'
+    elif start_range is not None:
+        range_header = f"bytes={start_range}-"
+
+    # Content MD5 can only be provided for a complete range less than 4MB in size
+    range_validation = None
+    if check_content_md5:
+        if start_range is None or end_range is None:
+            raise ValueError("Both start and end range required for MD5 content validation.")
+        if end_range - start_range > 4 * 1024 * 1024:
+            raise ValueError("Getting content MD5 for a range greater than 4MB is not supported.")
+        range_validation = 'true'
+
+    return range_header, range_validation
+
+
+def add_metadata_headers(metadata=None):
+    # type: (Optional[Dict[str, str]]) -> Dict[str, str]
+    headers = {}
+    if metadata:
+        for key, value in metadata.items():
+            headers[f'x-ms-meta-{key.strip()}'] = value.strip() if value else value
+    return headers
+
+
+def serialize_batch_body(requests, batch_id):
+    """
+    --<delimiter>
+    <subrequest>
+    --<delimiter>
+    <subrequest>    (repeated as needed)
+    --<delimiter>--
+
+    Serializes the requests in this batch into a single multipart/mixed HTTP body.
+
+    :param List[~azure.core.pipeline.transport.HttpRequest] requests:
+        a list of sub-requests for the batch request
+    :param str batch_id:
+        the id to be embedded in the batch sub-request delimiter
+    :returns: The body bytes for this batch.
+    :rtype: bytes
+    """
+
+    if requests is None or len(requests) == 0:
+        raise ValueError('Please provide sub-request(s) for this batch request')
+
+    delimiter_bytes = (_get_batch_request_delimiter(batch_id, True, False) + _HTTP_LINE_ENDING).encode('utf-8')
+    newline_bytes = _HTTP_LINE_ENDING.encode('utf-8')
+    batch_body = []
+
+    content_index = 0
+    for request in requests:
+        request.headers.update({
+            "Content-ID": str(content_index),
+            "Content-Length": str(0)
+        })
+        batch_body.append(delimiter_bytes)
+        batch_body.append(_make_body_from_sub_request(request))
+        batch_body.append(newline_bytes)
+        content_index += 1
+
+    batch_body.append(_get_batch_request_delimiter(batch_id, True, True).encode('utf-8'))
+    # final line of body MUST have \r\n at the end, or it will not be properly read by the service
+    batch_body.append(newline_bytes)
+
+    return b"".join(batch_body)
+
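A hedged sketch of the body this function produces for two sub-requests; HttpRequest is the azure.core transport type named in the docstring, and the batch id is a placeholder:

from azure.core.pipeline.transport import HttpRequest

reqs = [
    HttpRequest("DELETE", "/container/blob1"),
    HttpRequest("DELETE", "/container/blob2"),
]
body = serialize_batch_body(reqs, "00000000-0000-0000-0000-000000000000")
# Each sub-request is framed by '--batch_<id>' delimiters.
print(body.decode().splitlines()[0])  # --batch_00000000-0000-0000-0000-000000000000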
+
+def _get_batch_request_delimiter(batch_id, is_prepend_dashes=False, is_append_dashes=False):
+    """
+    Gets the delimiter used for this batch request's multipart/mixed HTTP format.
+
+    :param str batch_id:
+        Randomly generated id
+    :param bool is_prepend_dashes:
+        Whether to include the leading dashes. Used in the body, but not when defining the delimiter.
+    :param bool is_append_dashes:
+        Whether to include the ending dashes. Used in the body on the closing delimiter only.
+    :returns: The delimiter, WITHOUT a trailing newline.
+    :rtype: str
+    """
+
+    prepend_dashes = '--' if is_prepend_dashes else ''
+    append_dashes = '--' if is_append_dashes else ''
+
+    return prepend_dashes + _REQUEST_DELIMITER_PREFIX + batch_id + append_dashes
+
+
+def _make_body_from_sub_request(sub_request):
+    """
+     Content-Type: application/http
+     Content-ID: <sequential int ID>
+     Content-Transfer-Encoding: <value> (if present)
+
+     <verb> <path><query> HTTP/<version>
+     <header key>: <header value> (repeated as necessary)
+     Content-Length: <value>
+     (newline if content length > 0)
+     <body> (if content length > 0)
+
+     Serializes an http request.
+
+     :param ~azure.core.pipeline.transport.HttpRequest sub_request:
+        Request to serialize.
+     :returns: The serialized sub-request in bytes
+     :rtype: bytes
+     """
+
+    # put the sub-request's headers into a list for efficient str concatenation
+    sub_request_body = []
+
+    # get headers for ease of manipulation; remove headers as they are used
+    headers = sub_request.headers
+
+    # append opening headers
+    sub_request_body.append("Content-Type: application/http")
+    sub_request_body.append(_HTTP_LINE_ENDING)
+
+    sub_request_body.append("Content-ID: ")
+    sub_request_body.append(headers.pop("Content-ID", ""))
+    sub_request_body.append(_HTTP_LINE_ENDING)
+
+    sub_request_body.append("Content-Transfer-Encoding: binary")
+    sub_request_body.append(_HTTP_LINE_ENDING)
+
+    # append blank line
+    sub_request_body.append(_HTTP_LINE_ENDING)
+
+    # append HTTP verb and path and query and HTTP version
+    sub_request_body.append(sub_request.method)
+    sub_request_body.append(' ')
+    sub_request_body.append(sub_request.url)
+    sub_request_body.append(' ')
+    sub_request_body.append(_HTTP1_1_IDENTIFIER)
+    sub_request_body.append(_HTTP_LINE_ENDING)
+
+    # append remaining headers (this will set the Content-Length, as it was set on the sub-request)
+    for header_name, header_value in headers.items():
+        if header_value is not None:
+            sub_request_body.append(header_name)
+            sub_request_body.append(": ")
+            sub_request_body.append(header_value)
+            sub_request_body.append(_HTTP_LINE_ENDING)
+
+    # append blank line
+    sub_request_body.append(_HTTP_LINE_ENDING)
+
+    return ''.join(sub_request_body).encode()
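A quick sketch of the range helper defined above (the vendored import path is an assumption of where this copy lives):

from azure.storage.fileshare._shared.request_handlers import (
    validate_and_format_range_headers,
)

# A 512-byte range, small enough for MD5 validation.
header, md5_flag = validate_and_format_range_headers(0, 511, check_content_md5=True)
print(header, md5_flag)  # bytes=0-511 true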
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/fileshare/_shared/response_handlers.py b/.venv/lib/python3.12/site-packages/azure/storage/fileshare/_shared/response_handlers.py
new file mode 100644
index 00000000..af9a2fcd
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/fileshare/_shared/response_handlers.py
@@ -0,0 +1,200 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+import logging
+from typing import NoReturn
+from xml.etree.ElementTree import Element
+
+from azure.core.exceptions import (
+    ClientAuthenticationError,
+    DecodeError,
+    HttpResponseError,
+    ResourceExistsError,
+    ResourceModifiedError,
+    ResourceNotFoundError,
+)
+from azure.core.pipeline.policies import ContentDecodePolicy
+
+from .authentication import AzureSigningError
+from .models import get_enum_value, StorageErrorCode, UserDelegationKey
+from .parser import _to_utc_datetime
+
+
+_LOGGER = logging.getLogger(__name__)
+
+
+class PartialBatchErrorException(HttpResponseError):
+    """There is a partial failure in batch operations.
+
+    :param str message: The message of the exception.
+    :param response: Server response to be deserialized.
+    :param list parts: A list of the parts in multipart response.
+    """
+
+    def __init__(self, message, response, parts):
+        self.parts = parts
+        super(PartialBatchErrorException, self).__init__(message=message, response=response)
+
+
+# Parses the blob length from the content range header: bytes 1-3/65537
+def parse_length_from_content_range(content_range):
+    if content_range is None:
+        return None
+
+    # First, split in space and take the second half: '1-3/65537'
+    # Next, split on slash and take the second half: '65537'
+    # Finally, convert to an int: 65537
+    return int(content_range.split(' ', 1)[1].split('/', 1)[1])
+
+
+def normalize_headers(headers):
+    normalized = {}
+    for key, value in headers.items():
+        if key.startswith('x-ms-'):
+            key = key[5:]
+        normalized[key.lower().replace('-', '_')] = get_enum_value(value)
+    return normalized
+
+
+def deserialize_metadata(response, obj, headers):  # pylint: disable=unused-argument
+    try:
+        raw_metadata = {k: v for k, v in response.http_response.headers.items() if k.lower().startswith('x-ms-meta-')}
+    except AttributeError:
+        raw_metadata = {k: v for k, v in response.headers.items() if k.lower().startswith('x-ms-meta-')}
+    return {k[10:]: v for k, v in raw_metadata.items()}
+
+
+def return_response_headers(response, deserialized, response_headers):  # pylint: disable=unused-argument
+    return normalize_headers(response_headers)
+
+
+def return_headers_and_deserialized(response, deserialized, response_headers):  # pylint: disable=unused-argument
+    return normalize_headers(response_headers), deserialized
+
+
+def return_context_and_deserialized(response, deserialized, response_headers):  # pylint: disable=unused-argument
+    return response.http_response.location_mode, deserialized
+
+
+def return_raw_deserialized(response, *_):
+    return response.http_response.location_mode, response.context[ContentDecodePolicy.CONTEXT_NAME]
+
+
+def process_storage_error(storage_error) -> NoReturn: # type: ignore [misc] # pylint:disable=too-many-statements, too-many-branches
+    raise_error = HttpResponseError
+    serialized = False
+    if isinstance(storage_error, AzureSigningError):
+        storage_error.message = storage_error.message + \
+            '. This is likely due to an invalid shared key. Please check your shared key and try again.'
+    if not storage_error.response or storage_error.response.status_code in [200, 204]:
+        raise storage_error
+    # If it is one of those three then it has been serialized prior by the generated layer.
+    if isinstance(storage_error, (PartialBatchErrorException,
+                                  ClientAuthenticationError, ResourceNotFoundError, ResourceExistsError)):
+        serialized = True
+    error_code = storage_error.response.headers.get('x-ms-error-code')
+    error_message = storage_error.message
+    additional_data = {}
+    error_dict = {}
+    try:
+        error_body = ContentDecodePolicy.deserialize_from_http_generics(storage_error.response)
+        try:
+            if error_body is None or len(error_body) == 0:
+                error_body = storage_error.response.reason
+        except AttributeError:
+            error_body = ''
+        # If it is an XML response
+        if isinstance(error_body, Element):
+            error_dict = {
+                child.tag.lower(): child.text
+                for child in error_body
+            }
+        # If it is a JSON response
+        elif isinstance(error_body, dict):
+            error_dict = error_body.get('error', {})
+        elif not error_code:
+            _LOGGER.warning(
+                'Unexpected return type %s from ContentDecodePolicy.deserialize_from_http_generics.', type(error_body))
+            error_dict = {'message': str(error_body)}
+
+        # If we extracted from a Json or XML response
+        # There is a chance error_dict is just a string
+        if error_dict and isinstance(error_dict, dict):
+            error_code = error_dict.get('code')
+            error_message = error_dict.get('message')
+            additional_data = {k: v for k, v in error_dict.items() if k not in {'code', 'message'}}
+    except DecodeError:
+        pass
+
+    try:
+        # This check would be unnecessary if we have already serialized the error
+        if error_code and not serialized:
+            error_code = StorageErrorCode(error_code)
+            if error_code in [StorageErrorCode.condition_not_met,
+                              StorageErrorCode.blob_overwritten]:
+                raise_error = ResourceModifiedError
+            if error_code in [StorageErrorCode.invalid_authentication_info,
+                              StorageErrorCode.authentication_failed]:
+                raise_error = ClientAuthenticationError
+            if error_code in [StorageErrorCode.resource_not_found,
+                              StorageErrorCode.cannot_verify_copy_source,
+                              StorageErrorCode.blob_not_found,
+                              StorageErrorCode.queue_not_found,
+                              StorageErrorCode.container_not_found,
+                              StorageErrorCode.parent_not_found,
+                              StorageErrorCode.share_not_found]:
+                raise_error = ResourceNotFoundError
+            if error_code in [StorageErrorCode.account_already_exists,
+                              StorageErrorCode.account_being_created,
+                              StorageErrorCode.resource_already_exists,
+                              StorageErrorCode.resource_type_mismatch,
+                              StorageErrorCode.blob_already_exists,
+                              StorageErrorCode.queue_already_exists,
+                              StorageErrorCode.container_already_exists,
+                              StorageErrorCode.container_being_deleted,
+                              StorageErrorCode.queue_being_deleted,
+                              StorageErrorCode.share_already_exists,
+                              StorageErrorCode.share_being_deleted]:
+                raise_error = ResourceExistsError
+    except ValueError:
+        # Got an unknown error code
+        pass
+
+    # Error message should include all the error properties
+    try:
+        error_message += f"\nErrorCode:{error_code.value}"
+    except AttributeError:
+        error_message += f"\nErrorCode:{error_code}"
+    for name, info in additional_data.items():
+        error_message += f"\n{name}:{info}"
+
+    # No need to create an instance if it has already been serialized by the generated layer
+    if serialized:
+        storage_error.message = error_message
+        error = storage_error
+    else:
+        error = raise_error(message=error_message, response=storage_error.response)
+    # Ensure these properties are stored in the error instance as well (not just the error message)
+    error.error_code = error_code
+    error.additional_info = additional_data
+    # error.args is what's surfaced on the traceback - show error message in all cases
+    error.args = (error.message,)
+    try:
+        # `from None` prevents us from double printing the exception (suppresses generated layer error context)
+        exec("raise error from None")   # pylint: disable=exec-used # nosec
+    except SyntaxError as exc:
+        raise error from exc
+
+
+def parse_to_internal_user_delegation_key(service_user_delegation_key):
+    internal_user_delegation_key = UserDelegationKey()
+    internal_user_delegation_key.signed_oid = service_user_delegation_key.signed_oid
+    internal_user_delegation_key.signed_tid = service_user_delegation_key.signed_tid
+    internal_user_delegation_key.signed_start = _to_utc_datetime(service_user_delegation_key.signed_start)
+    internal_user_delegation_key.signed_expiry = _to_utc_datetime(service_user_delegation_key.signed_expiry)
+    internal_user_delegation_key.signed_service = service_user_delegation_key.signed_service
+    internal_user_delegation_key.signed_version = service_user_delegation_key.signed_version
+    internal_user_delegation_key.value = service_user_delegation_key.value
+    return internal_user_delegation_key
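A short sketch of the two header helpers above, again assuming the vendored import path:

from azure.storage.fileshare._shared.response_handlers import (
    normalize_headers,
    parse_length_from_content_range,
)

print(parse_length_from_content_range('bytes 1-3/65537'))  # 65537
print(normalize_headers({'x-ms-meta-key': 'v', 'Last-Modified': 'now'}))
# {'meta_key': 'v', 'last_modified': 'now'}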
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/fileshare/_shared/shared_access_signature.py b/.venv/lib/python3.12/site-packages/azure/storage/fileshare/_shared/shared_access_signature.py
new file mode 100644
index 00000000..2ef9921d
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/fileshare/_shared/shared_access_signature.py
@@ -0,0 +1,243 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+
+from datetime import date
+
+from .parser import _to_utc_datetime
+from .constants import X_MS_VERSION
+from . import sign_string, url_quote
+
+# cspell:ignoreRegExp rsc.
+# cspell:ignoreRegExp s..?id
+class QueryStringConstants(object):
+    SIGNED_SIGNATURE = 'sig'
+    SIGNED_PERMISSION = 'sp'
+    SIGNED_START = 'st'
+    SIGNED_EXPIRY = 'se'
+    SIGNED_RESOURCE = 'sr'
+    SIGNED_IDENTIFIER = 'si'
+    SIGNED_IP = 'sip'
+    SIGNED_PROTOCOL = 'spr'
+    SIGNED_VERSION = 'sv'
+    SIGNED_CACHE_CONTROL = 'rscc'
+    SIGNED_CONTENT_DISPOSITION = 'rscd'
+    SIGNED_CONTENT_ENCODING = 'rsce'
+    SIGNED_CONTENT_LANGUAGE = 'rscl'
+    SIGNED_CONTENT_TYPE = 'rsct'
+    START_PK = 'spk'
+    START_RK = 'srk'
+    END_PK = 'epk'
+    END_RK = 'erk'
+    SIGNED_RESOURCE_TYPES = 'srt'
+    SIGNED_SERVICES = 'ss'
+    SIGNED_OID = 'skoid'
+    SIGNED_TID = 'sktid'
+    SIGNED_KEY_START = 'skt'
+    SIGNED_KEY_EXPIRY = 'ske'
+    SIGNED_KEY_SERVICE = 'sks'
+    SIGNED_KEY_VERSION = 'skv'
+    SIGNED_ENCRYPTION_SCOPE = 'ses'
+
+    # for ADLS
+    SIGNED_AUTHORIZED_OID = 'saoid'
+    SIGNED_UNAUTHORIZED_OID = 'suoid'
+    SIGNED_CORRELATION_ID = 'scid'
+    SIGNED_DIRECTORY_DEPTH = 'sdd'
+
+    @staticmethod
+    def to_list():
+        return [
+            QueryStringConstants.SIGNED_SIGNATURE,
+            QueryStringConstants.SIGNED_PERMISSION,
+            QueryStringConstants.SIGNED_START,
+            QueryStringConstants.SIGNED_EXPIRY,
+            QueryStringConstants.SIGNED_RESOURCE,
+            QueryStringConstants.SIGNED_IDENTIFIER,
+            QueryStringConstants.SIGNED_IP,
+            QueryStringConstants.SIGNED_PROTOCOL,
+            QueryStringConstants.SIGNED_VERSION,
+            QueryStringConstants.SIGNED_CACHE_CONTROL,
+            QueryStringConstants.SIGNED_CONTENT_DISPOSITION,
+            QueryStringConstants.SIGNED_CONTENT_ENCODING,
+            QueryStringConstants.SIGNED_CONTENT_LANGUAGE,
+            QueryStringConstants.SIGNED_CONTENT_TYPE,
+            QueryStringConstants.START_PK,
+            QueryStringConstants.START_RK,
+            QueryStringConstants.END_PK,
+            QueryStringConstants.END_RK,
+            QueryStringConstants.SIGNED_RESOURCE_TYPES,
+            QueryStringConstants.SIGNED_SERVICES,
+            QueryStringConstants.SIGNED_OID,
+            QueryStringConstants.SIGNED_TID,
+            QueryStringConstants.SIGNED_KEY_START,
+            QueryStringConstants.SIGNED_KEY_EXPIRY,
+            QueryStringConstants.SIGNED_KEY_SERVICE,
+            QueryStringConstants.SIGNED_KEY_VERSION,
+            QueryStringConstants.SIGNED_ENCRYPTION_SCOPE,
+            # for ADLS
+            QueryStringConstants.SIGNED_AUTHORIZED_OID,
+            QueryStringConstants.SIGNED_UNAUTHORIZED_OID,
+            QueryStringConstants.SIGNED_CORRELATION_ID,
+            QueryStringConstants.SIGNED_DIRECTORY_DEPTH,
+        ]
+
+
+class SharedAccessSignature(object):
+    '''
+    Provides a factory for creating account access
+    signature tokens with an account name and account key. Users can either
+    use the factory or can construct the appropriate service and use the
+    generate_*_shared_access_signature method directly.
+    '''
+
+    def __init__(self, account_name, account_key, x_ms_version=X_MS_VERSION):
+        '''
+        :param str account_name:
+            The storage account name used to generate the shared access signatures.
+        :param str account_key:
+            The access key used to generate the shared access signatures.
+        :param str x_ms_version:
+            The service version used to generate the shared access signatures.
+        '''
+        self.account_name = account_name
+        self.account_key = account_key
+        self.x_ms_version = x_ms_version
+
+    def generate_account(
+        self, services,
+        resource_types,
+        permission,
+        expiry,
+        start=None,
+        ip=None,
+        protocol=None,
+        sts_hook=None
+    ) -> str:
+        '''
+        Generates a shared access signature for the account.
+        Use the returned signature with the sas_token parameter of the service
+        or to create a new account object.
+
+        :param Any services: The specified services associated with the shared access signature.
+        :param ResourceTypes resource_types:
+            Specifies the resource types that are accessible with the account
+            SAS. You can combine values to provide access to more than one
+            resource type.
+        :param AccountSasPermissions permission:
+            The permissions associated with the shared access signature. The
+            user is restricted to operations allowed by the permissions.
+            Required unless an id is given referencing a stored access policy
+            which contains this field. This field must be omitted if it has been
+            specified in an associated stored access policy. You can combine
+            values to provide more than one permission.
+        :param expiry:
+            The time at which the shared access signature becomes invalid.
+            Required unless an id is given referencing a stored access policy
+            which contains this field. This field must be omitted if it has
+            been specified in an associated stored access policy. Azure will always
+            convert values to UTC. If a date is passed in without timezone info, it
+            is assumed to be UTC.
+        :type expiry: datetime or str
+        :param start:
+            The time at which the shared access signature becomes valid. If
+            omitted, start time for this call is assumed to be the time when the
+            storage service receives the request. The provided datetime will always
+            be interpreted as UTC.
+        :type start: datetime or str
+        :param str ip:
+            Specifies an IP address or a range of IP addresses from which to accept requests.
+            If the IP address from which the request originates does not match the IP address
+            or address range specified on the SAS token, the request is not authenticated.
+            For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS
+            restricts the request to those IP addresses.
+        :param str protocol:
+            Specifies the protocol permitted for a request made. The default value
+            is https,http. See :class:`~azure.storage.common.models.Protocol` for possible values.
+        :param sts_hook:
+            For debugging purposes only. If provided, the hook is called with the string to sign
+            that was used to generate the SAS.
+        :type sts_hook: Optional[Callable[[str], None]]
+        :returns: The generated SAS token for the account.
+        :rtype: str
+        '''
+        sas = _SharedAccessHelper()
+        sas.add_base(permission, expiry, start, ip, protocol, self.x_ms_version)
+        sas.add_account(services, resource_types)
+        sas.add_account_signature(self.account_name, self.account_key)
+
+        if sts_hook is not None:
+            sts_hook(sas.string_to_sign)
+
+        return sas.get_token()
+
+
+class _SharedAccessHelper(object):
+    def __init__(self):
+        self.query_dict = {}
+        self.string_to_sign = ""
+
+    def _add_query(self, name, val):
+        if val:
+            self.query_dict[name] = str(val)
+
+    def add_base(self, permission, expiry, start, ip, protocol, x_ms_version):
+        if isinstance(start, date):
+            start = _to_utc_datetime(start)
+
+        if isinstance(expiry, date):
+            expiry = _to_utc_datetime(expiry)
+
+        self._add_query(QueryStringConstants.SIGNED_START, start)
+        self._add_query(QueryStringConstants.SIGNED_EXPIRY, expiry)
+        self._add_query(QueryStringConstants.SIGNED_PERMISSION, permission)
+        self._add_query(QueryStringConstants.SIGNED_IP, ip)
+        self._add_query(QueryStringConstants.SIGNED_PROTOCOL, protocol)
+        self._add_query(QueryStringConstants.SIGNED_VERSION, x_ms_version)
+
+    def add_resource(self, resource):
+        self._add_query(QueryStringConstants.SIGNED_RESOURCE, resource)
+
+    def add_id(self, policy_id):
+        self._add_query(QueryStringConstants.SIGNED_IDENTIFIER, policy_id)
+
+    def add_account(self, services, resource_types):
+        self._add_query(QueryStringConstants.SIGNED_SERVICES, services)
+        self._add_query(QueryStringConstants.SIGNED_RESOURCE_TYPES, resource_types)
+
+    def add_override_response_headers(self, cache_control,
+                                      content_disposition,
+                                      content_encoding,
+                                      content_language,
+                                      content_type):
+        self._add_query(QueryStringConstants.SIGNED_CACHE_CONTROL, cache_control)
+        self._add_query(QueryStringConstants.SIGNED_CONTENT_DISPOSITION, content_disposition)
+        self._add_query(QueryStringConstants.SIGNED_CONTENT_ENCODING, content_encoding)
+        self._add_query(QueryStringConstants.SIGNED_CONTENT_LANGUAGE, content_language)
+        self._add_query(QueryStringConstants.SIGNED_CONTENT_TYPE, content_type)
+
+    def add_account_signature(self, account_name, account_key):
+        def get_value_to_append(query):
+            return_value = self.query_dict.get(query) or ''
+            return return_value + '\n'
+
+        self.string_to_sign = \
+            (account_name + '\n' +
+             get_value_to_append(QueryStringConstants.SIGNED_PERMISSION) +
+             get_value_to_append(QueryStringConstants.SIGNED_SERVICES) +
+             get_value_to_append(QueryStringConstants.SIGNED_RESOURCE_TYPES) +
+             get_value_to_append(QueryStringConstants.SIGNED_START) +
+             get_value_to_append(QueryStringConstants.SIGNED_EXPIRY) +
+             get_value_to_append(QueryStringConstants.SIGNED_IP) +
+             get_value_to_append(QueryStringConstants.SIGNED_PROTOCOL) +
+             get_value_to_append(QueryStringConstants.SIGNED_VERSION) +
+             '\n'   # Signed Encryption Scope - always empty for fileshare
+             )
+
+        self._add_query(QueryStringConstants.SIGNED_SIGNATURE,
+                        sign_string(account_key, self.string_to_sign))
+
+    def get_token(self) -> str:
+        return '&'.join([f'{n}={url_quote(v)}' for n, v in self.query_dict.items() if v is not None])
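A hedged sketch of account-SAS generation with the factory above; the account name and key are placeholders, and the import path assumes this vendored copy:

from datetime import datetime, timedelta, timezone
from azure.storage.fileshare._shared.shared_access_signature import (
    SharedAccessSignature,
)

sas = SharedAccessSignature("myaccount", "bXlrZXk=")  # placeholder base64 key
token = sas.generate_account(
    services="f",                 # file service
    resource_types="sco",         # service, container, object
    permission="rl",              # read + list
    expiry=datetime.now(timezone.utc) + timedelta(hours=1),
    sts_hook=print,               # dumps the string-to-sign for debugging
)
print(token)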
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/fileshare/_shared/uploads.py b/.venv/lib/python3.12/site-packages/azure/storage/fileshare/_shared/uploads.py
new file mode 100644
index 00000000..b31cfb32
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/fileshare/_shared/uploads.py
@@ -0,0 +1,604 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+
+from concurrent import futures
+from io import BytesIO, IOBase, SEEK_CUR, SEEK_END, SEEK_SET, UnsupportedOperation
+from itertools import islice
+from math import ceil
+from threading import Lock
+
+from azure.core.tracing.common import with_current_context
+
+from . import encode_base64, url_quote
+from .request_handlers import get_length
+from .response_handlers import return_response_headers
+
+
+_LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE = 4 * 1024 * 1024
+_ERROR_VALUE_SHOULD_BE_SEEKABLE_STREAM = "{0} should be a seekable file-like/io.IOBase type stream object."
+
+
+def _parallel_uploads(executor, uploader, pending, running):
+    range_ids = []
+    while True:
+        # Wait for some upload to finish before adding a new one
+        done, running = futures.wait(running, return_when=futures.FIRST_COMPLETED)
+        range_ids.extend([chunk.result() for chunk in done])
+        try:
+            for _ in range(0, len(done)):
+                next_chunk = next(pending)
+                running.add(executor.submit(with_current_context(uploader), next_chunk))
+        except StopIteration:
+            break
+
+    # Wait for the remaining uploads to finish
+    done, _running = futures.wait(running)
+    range_ids.extend([chunk.result() for chunk in done])
+    return range_ids
+
+
+def upload_data_chunks(
+        service=None,
+        uploader_class=None,
+        total_size=None,
+        chunk_size=None,
+        max_concurrency=None,
+        stream=None,
+        validate_content=None,
+        progress_hook=None,
+        **kwargs):
+
+    parallel = max_concurrency > 1
+    if parallel and 'modified_access_conditions' in kwargs:
+        # Access conditions do not work with parallelism
+        kwargs['modified_access_conditions'] = None
+
+    uploader = uploader_class(
+        service=service,
+        total_size=total_size,
+        chunk_size=chunk_size,
+        stream=stream,
+        parallel=parallel,
+        validate_content=validate_content,
+        progress_hook=progress_hook,
+        **kwargs)
+    if parallel:
+        with futures.ThreadPoolExecutor(max_concurrency) as executor:
+            upload_tasks = uploader.get_chunk_streams()
+            running_futures = [
+                executor.submit(with_current_context(uploader.process_chunk), u)
+                for u in islice(upload_tasks, 0, max_concurrency)
+            ]
+            range_ids = _parallel_uploads(executor, uploader.process_chunk, upload_tasks, running_futures)
+    else:
+        range_ids = [uploader.process_chunk(result) for result in uploader.get_chunk_streams()]
+    if any(range_ids):
+        return [r[1] for r in sorted(range_ids, key=lambda r: r[0])]
+    return uploader.response_headers
+
+
+def upload_substream_blocks(
+        service=None,
+        uploader_class=None,
+        total_size=None,
+        chunk_size=None,
+        max_concurrency=None,
+        stream=None,
+        progress_hook=None,
+        **kwargs):
+    parallel = max_concurrency > 1
+    if parallel and 'modified_access_conditions' in kwargs:
+        # Access conditions do not work with parallelism
+        kwargs['modified_access_conditions'] = None
+    uploader = uploader_class(
+        service=service,
+        total_size=total_size,
+        chunk_size=chunk_size,
+        stream=stream,
+        parallel=parallel,
+        progress_hook=progress_hook,
+        **kwargs)
+
+    if parallel:
+        with futures.ThreadPoolExecutor(max_concurrency) as executor:
+            upload_tasks = uploader.get_substream_blocks()
+            running_futures = [
+                executor.submit(with_current_context(uploader.process_substream_block), u)
+                for u in islice(upload_tasks, 0, max_concurrency)
+            ]
+            range_ids = _parallel_uploads(executor, uploader.process_substream_block, upload_tasks, running_futures)
+    else:
+        range_ids = [uploader.process_substream_block(b) for b in uploader.get_substream_blocks()]
+    if any(range_ids):
+        return sorted(range_ids)
+    return []
+
+
+class _ChunkUploader(object):  # pylint: disable=too-many-instance-attributes
+
+    def __init__(
+            self, service,
+            total_size,
+            chunk_size,
+            stream,
+            parallel,
+            encryptor=None,
+            padder=None,
+            progress_hook=None,
+            **kwargs):
+        self.service = service
+        self.total_size = total_size
+        self.chunk_size = chunk_size
+        self.stream = stream
+        self.parallel = parallel
+
+        # Stream management
+        self.stream_lock = Lock() if parallel else None
+
+        # Progress feedback
+        self.progress_total = 0
+        self.progress_lock = Lock() if parallel else None
+        self.progress_hook = progress_hook
+
+        # Encryption
+        self.encryptor = encryptor
+        self.padder = padder
+        self.response_headers = None
+        self.etag = None
+        self.last_modified = None
+        self.request_options = kwargs
+
+    def get_chunk_streams(self):
+        index = 0
+        while True:
+            data = b""
+            read_size = self.chunk_size
+
+            # Buffer until we either reach the end of the stream or get a whole chunk.
+            while True:
+                if self.total_size:
+                    read_size = min(self.chunk_size - len(data), self.total_size - (index + len(data)))
+                temp = self.stream.read(read_size)
+                if not isinstance(temp, bytes):
+                    raise TypeError("Blob data should be of type bytes.")
+                data += temp or b""
+
+                # We have read an empty string and so are at the end
+                # of the buffer or we have read a full chunk.
+                if temp == b"" or len(data) == self.chunk_size:
+                    break
+
+            if len(data) == self.chunk_size:
+                if self.padder:
+                    data = self.padder.update(data)
+                if self.encryptor:
+                    data = self.encryptor.update(data)
+                yield index, data
+            else:
+                if self.padder:
+                    data = self.padder.update(data) + self.padder.finalize()
+                if self.encryptor:
+                    data = self.encryptor.update(data) + self.encryptor.finalize()
+                if data:
+                    yield index, data
+                break
+            index += len(data)
+
+    def process_chunk(self, chunk_data):
+        chunk_bytes = chunk_data[1]
+        chunk_offset = chunk_data[0]
+        return self._upload_chunk_with_progress(chunk_offset, chunk_bytes)
+
+    def _update_progress(self, length):
+        if self.progress_lock is not None:
+            with self.progress_lock:
+                self.progress_total += length
+        else:
+            self.progress_total += length
+
+        if self.progress_hook:
+            self.progress_hook(self.progress_total, self.total_size)
+
+    def _upload_chunk(self, chunk_offset, chunk_data):
+        raise NotImplementedError("Must be implemented by child class.")
+
+    def _upload_chunk_with_progress(self, chunk_offset, chunk_data):
+        range_id = self._upload_chunk(chunk_offset, chunk_data)
+        self._update_progress(len(chunk_data))
+        return range_id
+
+    def get_substream_blocks(self):
+        assert self.chunk_size is not None
+        lock = self.stream_lock
+        blob_length = self.total_size
+
+        if blob_length is None:
+            blob_length = get_length(self.stream)
+            if blob_length is None:
+                raise ValueError("Unable to determine content length of upload data.")
+
+        blocks = int(ceil(blob_length / (self.chunk_size * 1.0)))
+        last_block_size = self.chunk_size if blob_length % self.chunk_size == 0 else blob_length % self.chunk_size
+
+        for i in range(blocks):
+            index = i * self.chunk_size
+            length = last_block_size if i == blocks - 1 else self.chunk_size
+            yield index, SubStream(self.stream, index, length, lock)
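The block arithmetic in get_substream_blocks above is plain ceil-division, with the remainder carried by the last block; a standalone illustration of the same computation:

from math import ceil

blob_length, chunk_size = 10_000_000, 4 * 1024 * 1024
blocks = int(ceil(blob_length / (chunk_size * 1.0)))
last = chunk_size if blob_length % chunk_size == 0 else blob_length % chunk_size
print(blocks, last)  # 3 blocks, tail block of 1,611,392 bytes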
+
+    def process_substream_block(self, block_data):
+        return self._upload_substream_block_with_progress(block_data[0], block_data[1])
+
+    def _upload_substream_block(self, index, block_stream):
+        raise NotImplementedError("Must be implemented by child class.")
+
+    def _upload_substream_block_with_progress(self, index, block_stream):
+        range_id = self._upload_substream_block(index, block_stream)
+        self._update_progress(len(block_stream))
+        return range_id
+
+    def set_response_properties(self, resp):
+        self.etag = resp.etag
+        self.last_modified = resp.last_modified
+
+
+class BlockBlobChunkUploader(_ChunkUploader):
+
+    def __init__(self, *args, **kwargs):
+        kwargs.pop("modified_access_conditions", None)
+        super(BlockBlobChunkUploader, self).__init__(*args, **kwargs)
+        self.current_length = None
+
+    def _upload_chunk(self, chunk_offset, chunk_data):
+        # TODO: This is incorrect, but works with recording.
+        index = f'{chunk_offset:032d}'
+        block_id = encode_base64(url_quote(encode_base64(index)))
+        self.service.stage_block(
+            block_id,
+            len(chunk_data),
+            chunk_data,
+            data_stream_total=self.total_size,
+            upload_stream_current=self.progress_total,
+            **self.request_options
+        )
+        return index, block_id
+
+    def _upload_substream_block(self, index, block_stream):
+        try:
+            block_id = f'BlockId{(index//self.chunk_size):05}'
+            self.service.stage_block(
+                block_id,
+                len(block_stream),
+                block_stream,
+                data_stream_total=self.total_size,
+                upload_stream_current=self.progress_total,
+                **self.request_options
+            )
+        finally:
+            block_stream.close()
+        return block_id
+
+
+class PageBlobChunkUploader(_ChunkUploader):
+
+    def _is_chunk_empty(self, chunk_data):
+        # read until non-zero byte is encountered
+        # if reached the end without returning, then chunk_data is all 0's
+        return not any(bytearray(chunk_data))
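+        # e.g. a 512-byte page of b"\x00" * 512 reads as empty and is skipped by _upload_chunk.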
+
+    def _upload_chunk(self, chunk_offset, chunk_data):
+        # avoid uploading the empty pages
+        if not self._is_chunk_empty(chunk_data):
+            chunk_end = chunk_offset + len(chunk_data) - 1
+            content_range = f"bytes={chunk_offset}-{chunk_end}"
+            computed_md5 = None
+            self.response_headers = self.service.upload_pages(
+                body=chunk_data,
+                content_length=len(chunk_data),
+                transactional_content_md5=computed_md5,
+                range=content_range,
+                cls=return_response_headers,
+                data_stream_total=self.total_size,
+                upload_stream_current=self.progress_total,
+                **self.request_options
+            )
+
+            if not self.parallel and self.request_options.get('modified_access_conditions'):
+                self.request_options['modified_access_conditions'].if_match = self.response_headers['etag']
+
+    def _upload_substream_block(self, index, block_stream):
+        pass
+
+
+class AppendBlobChunkUploader(_ChunkUploader):
+
+    def __init__(self, *args, **kwargs):
+        super(AppendBlobChunkUploader, self).__init__(*args, **kwargs)
+        self.current_length = None
+
+    def _upload_chunk(self, chunk_offset, chunk_data):
+        if self.current_length is None:
+            self.response_headers = self.service.append_block(
+                body=chunk_data,
+                content_length=len(chunk_data),
+                cls=return_response_headers,
+                data_stream_total=self.total_size,
+                upload_stream_current=self.progress_total,
+                **self.request_options
+            )
+            self.current_length = int(self.response_headers["blob_append_offset"])
+        else:
+            self.request_options['append_position_access_conditions'].append_position = \
+                self.current_length + chunk_offset
+            self.response_headers = self.service.append_block(
+                body=chunk_data,
+                content_length=len(chunk_data),
+                cls=return_response_headers,
+                data_stream_total=self.total_size,
+                upload_stream_current=self.progress_total,
+                **self.request_options
+            )
+
+    def _upload_substream_block(self, index, block_stream):
+        pass
+
+
+class DataLakeFileChunkUploader(_ChunkUploader):
+
+    def _upload_chunk(self, chunk_offset, chunk_data):
+        self.response_headers = self.service.append_data(
+            body=chunk_data,
+            position=chunk_offset,
+            content_length=len(chunk_data),
+            cls=return_response_headers,
+            data_stream_total=self.total_size,
+            upload_stream_current=self.progress_total,
+            **self.request_options
+        )
+
+        if not self.parallel and self.request_options.get('modified_access_conditions'):
+            self.request_options['modified_access_conditions'].if_match = self.response_headers['etag']
+
+    def _upload_substream_block(self, index, block_stream):
+        try:
+            self.service.append_data(
+                body=block_stream,
+                position=index,
+                content_length=len(block_stream),
+                cls=return_response_headers,
+                data_stream_total=self.total_size,
+                upload_stream_current=self.progress_total,
+                **self.request_options
+            )
+        finally:
+            block_stream.close()
+
+
+class FileChunkUploader(_ChunkUploader):
+
+    def _upload_chunk(self, chunk_offset, chunk_data):
+        length = len(chunk_data)
+        chunk_end = chunk_offset + length - 1
+        response = self.service.upload_range(
+            chunk_data,
+            chunk_offset,
+            length,
+            data_stream_total=self.total_size,
+            upload_stream_current=self.progress_total,
+            **self.request_options
+        )
+        return f'bytes={chunk_offset}-{chunk_end}', response
+
+    # TODO: Implement this method.
+    def _upload_substream_block(self, index, block_stream):
+        pass
+
+
+class SubStream(IOBase):
+
+    def __init__(self, wrapped_stream, stream_begin_index, length, lockObj):
+        # Python 2.7: file-like objects created with open() typically supported seek(), but were
+        # not derived from io.IOBase and thus did not implement seekable().
+        # Python 3: file-like objects created with open() derive from io.IOBase.
+        try:
+            # Only the main thread runs this, so there's no need to grab the lock.
+            wrapped_stream.seek(0, SEEK_CUR)
+        except Exception as exc:
+            raise ValueError("Wrapped stream must support seek().") from exc
+
+        self._lock = lockObj
+        self._wrapped_stream = wrapped_stream
+        self._position = 0
+        self._stream_begin_index = stream_begin_index
+        self._length = length
+        self._buffer = BytesIO()
+
+        # We must avoid buffering more than necessary while not using too much
+        # memory, so the max buffer size is capped at 4MB.
+        self._max_buffer_size = (
+            length if length < _LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE else _LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE
+        )
+        self._current_buffer_start = 0
+        self._current_buffer_size = 0
+        super(SubStream, self).__init__()
+
+    def __len__(self):
+        return self._length
+
+    def close(self):
+        if self._buffer:
+            self._buffer.close()
+        self._wrapped_stream = None
+        IOBase.close(self)
+
+    def fileno(self):
+        return self._wrapped_stream.fileno()
+
+    def flush(self):
+        pass
+
+    def read(self, size=None):
+        if self.closed:  # pylint: disable=using-constant-test
+            raise ValueError("Stream is closed.")
+
+        if size is None:
+            size = self._length - self._position
+
+        # adjust if out of bounds
+        if size + self._position >= self._length:
+            size = self._length - self._position
+
+        # return fast
+        if size == 0 or self._buffer.closed:
+            return b""
+
+        # attempt first read from the read buffer and update position
+        read_buffer = self._buffer.read(size)
+        bytes_read = len(read_buffer)
+        bytes_remaining = size - bytes_read
+        self._position += bytes_read
+
+        # repopulate the read buffer from the underlying stream to fulfill the request
+        # ensure the seek and read operations are done atomically (only if a lock is provided)
+        if bytes_remaining > 0:
+            with self._buffer:
+                # either read in the max buffer size specified on the class
+                # or read in just enough data for the current block/sub stream
+                current_max_buffer_size = min(self._max_buffer_size, self._length - self._position)
+
+                # lock is only defined if max_concurrency > 1 (parallel uploads)
+                if self._lock:
+                    with self._lock:
+                        # reposition the underlying stream to match the start of the data to read
+                        absolute_position = self._stream_begin_index + self._position
+                        self._wrapped_stream.seek(absolute_position, SEEK_SET)
+                        # If we can't seek to the right location, our read will be corrupted so fail fast.
+                        if self._wrapped_stream.tell() != absolute_position:
+                            raise IOError("Stream failed to seek to the desired location.")
+                        buffer_from_stream = self._wrapped_stream.read(current_max_buffer_size)
+                else:
+                    absolute_position = self._stream_begin_index + self._position
+                    # It's possible that there's connection problem during data transfer,
+                    # so when we retry we don't want to read from current position of wrapped stream,
+                    # instead we should seek to where we want to read from.
+                    if self._wrapped_stream.tell() != absolute_position:
+                        self._wrapped_stream.seek(absolute_position, SEEK_SET)
+
+                    buffer_from_stream = self._wrapped_stream.read(current_max_buffer_size)
+
+            if buffer_from_stream:
+                # update the buffer with new data from the wrapped stream
+                # we need to note down the start position and size of the buffer, in case seek is performed later
+                self._buffer = BytesIO(buffer_from_stream)
+                self._current_buffer_start = self._position
+                self._current_buffer_size = len(buffer_from_stream)
+
+                # read the remaining bytes from the new buffer and update position
+                second_read_buffer = self._buffer.read(bytes_remaining)
+                read_buffer += second_read_buffer
+                self._position += len(second_read_buffer)
+
+        return read_buffer
+
+    def readable(self):
+        return True
+
+    def readinto(self, b):
+        raise UnsupportedOperation
+
+    def seek(self, offset, whence=0):
+        if whence is SEEK_SET:
+            start_index = 0
+        elif whence is SEEK_CUR:
+            start_index = self._position
+        elif whence is SEEK_END:
+            start_index = self._length
+            offset = -offset
+        else:
+            raise ValueError("Invalid argument for the 'whence' parameter.")
+
+        pos = start_index + offset
+
+        if pos > self._length:
+            pos = self._length
+        elif pos < 0:
+            pos = 0
+
+        # check if buffer is still valid
+        # if not, drop buffer
+        if pos < self._current_buffer_start or pos >= self._current_buffer_start + self._current_buffer_size:
+            self._buffer.close()
+            self._buffer = BytesIO()
+        else:  # if yes seek to correct position
+            delta = pos - self._current_buffer_start
+            self._buffer.seek(delta, SEEK_SET)
+
+        self._position = pos
+        return pos
+
+    def seekable(self):
+        return True
+
+    def tell(self):
+        return self._position
+
+    def write(self, b):
+        raise UnsupportedOperation
+
+    def writelines(self, lines):
+        raise UnsupportedOperation
+
+    def writable(self):
+        return False
+
+
+class IterStreamer(object):
+    """
+    File-like streaming iterator.
+    """
+
+    def __init__(self, generator, encoding="UTF-8"):
+        self.generator = generator
+        self.iterator = iter(generator)
+        self.leftover = b""
+        self.encoding = encoding
+
+    def __len__(self):
+        return self.generator.__len__()
+
+    def __iter__(self):
+        return self.iterator
+
+    def seekable(self):
+        return False
+
+    def __next__(self):
+        return next(self.iterator)
+
+    def tell(self, *args, **kwargs):
+        raise UnsupportedOperation("Data generator does not support tell.")
+
+    def seek(self, *args, **kwargs):
+        raise UnsupportedOperation("Data generator is not seekable.")
+
+    def read(self, size):
+        data = self.leftover
+        count = len(self.leftover)
+        try:
+            while count < size:
+                chunk = self.__next__()
+                if isinstance(chunk, str):
+                    chunk = chunk.encode(self.encoding)
+                data += chunk
+                count += len(chunk)
+        # This means count < size and what's leftover will be returned in this call.
+        except StopIteration:
+            self.leftover = b""
+
+        if count >= size:
+            self.leftover = data[size:]
+
+        return data[:size]
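+
+
+# Illustrative use of IterStreamer (not part of the SDK surface; the byte
+# values are placeholders):
+#
+#   stream = IterStreamer(iter([b"abc", b"defgh"]))
+#   stream.read(4)   # -> b"abcd" (b"efgh" kept as leftover)
+#   stream.read(10)  # -> b"efgh" (generator exhausted, short read)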
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/fileshare/_shared/uploads_async.py b/.venv/lib/python3.12/site-packages/azure/storage/fileshare/_shared/uploads_async.py
new file mode 100644
index 00000000..3e102ec5
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/fileshare/_shared/uploads_async.py
@@ -0,0 +1,460 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+
+import asyncio
+import inspect
+import threading
+from asyncio import Lock
+from io import UnsupportedOperation
+from itertools import islice
+from math import ceil
+from typing import AsyncGenerator, Union
+
+from . import encode_base64, url_quote
+from .request_handlers import get_length
+from .response_handlers import return_response_headers
+from .uploads import SubStream, IterStreamer  # pylint: disable=unused-import
+
+
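+# The two schedulers below keep a bounded window of in-flight uploads: each
+# time a task completes, one more pending chunk is scheduled, so at most
+# max_concurrency uploads run at once. The first variant drains an async
+# iterator of chunks, the second a synchronous one.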
+async def _async_parallel_uploads(uploader, pending, running):
+    range_ids = []
+    while True:
+        # Wait for some upload to finish before adding a new one
+        done, running = await asyncio.wait(running, return_when=asyncio.FIRST_COMPLETED)
+        range_ids.extend([chunk.result() for chunk in done])
+        try:
+            for _ in range(0, len(done)):
+                next_chunk = await pending.__anext__()
+                running.add(asyncio.ensure_future(uploader(next_chunk)))
+        except StopAsyncIteration:
+            break
+
+    # Wait for the remaining uploads to finish
+    if running:
+        done, _running = await asyncio.wait(running)
+        range_ids.extend([chunk.result() for chunk in done])
+    return range_ids
+
+
+async def _parallel_uploads(uploader, pending, running):
+    range_ids = []
+    while True:
+        # Wait for some upload to finish before adding a new one
+        done, running = await asyncio.wait(running, return_when=asyncio.FIRST_COMPLETED)
+        range_ids.extend([chunk.result() for chunk in done])
+        try:
+            for _ in range(0, len(done)):
+                next_chunk = next(pending)
+                running.add(asyncio.ensure_future(uploader(next_chunk)))
+        except StopIteration:
+            break
+
+    # Wait for the remaining uploads to finish
+    if running:
+        done, _running = await asyncio.wait(running)
+        range_ids.extend([chunk.result() for chunk in done])
+    return range_ids
+
+
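+# Reads the stream chunk-by-chunk and uploads each chunk via uploader_class,
+# in parallel when max_concurrency > 1. When the chunk uploads produce range
+# or block ids, they are returned sorted by offset; otherwise the last
+# response headers are returned.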
+async def upload_data_chunks(
+        service=None,
+        uploader_class=None,
+        total_size=None,
+        chunk_size=None,
+        max_concurrency=None,
+        stream=None,
+        progress_hook=None,
+        **kwargs):
+
+    parallel = max_concurrency > 1
+    if parallel and 'modified_access_conditions' in kwargs:
+        # Access conditions do not work with parallelism
+        kwargs['modified_access_conditions'] = None
+
+    uploader = uploader_class(
+        service=service,
+        total_size=total_size,
+        chunk_size=chunk_size,
+        stream=stream,
+        parallel=parallel,
+        progress_hook=progress_hook,
+        **kwargs)
+
+    if parallel:
+        upload_tasks = uploader.get_chunk_streams()
+        running_futures = []
+        for _ in range(max_concurrency):
+            try:
+                chunk = await upload_tasks.__anext__()
+                running_futures.append(asyncio.ensure_future(uploader.process_chunk(chunk)))
+            except StopAsyncIteration:
+                break
+
+        range_ids = await _async_parallel_uploads(uploader.process_chunk, upload_tasks, running_futures)
+    else:
+        range_ids = []
+        async for chunk in uploader.get_chunk_streams():
+            range_ids.append(await uploader.process_chunk(chunk))
+
+    if any(range_ids):
+        return [r[1] for r in sorted(range_ids, key=lambda r: r[0])]
+    return uploader.response_headers
+
+
+async def upload_substream_blocks(
+        service=None,
+        uploader_class=None,
+        total_size=None,
+        chunk_size=None,
+        max_concurrency=None,
+        stream=None,
+        progress_hook=None,
+        **kwargs):
+    parallel = max_concurrency > 1
+    if parallel and 'modified_access_conditions' in kwargs:
+        # Access conditions do not work with parallelism
+        kwargs['modified_access_conditions'] = None
+    uploader = uploader_class(
+        service=service,
+        total_size=total_size,
+        chunk_size=chunk_size,
+        stream=stream,
+        parallel=parallel,
+        progress_hook=progress_hook,
+        **kwargs)
+
+    if parallel:
+        upload_tasks = uploader.get_substream_blocks()
+        running_futures = [
+            asyncio.ensure_future(uploader.process_substream_block(u))
+            for u in islice(upload_tasks, 0, max_concurrency)
+        ]
+        range_ids = await _parallel_uploads(uploader.process_substream_block, upload_tasks, running_futures)
+    else:
+        range_ids = []
+        for block in uploader.get_substream_blocks():
+            range_ids.append(await uploader.process_substream_block(block))
+    if any(range_ids):
+        return sorted(range_ids)
+    return
+
+
+class _ChunkUploader(object):  # pylint: disable=too-many-instance-attributes
+
+    def __init__(
+            self, service,
+            total_size,
+            chunk_size,
+            stream,
+            parallel,
+            encryptor=None,
+            padder=None,
+            progress_hook=None,
+            **kwargs):
+        self.service = service
+        self.total_size = total_size
+        self.chunk_size = chunk_size
+        self.stream = stream
+        self.parallel = parallel
+
+        # Stream management
+        self.stream_lock = threading.Lock() if parallel else None
+
+        # Progress feedback
+        self.progress_total = 0
+        self.progress_lock = Lock() if parallel else None
+        self.progress_hook = progress_hook
+
+        # Encryption
+        self.encryptor = encryptor
+        self.padder = padder
+        self.response_headers = None
+        self.etag = None
+        self.last_modified = None
+        self.request_options = kwargs
+
+    async def get_chunk_streams(self):
+        index = 0
+        while True:
+            data = b''
+            read_size = self.chunk_size
+
+            # Buffer until we either reach the end of the stream or get a whole chunk.
+            while True:
+                if self.total_size:
+                    read_size = min(self.chunk_size - len(data), self.total_size - (index + len(data)))
+                temp = self.stream.read(read_size)
+                if inspect.isawaitable(temp):
+                    temp = await temp
+                if not isinstance(temp, bytes):
+                    raise TypeError('Blob data should be of type bytes.')
+                data += temp or b""
+
+                # We have read an empty bytes object, meaning we are at the end
+                # of the stream, or we have read a full chunk.
+                if temp == b'' or len(data) == self.chunk_size:
+                    break
+
+            if len(data) == self.chunk_size:
+                if self.padder:
+                    data = self.padder.update(data)
+                if self.encryptor:
+                    data = self.encryptor.update(data)
+                yield index, data
+            else:
+                if self.padder:
+                    data = self.padder.update(data) + self.padder.finalize()
+                if self.encryptor:
+                    data = self.encryptor.update(data) + self.encryptor.finalize()
+                if data:
+                    yield index, data
+                break
+            index += len(data)
+
+    async def process_chunk(self, chunk_data):
+        chunk_bytes = chunk_data[1]
+        chunk_offset = chunk_data[0]
+        return await self._upload_chunk_with_progress(chunk_offset, chunk_bytes)
+
+    async def _update_progress(self, length):
+        if self.progress_lock is not None:
+            async with self.progress_lock:
+                self.progress_total += length
+        else:
+            self.progress_total += length
+
+        if self.progress_hook:
+            await self.progress_hook(self.progress_total, self.total_size)
+
+    async def _upload_chunk(self, chunk_offset, chunk_data):
+        raise NotImplementedError("Must be implemented by child class.")
+
+    async def _upload_chunk_with_progress(self, chunk_offset, chunk_data):
+        range_id = await self._upload_chunk(chunk_offset, chunk_data)
+        await self._update_progress(len(chunk_data))
+        return range_id
+
+    def get_substream_blocks(self):
+        assert self.chunk_size is not None
+        lock = self.stream_lock
+        blob_length = self.total_size
+
+        if blob_length is None:
+            blob_length = get_length(self.stream)
+            if blob_length is None:
+                raise ValueError("Unable to determine content length of upload data.")
+
+        blocks = int(ceil(blob_length / (self.chunk_size * 1.0)))
+        last_block_size = self.chunk_size if blob_length % self.chunk_size == 0 else blob_length % self.chunk_size
+
+        for i in range(blocks):
+            index = i * self.chunk_size
+            length = last_block_size if i == blocks - 1 else self.chunk_size
+            yield index, SubStream(self.stream, index, length, lock)
+
+    async def process_substream_block(self, block_data):
+        return await self._upload_substream_block_with_progress(block_data[0], block_data[1])
+
+    async def _upload_substream_block(self, index, block_stream):
+        raise NotImplementedError("Must be implemented by child class.")
+
+    async def _upload_substream_block_with_progress(self, index, block_stream):
+        range_id = await self._upload_substream_block(index, block_stream)
+        await self._update_progress(len(block_stream))
+        return range_id
+
+    def set_response_properties(self, resp):
+        self.etag = resp.etag
+        self.last_modified = resp.last_modified
+
+
+class BlockBlobChunkUploader(_ChunkUploader):
+
+    def __init__(self, *args, **kwargs):
+        kwargs.pop('modified_access_conditions', None)
+        super(BlockBlobChunkUploader, self).__init__(*args, **kwargs)
+        self.current_length = None
+
+    async def _upload_chunk(self, chunk_offset, chunk_data):
+        # TODO: This is incorrect, but works with recording.
+        index = f'{chunk_offset:032d}'
+        block_id = encode_base64(url_quote(encode_base64(index)))
+        await self.service.stage_block(
+            block_id,
+            len(chunk_data),
+            body=chunk_data,
+            data_stream_total=self.total_size,
+            upload_stream_current=self.progress_total,
+            **self.request_options)
+        return index, block_id
+
+    async def _upload_substream_block(self, index, block_stream):
+        try:
+            block_id = f'BlockId{(index//self.chunk_size):05}'
+            await self.service.stage_block(
+                block_id,
+                len(block_stream),
+                block_stream,
+                data_stream_total=self.total_size,
+                upload_stream_current=self.progress_total,
+                **self.request_options)
+        finally:
+            block_stream.close()
+        return block_id
+
+
+class PageBlobChunkUploader(_ChunkUploader):
+
+    def _is_chunk_empty(self, chunk_data):
+        # A chunk is "empty" when every byte is zero; any() short-circuits on
+        # the first non-zero byte.
+        return not any(bytearray(chunk_data))
+
+    async def _upload_chunk(self, chunk_offset, chunk_data):
+        # avoid uploading the empty pages
+        if not self._is_chunk_empty(chunk_data):
+            chunk_end = chunk_offset + len(chunk_data) - 1
+            content_range = f'bytes={chunk_offset}-{chunk_end}'
+            computed_md5 = None
+            self.response_headers = await self.service.upload_pages(
+                body=chunk_data,
+                content_length=len(chunk_data),
+                transactional_content_md5=computed_md5,
+                range=content_range,
+                cls=return_response_headers,
+                data_stream_total=self.total_size,
+                upload_stream_current=self.progress_total,
+                **self.request_options)
+
+            if not self.parallel and self.request_options.get('modified_access_conditions'):
+                self.request_options['modified_access_conditions'].if_match = self.response_headers['etag']
+
+    async def _upload_substream_block(self, index, block_stream):
+        pass
+
+
+class AppendBlobChunkUploader(_ChunkUploader):
+
+    def __init__(self, *args, **kwargs):
+        super(AppendBlobChunkUploader, self).__init__(*args, **kwargs)
+        self.current_length = None
+
+    async def _upload_chunk(self, chunk_offset, chunk_data):
+        if self.current_length is None:
+            self.response_headers = await self.service.append_block(
+                body=chunk_data,
+                content_length=len(chunk_data),
+                cls=return_response_headers,
+                data_stream_total=self.total_size,
+                upload_stream_current=self.progress_total,
+                **self.request_options)
+            self.current_length = int(self.response_headers['blob_append_offset'])
+        else:
+            self.request_options['append_position_access_conditions'].append_position = \
+                self.current_length + chunk_offset
+            self.response_headers = await self.service.append_block(
+                body=chunk_data,
+                content_length=len(chunk_data),
+                cls=return_response_headers,
+                data_stream_total=self.total_size,
+                upload_stream_current=self.progress_total,
+                **self.request_options)
+
+    async def _upload_substream_block(self, index, block_stream):
+        pass
+
+
+class DataLakeFileChunkUploader(_ChunkUploader):
+
+    async def _upload_chunk(self, chunk_offset, chunk_data):
+        self.response_headers = await self.service.append_data(
+            body=chunk_data,
+            position=chunk_offset,
+            content_length=len(chunk_data),
+            cls=return_response_headers,
+            data_stream_total=self.total_size,
+            upload_stream_current=self.progress_total,
+            **self.request_options
+        )
+
+        if not self.parallel and self.request_options.get('modified_access_conditions'):
+            self.request_options['modified_access_conditions'].if_match = self.response_headers['etag']
+
+    async def _upload_substream_block(self, index, block_stream):
+        try:
+            await self.service.append_data(
+                body=block_stream,
+                position=index,
+                content_length=len(block_stream),
+                cls=return_response_headers,
+                data_stream_total=self.total_size,
+                upload_stream_current=self.progress_total,
+                **self.request_options
+            )
+        finally:
+            block_stream.close()
+
+
+class FileChunkUploader(_ChunkUploader):
+
+    async def _upload_chunk(self, chunk_offset, chunk_data):
+        length = len(chunk_data)
+        chunk_end = chunk_offset + length - 1
+        response = await self.service.upload_range(
+            chunk_data,
+            chunk_offset,
+            length,
+            data_stream_total=self.total_size,
+            upload_stream_current=self.progress_total,
+            **self.request_options
+        )
+        range_id = f'bytes={chunk_offset}-{chunk_end}'
+        return range_id, response
+
+    # TODO: Implement this method.
+    async def _upload_substream_block(self, index, block_stream):
+        pass
+
+
+class AsyncIterStreamer:
+    """
+    File-like streaming object for AsyncGenerators.
+    """
+    def __init__(self, generator: AsyncGenerator[Union[bytes, str], None], encoding: str = "UTF-8"):
+        self.iterator = generator.__aiter__()
+        self.leftover = b""
+        self.encoding = encoding
+
+    def seekable(self):
+        return False
+
+    def tell(self, *args, **kwargs):
+        raise UnsupportedOperation("Data generator does not support tell.")
+
+    def seek(self, *args, **kwargs):
+        raise UnsupportedOperation("Data generator is not seekable.")
+
+    async def read(self, size: int) -> bytes:
+        data = self.leftover
+        count = len(self.leftover)
+        try:
+            while count < size:
+                chunk = await self.iterator.__anext__()
+                if isinstance(chunk, str):
+                    chunk = chunk.encode(self.encoding)
+                data += chunk
+                count += len(chunk)
+        # This means count < size and what's leftover will be returned in this call.
+        except StopAsyncIteration:
+            self.leftover = b""
+
+        if count >= size:
+            self.leftover = data[size:]
+
+        return data[:size]
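+
+
+# Illustrative use of AsyncIterStreamer (placeholder data; assumes a running
+# event loop):
+#
+#   async def agen():
+#       yield b"abc"
+#       yield b"defgh"
+#
+#   stream = AsyncIterStreamer(agen())
+#   await stream.read(4)   # -> b"abcd" (b"efgh" kept as leftover)
+#   await stream.read(10)  # -> b"efgh" (generator exhausted, short read)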
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/fileshare/_shared_access_signature.py b/.venv/lib/python3.12/site-packages/azure/storage/fileshare/_shared_access_signature.py
new file mode 100644
index 00000000..24679c72
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/fileshare/_shared_access_signature.py
@@ -0,0 +1,574 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+# pylint: disable=docstring-keyword-should-match-keyword-only
+
+from typing import (
+    Any, Callable, List, Optional, Union,
+    TYPE_CHECKING
+)
+from urllib.parse import parse_qs
+
+from ._shared import sign_string
+from ._shared.constants import X_MS_VERSION
+from ._shared.models import Services
+from ._shared.shared_access_signature import QueryStringConstants, SharedAccessSignature, _SharedAccessHelper
+
+if TYPE_CHECKING:
+    from datetime import datetime
+    from azure.storage.fileshare import (
+        AccountSasPermissions,
+        FileSasPermissions,
+        ShareSasPermissions,
+        ResourceTypes
+    )
+
+
+class FileSharedAccessSignature(SharedAccessSignature):
+    """
+    Provides a factory for creating file and share access
+    signature tokens with a common account name and account key.  Users can either
+    use the factory or can construct the appropriate service and use the
+    generate_*_shared_access_signature method directly.
+    """
+
+    def __init__(self, account_name: str, account_key: str) -> None:
+        """
+        :param str account_name:
+            The storage account name used to generate the shared access signatures.
+        :param str account_key:
+            The access key used to generate the shared access signatures.
+        """
+        super(FileSharedAccessSignature, self).__init__(account_name, account_key, x_ms_version=X_MS_VERSION)
+
+    def generate_file(
+        self, share_name: str,
+        directory_name: Optional[str] = None,
+        file_name: Optional[str] = None,
+        permission: Optional[Union["FileSasPermissions", str]] = None,
+        expiry: Optional[Union["datetime", str]] = None,
+        start: Optional[Union["datetime", str]] = None,
+        policy_id: Optional[str] = None,
+        ip: Optional[str] = None,
+        protocol: Optional[str] = None,
+        cache_control: Optional[str] = None,
+        content_disposition: Optional[str] = None,
+        content_encoding: Optional[str] = None,
+        content_language: Optional[str] = None,
+        content_type: Optional[str] = None,
+        sts_hook: Optional[Callable[[str], None]] = None
+    ) -> str:
+        """
+        Generates a shared access signature for the file.
+        Use the returned signature with the credential parameter of any ShareServiceClient,
+        ShareClient, ShareDirectoryClient, or ShareFileClient.
+
+        :param str share_name:
+            Name of share.
+        :param Optional[str] directory_name:
+            Name of directory. SAS tokens cannot be created for directories, so
+            this parameter should only be present if file_name is provided.
+        :param Optional[str] file_name:
+            Name of file.
+        :param permission:
+            The permissions associated with the shared access signature. The
+            user is restricted to operations allowed by the permissions.
+            Permissions must be ordered rcwd.
+            Required unless an id is given referencing a stored access policy
+            which contains this field. This field must be omitted if it has been
+            specified in an associated stored access policy.
+        :type permission: str or FileSasPermissions or None
+        :param expiry:
+            The time at which the shared access signature becomes invalid.
+            Required unless an id is given referencing a stored access policy
+            which contains this field. This field must be omitted if it has
+            been specified in an associated stored access policy. Azure will always
+            convert values to UTC. If a date is passed in without timezone info, it
+            is assumed to be UTC.
+        :type expiry: ~datetime.datetime or str or None
+        :param start:
+            The time at which the shared access signature becomes valid. If
+            omitted, start time for this call is assumed to be the time when the
+            storage service receives the request. The provided datetime will always
+            be interpreted as UTC.
+        :type start: ~datetime.datetime or str or None
+        :param Optional[str] policy_id:
+            A unique value up to 64 characters in length that correlates to a
+            stored access policy. To create a stored access policy, use
+            set_file_service_properties.
+        :param Optional[str] ip:
+            Specifies an IP address or a range of IP addresses from which to accept requests.
+            If the IP address from which the request originates does not match the IP address
+            or address range specified on the SAS token, the request is not authenticated.
+            For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS
+            restricts the request to those IP addresses.
+        :param Optional[str] protocol:
+            Specifies the protocol permitted for a request made. The default value
+            is https,http. See :class:`~azure.storage.common.models.Protocol` for possible values.
+        :param Optional[str] cache_control:
+            Response header value for Cache-Control when resource is accessed
+            using this shared access signature.
+        :param Optional[str] content_disposition:
+            Response header value for Content-Disposition when resource is accessed
+            using this shared access signature.
+        :param Optional[str] content_encoding:
+            Response header value for Content-Encoding when resource is accessed
+            using this shared access signature.
+        :param Optional[str] content_language:
+            Response header value for Content-Language when resource is accessed
+            using this shared access signature.
+        :param Optional[str] content_type:
+            Response header value for Content-Type when resource is accessed
+            using this shared access signature.
+        :param sts_hook:
+            For debugging purposes only. If provided, the hook is called with the string to sign
+            that was used to generate the SAS.
+        :type sts_hook: Optional[Callable[[str], None]]
+        :returns: The generated SAS token for the file.
+        :rtype: str
+        """
+        resource_path = share_name
+        if directory_name is not None:
+            resource_path += '/' + str(directory_name)
+        if file_name is not None:
+            resource_path += '/' + str(file_name)
+
+        sas = _FileSharedAccessHelper()
+        sas.add_base(permission, expiry, start, ip, protocol, self.x_ms_version)
+        sas.add_id(policy_id)
+        sas.add_resource('f')
+        sas.add_override_response_headers(cache_control, content_disposition,
+                                          content_encoding, content_language,
+                                          content_type)
+        sas.add_resource_signature(self.account_name, self.account_key, resource_path)
+
+        if sts_hook is not None:
+            sts_hook(sas.string_to_sign)
+
+        return sas.get_token()
+
+    def generate_share(
+        self, share_name: str,
+        permission: Optional[Union["ShareSasPermissions", str]] = None,
+        expiry: Optional[Union["datetime", str]] = None,
+        start: Optional[Union["datetime", str]] = None,
+        policy_id: Optional[str] = None,
+        ip: Optional[str] = None,
+        protocol: Optional[str] = None,
+        cache_control: Optional[str] = None,
+        content_disposition: Optional[str] = None,
+        content_encoding: Optional[str] = None,
+        content_language: Optional[str] = None,
+        content_type: Optional[str] = None,
+        sts_hook: Optional[Callable[[str], None]] = None,
+    ) -> str:
+        """
+        Generates a shared access signature for the share.
+        Use the returned signature with the credential parameter of any ShareServiceClient,
+        ShareClient, ShareDirectoryClient, or ShareFileClient.
+
+        :param str share_name:
+            Name of share.
+        :param permission:
+            The permissions associated with the shared access signature. The
+            user is restricted to operations allowed by the permissions.
+            Permissions must be ordered rcwdl.
+            Required unless an id is given referencing a stored access policy
+            which contains this field. This field must be omitted if it has been
+            specified in an associated stored access policy.
+        :type permission: ~azure.storage.fileshare.ShareSasPermissions or str or None
+        :param expiry:
+            The time at which the shared access signature becomes invalid.
+            Required unless an id is given referencing a stored access policy
+            which contains this field. This field must be omitted if it has
+            been specified in an associated stored access policy. Azure will always
+            convert values to UTC. If a date is passed in without timezone info, it
+            is assumed to be UTC.
+        :type expiry: ~datetime.datetime or str or None
+        :param start:
+            The time at which the shared access signature becomes valid. If
+            omitted, start time for this call is assumed to be the time when the
+            storage service receives the request. The provided datetime will always
+            be interpreted as UTC.
+        :type start: ~datetime.datetime or str or None
+        :param Optional[str] policy_id:
+            A unique value up to 64 characters in length that correlates to a
+            stored access policy. To create a stored access policy, use
+            set_file_service_properties.
+        :param Optional[str] ip:
+            Specifies an IP address or a range of IP addresses from which to accept requests.
+            If the IP address from which the request originates does not match the IP address
+            or address range specified on the SAS token, the request is not authenticated.
+            For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS
+            restricts the request to those IP addresses.
+        :param Optional[str] protocol:
+            Specifies the protocol permitted for a request made. The default value
+            is https,http. See :class:`~azure.storage.common.models.Protocol` for possible values.
+        :param Optional[str] cache_control:
+            Response header value for Cache-Control when resource is accessed
+            using this shared access signature.
+        :param Optional[str] content_disposition:
+            Response header value for Content-Disposition when resource is accessed
+            using this shared access signature.
+        :param Optional[str] content_encoding:
+            Response header value for Content-Encoding when resource is accessed
+            using this shared access signature.
+        :param Optional[str] content_language:
+            Response header value for Content-Language when resource is accessed
+            using this shared access signature.
+        :param Optional[str] content_type:
+            Response header value for Content-Type when resource is accessed
+            using this shared access signature.
+        :param sts_hook:
+            For debugging purposes only. If provided, the hook is called with the string to sign
+            that was used to generate the SAS.
+        :type sts_hook: Optional[Callable[[str], None]]
+        :returns: The generated SAS token for the share.
+        :rtype: str
+        """
+        sas = _FileSharedAccessHelper()
+        sas.add_base(permission, expiry, start, ip, protocol, self.x_ms_version)
+        sas.add_id(policy_id)
+        sas.add_resource('s')
+        sas.add_override_response_headers(cache_control, content_disposition,
+                                          content_encoding, content_language,
+                                          content_type)
+        sas.add_resource_signature(self.account_name, self.account_key, share_name)
+
+        if sts_hook is not None:
+            sts_hook(sas.string_to_sign)
+
+        return sas.get_token()
+
+
+class _FileSharedAccessHelper(_SharedAccessHelper):
+
+    def add_resource_signature(self, account_name, account_key, path):
+        def get_value_to_append(query):
+            return_value = self.query_dict.get(query) or ''
+            return return_value + '\n'
+
+        if path[0] != '/':
+            path = '/' + path
+
+        canonicalized_resource = '/file/' + account_name + path + '\n'
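+        # e.g. account_name="myaccount" and path="/myshare/dir/file.txt" give
+        # canonicalized_resource "/file/myaccount/myshare/dir/file.txt\n".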
+
+        # Form the string to sign from shared_access_policy and canonicalized
+        # resource. The order of values is important.
+        string_to_sign = \
+            (get_value_to_append(QueryStringConstants.SIGNED_PERMISSION) +
+             get_value_to_append(QueryStringConstants.SIGNED_START) +
+             get_value_to_append(QueryStringConstants.SIGNED_EXPIRY) +
+             canonicalized_resource +
+             get_value_to_append(QueryStringConstants.SIGNED_IDENTIFIER) +
+             get_value_to_append(QueryStringConstants.SIGNED_IP) +
+             get_value_to_append(QueryStringConstants.SIGNED_PROTOCOL) +
+             get_value_to_append(QueryStringConstants.SIGNED_VERSION) +
+             get_value_to_append(QueryStringConstants.SIGNED_CACHE_CONTROL) +
+             get_value_to_append(QueryStringConstants.SIGNED_CONTENT_DISPOSITION) +
+             get_value_to_append(QueryStringConstants.SIGNED_CONTENT_ENCODING) +
+             get_value_to_append(QueryStringConstants.SIGNED_CONTENT_LANGUAGE) +
+             get_value_to_append(QueryStringConstants.SIGNED_CONTENT_TYPE))
+
+        # remove the trailing newline
+        if string_to_sign[-1] == '\n':
+            string_to_sign = string_to_sign[:-1]
+
+        self._add_query(QueryStringConstants.SIGNED_SIGNATURE,
+                        sign_string(account_key, string_to_sign))
+        self.string_to_sign = string_to_sign
+
+
+def generate_account_sas(
+    account_name: str,
+    account_key: str,
+    resource_types: Union["ResourceTypes", str],
+    permission: Union["AccountSasPermissions", str],
+    expiry: Union["datetime", str],
+    start: Optional[Union["datetime", str]] = None,
+    ip: Optional[str] = None,
+    *,
+    services: Union[Services, str] = Services(fileshare=True),
+    sts_hook: Optional[Callable[[str], None]] = None,
+    **kwargs: Any
+) -> str:
+    """Generates a shared access signature for the file service.
+
+    Use the returned signature with the credential parameter of any ShareServiceClient,
+    ShareClient, ShareDirectoryClient, or ShareFileClient.
+
+    :param str account_name:
+        The storage account name used to generate the shared access signature.
+    :param str account_key:
+        The account key, also called shared key or access key, to generate the shared access signature.
+    :param resource_types:
+        Specifies the resource types that are accessible with the account SAS.
+    :type resource_types: ~azure.storage.fileshare.ResourceTypes or str
+    :param permission:
+        The permissions associated with the shared access signature. The
+        user is restricted to operations allowed by the permissions.
+    :type permission: ~azure.storage.fileshare.AccountSasPermissions or str
+    :param expiry:
+        The time at which the shared access signature becomes invalid.
+        The provided datetime will always be interpreted as UTC.
+    :type expiry: ~datetime.datetime or str
+    :param start:
+        The time at which the shared access signature becomes valid. If
+        omitted, start time for this call is assumed to be the time when the
+        storage service receives the request. The provided datetime will always
+        be interpreted as UTC.
+    :type start: ~datetime.datetime or str or None
+    :param str ip:
+        Specifies an IP address or a range of IP addresses from which to accept requests.
+        If the IP address from which the request originates does not match the IP address
+        or address range specified on the SAS token, the request is not authenticated.
+        For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS
+        restricts the request to those IP addresses.
+    :keyword Union[Services, str] services:
+        Specifies the services that the Shared Access Signature (sas) token will be able to be utilized with.
+        Will default to only this package (i.e. fileshare) if not provided.
+    :keyword str protocol:
+        Specifies the protocol permitted for a request made. The default value is https.
+    :keyword sts_hook:
+        For debugging purposes only. If provided, the hook is called with the string to sign
+        that was used to generate the SAS.
+    :paramtype sts_hook: Optional[Callable[[str], None]]
+    :return: A Shared Access Signature (sas) token.
+    :rtype: str
+
+    .. admonition:: Example:
+
+        .. literalinclude:: ../samples/file_samples_authentication.py
+            :start-after: [START generate_sas_token]
+            :end-before: [END generate_sas_token]
+            :language: python
+            :dedent: 8
+            :caption: Generate a sas token.
+    """
+    sas = SharedAccessSignature(account_name, account_key)
+    return sas.generate_account(
+        services=services,
+        resource_types=resource_types,
+        permission=permission,
+        expiry=expiry,
+        start=start,
+        ip=ip,
+        sts_hook=sts_hook,
+        **kwargs
+    )
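+
+
+# Illustrative usage (account name and key are placeholders; the imported
+# models are the public azure.storage.fileshare types):
+#
+#   from datetime import datetime, timedelta, timezone
+#   from azure.storage.fileshare import AccountSasPermissions, ResourceTypes
+#
+#   sas_token = generate_account_sas(
+#       account_name="myaccount",
+#       account_key="<account-key>",
+#       resource_types=ResourceTypes(service=True, object=True),
+#       permission=AccountSasPermissions(read=True, list=True),
+#       expiry=datetime.now(timezone.utc) + timedelta(hours=1),
+#   )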
+
+
+def generate_share_sas(
+    account_name: str,
+    share_name: str,
+    account_key: str,
+    permission: Optional[Union["ShareSasPermissions", str]] = None,
+    expiry: Optional[Union["datetime", str]] = None,
+    start: Optional[Union["datetime", str]] = None,
+    policy_id: Optional[str] = None,
+    ip: Optional[str] = None,
+    *,
+    sts_hook: Optional[Callable[[str], None]] = None,
+    **kwargs: Any
+) -> str:
+    """Generates a shared access signature for a share.
+
+    Use the returned signature with the credential parameter of any ShareServiceClient,
+    ShareClient, ShareDirectoryClient, or ShareFileClient.
+
+    :param str account_name:
+        The storage account name used to generate the shared access signature.
+    :param str share_name:
+        The name of the share.
+    :param str account_key:
+        The account key, also called shared key or access key, to generate the shared access signature.
+    :param permission:
+        The permissions associated with the shared access signature. The
+        user is restricted to operations allowed by the permissions.
+        Permissions must be ordered rcwdl.
+        Required unless an id is given referencing a stored access policy
+        which contains this field. This field must be omitted if it has been
+        specified in an associated stored access policy.
+    :type permission: ~azure.storage.fileshare.ShareSasPermissions or str or None
+    :param expiry:
+        The time at which the shared access signature becomes invalid.
+        Required unless an id is given referencing a stored access policy
+        which contains this field. This field must be omitted if it has
+        been specified in an associated stored access policy. Azure will always
+        convert values to UTC. If a date is passed in without timezone info, it
+        is assumed to be UTC.
+    :type expiry: ~datetime.datetime or str or None
+    :param start:
+        The time at which the shared access signature becomes valid. If
+        omitted, start time for this call is assumed to be the time when the
+        storage service receives the request. The provided datetime will always
+        be interpreted as UTC.
+    :type start: ~datetime.datetime or str or None
+    :param Optional[str] policy_id:
+        A unique value up to 64 characters in length that correlates to a
+        stored access policy. To create a stored access policy, use
+        :func:`~azure.storage.fileshare.ShareClient.set_share_access_policy`.
+    :param Optional[str] ip:
+        Specifies an IP address or a range of IP addresses from which to accept requests.
+        If the IP address from which the request originates does not match the IP address
+        or address range specified on the SAS token, the request is not authenticated.
+        For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS
+        restricts the request to those IP addresses.
+    :keyword str cache_control:
+        Response header value for Cache-Control when resource is accessed
+        using this shared access signature.
+    :keyword str content_disposition:
+        Response header value for Content-Disposition when resource is accessed
+        using this shared access signature.
+    :keyword str content_encoding:
+        Response header value for Content-Encoding when resource is accessed
+        using this shared access signature.
+    :keyword str content_language:
+        Response header value for Content-Language when resource is accessed
+        using this shared access signature.
+    :keyword str content_type:
+        Response header value for Content-Type when resource is accessed
+        using this shared access signature.
+    :keyword str protocol:
+        Specifies the protocol permitted for a request made. The default value is https.
+    :keyword sts_hook:
+        For debugging purposes only. If provided, the hook is called with the string to sign
+        that was used to generate the SAS.
+    :paramtype sts_hook: Optional[Callable[[str], None]]
+    :return: A Shared Access Signature (sas) token.
+    :rtype: str
+    """
+    if not policy_id:
+        if not expiry:
+            raise ValueError("'expiry' parameter must be provided when not using a stored access policy.")
+        if not permission:
+            raise ValueError("'permission' parameter must be provided when not using a stored access policy.")
+    sas = FileSharedAccessSignature(account_name, account_key)
+    return sas.generate_share(
+        share_name=share_name,
+        permission=permission,
+        expiry=expiry,
+        start=start,
+        policy_id=policy_id,
+        ip=ip,
+        sts_hook=sts_hook,
+        **kwargs
+    )
+
+
+def generate_file_sas(
+    account_name: str,
+    share_name: str,
+    file_path: List[str],
+    account_key: str,
+    permission: Optional[Union["FileSasPermissions", str]] = None,
+    expiry: Optional[Union["datetime", str]] = None,
+    start: Optional[Union["datetime", str]] = None,
+    policy_id: Optional[str] = None,
+    ip: Optional[str] = None,
+    *,
+    sts_hook: Optional[Callable[[str], None]] = None,
+    **kwargs: Any
+) -> str:
+    """Generates a shared access signature for a file.
+
+    Use the returned signature with the credential parameter of any ShareServiceClient,
+    ShareClient, ShareDirectoryClient, or ShareFileClient.
+
+    :param str account_name:
+        The storage account name used to generate the shared access signature.
+    :param str share_name:
+        The name of the share.
+    :param file_path:
+        The file path represented as a list of path segments, including the file name.
+    :type file_path: List[str]
+    :param str account_key:
+        The account key, also called shared key or access key, to generate the shared access signature.
+    :param permission:
+        The permissions associated with the shared access signature. The
+        user is restricted to operations allowed by the permissions.
+        Permissions must be ordered rcwd.
+        Required unless an id is given referencing a stored access policy
+        which contains this field. This field must be omitted if it has been
+        specified in an associated stored access policy.
+    :type permission: ~azure.storage.fileshare.FileSasPermissions or str or None
+    :param expiry:
+        The time at which the shared access signature becomes invalid.
+        Required unless an id is given referencing a stored access policy
+        which contains this field. This field must be omitted if it has
+        been specified in an associated stored access policy. Azure will always
+        convert values to UTC. If a date is passed in without timezone info, it
+        is assumed to be UTC.
+    :type expiry: ~datetime.datetime or str or None
+    :param start:
+        The time at which the shared access signature becomes valid. If
+        omitted, start time for this call is assumed to be the time when the
+        storage service receives the request. The provided datetime will always
+        be interpreted as UTC.
+    :type start: ~datetime.datetime or str or None
+    :param Optional[str] policy_id:
+        A unique value up to 64 characters in length that correlates to a
+        stored access policy.
+    :param Optional[str] ip:
+        Specifies an IP address or a range of IP addresses from which to accept requests.
+        If the IP address from which the request originates does not match the IP address
+        or address range specified on the SAS token, the request is not authenticated.
+        For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS
+        restricts the request to those IP addresses.
+    :keyword str cache_control:
+        Response header value for Cache-Control when resource is accessed
+        using this shared access signature.
+    :keyword str content_disposition:
+        Response header value for Content-Disposition when resource is accessed
+        using this shared access signature.
+    :keyword str content_encoding:
+        Response header value for Content-Encoding when resource is accessed
+        using this shared access signature.
+    :keyword str content_language:
+        Response header value for Content-Language when resource is accessed
+        using this shared access signature.
+    :keyword str content_type:
+        Response header value for Content-Type when resource is accessed
+        using this shared access signature.
+    :keyword str protocol:
+        Specifies the protocol permitted for a request made with the SAS token. The default value is https.
+    :keyword sts_hook:
+        For debugging purposes only. If provided, the hook is called with the string to sign
+        that was used to generate the SAS.
+    :paramtype sts_hook: Optional[Callable[[str], None]]
+    :return: A Shared Access Signature (SAS) token.
+    :rtype: str
+    """
+    if not policy_id:
+        if not expiry:
+            raise ValueError("'expiry' parameter must be provided when not using a stored access policy.")
+        if not permission:
+            raise ValueError("'permission' parameter must be provided when not using a stored access policy.")
+    sas = FileSharedAccessSignature(account_name, account_key)
+    if len(file_path) > 1:
+        dir_path = '/'.join(file_path[:-1])
+    else:
+        dir_path = None
+    return sas.generate_file(
+        share_name=share_name,
+        directory_name=dir_path,
+        file_name=file_path[-1],
+        permission=permission,
+        expiry=expiry,
+        start=start,
+        policy_id=policy_id,
+        ip=ip,
+        sts_hook=sts_hook,
+        **kwargs
+    )
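+
+# Editorial example (not part of the shipped module): a minimal sketch of calling
+# generate_file_sas and passing the result as a credential. The account name, key,
+# share, and path below are placeholders.
+#
+#   from datetime import datetime, timedelta, timezone
+#   from azure.storage.fileshare import FileSasPermissions, ShareFileClient, generate_file_sas
+#
+#   sas = generate_file_sas(
+#       account_name="myaccount",
+#       share_name="myshare",
+#       file_path=["reports", "2025.csv"],  # path segments, file name last
+#       account_key="<account-key>",
+#       permission=FileSasPermissions(read=True),
+#       expiry=datetime.now(timezone.utc) + timedelta(hours=1),
+#   )
+#   client = ShareFileClient(
+#       "https://myaccount.file.core.windows.net", share_name="myshare",
+#       file_path="reports/2025.csv", credential=sas)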
+
+
+def _is_credential_sastoken(credential: Any) -> bool:
+    if not credential or not isinstance(credential, str):
+        return False
+
+    sas_values = QueryStringConstants.to_list()
+    parsed_query = parse_qs(credential.lstrip("?"))
+    if parsed_query and all(k in sas_values for k in parsed_query):
+        return True
+    return False
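+
+# Editorial note: the helper above treats a credential string as a SAS token only
+# when every query parameter name is a known SAS parameter. A rough illustration
+# (parameter names are stand-ins for whatever QueryStringConstants.to_list() returns):
+#
+#   from urllib.parse import parse_qs
+#   parse_qs("?sv=2024-05-04&sig=abc".lstrip("?"))
+#   # -> {'sv': ['2024-05-04'], 'sig': ['abc']}; both keys must appear in
+#   #    QueryStringConstants.to_list() for the string to count as a SAS token.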
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/fileshare/_version.py b/.venv/lib/python3.12/site-packages/azure/storage/fileshare/_version.py
new file mode 100644
index 00000000..79adcbe3
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/fileshare/_version.py
@@ -0,0 +1,7 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+
+VERSION = "12.21.0"
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/fileshare/aio/__init__.py b/.venv/lib/python3.12/site-packages/azure/storage/fileshare/aio/__init__.py
new file mode 100644
index 00000000..73393b81
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/fileshare/aio/__init__.py
@@ -0,0 +1,20 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+
+from ._file_client_async import ShareFileClient
+from ._directory_client_async import ShareDirectoryClient
+from ._share_client_async import ShareClient
+from ._share_service_client_async import ShareServiceClient
+from ._lease_async import ShareLeaseClient
+
+
+__all__ = [
+    'ShareFileClient',
+    'ShareDirectoryClient',
+    'ShareClient',
+    'ShareServiceClient',
+    'ShareLeaseClient',
+]
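+
+# Editorial example: a minimal async workflow using these exports. The connection
+# string is a placeholder; from_connection_string and list_shares are assumed from
+# the public fileshare API.
+#
+#   import asyncio
+#   from azure.storage.fileshare.aio import ShareServiceClient
+#
+#   async def main():
+#       async with ShareServiceClient.from_connection_string("<conn-str>") as svc:
+#           async for share in svc.list_shares():
+#               print(share.name)
+#
+#   asyncio.run(main())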
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/fileshare/aio/_directory_client_async.py b/.venv/lib/python3.12/site-packages/azure/storage/fileshare/aio/_directory_client_async.py
new file mode 100644
index 00000000..8673362c
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/fileshare/aio/_directory_client_async.py
@@ -0,0 +1,988 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+# pylint: disable=docstring-keyword-should-match-keyword-only
+
+import functools
+import sys
+import time
+import warnings
+from datetime import datetime
+from typing import (
+    Any, AnyStr, AsyncIterable, cast, Dict, IO, Iterable, Optional, Union,
+    TYPE_CHECKING
+)
+from typing_extensions import Self
+
+from azure.core.async_paging import AsyncItemPaged
+from azure.core.exceptions import HttpResponseError, ResourceNotFoundError
+from azure.core.pipeline import AsyncPipeline
+from azure.core.tracing.decorator import distributed_trace
+from azure.core.tracing.decorator_async import distributed_trace_async
+from .._deserialize import deserialize_directory_properties
+from .._directory_client_helpers import (
+    _format_url,
+    _from_directory_url,
+    _parse_url
+)
+from .._generated.aio import AzureFileStorage
+from .._parser import _datetime_to_str, _get_file_permission, _parse_snapshot
+from .._serialize import get_api_version, get_dest_access_conditions, get_rename_smb_properties
+from .._shared.base_client import parse_query, StorageAccountHostsMixin
+from .._shared.base_client_async import parse_connection_str, AsyncStorageAccountHostsMixin, AsyncTransportWrapper
+from .._shared.policies_async import ExponentialRetry
+from .._shared.request_handlers import add_metadata_headers
+from .._shared.response_handlers import process_storage_error, return_response_headers
+from ._file_client_async import ShareFileClient
+from ._models import DirectoryPropertiesPaged, Handle, HandlesPaged
+
+if sys.version_info >= (3, 8):
+    from typing import Literal
+else:
+    from typing_extensions import Literal
+
+if TYPE_CHECKING:
+    from azure.core.credentials import AzureNamedKeyCredential, AzureSasCredential
+    from azure.core.credentials_async import AsyncTokenCredential
+    from .._models import DirectoryProperties, FileProperties, NTFSAttributes
+
+
+class ShareDirectoryClient(AsyncStorageAccountHostsMixin, StorageAccountHostsMixin):  # type: ignore [misc]
+    """A client to interact with a specific directory, although it may not yet exist.
+
+    For operations relating to a specific subdirectory or file in this share, the clients for those
+    entities can also be retrieved using the :func:`get_subdirectory_client` and :func:`get_file_client` functions.
+
+    :param str account_url:
+        The URI to the storage account. In order to create a client given the full URI to the directory,
+        use the :func:`from_directory_url` classmethod.
+    :param share_name:
+        The name of the share for the directory.
+    :type share_name: str
+    :param str directory_path:
+        The directory path for the directory with which to interact.
+        If specified, this value will override a directory value specified in the directory URL.
+    :param str snapshot:
+        An optional share snapshot on which to operate. This can be the snapshot ID string
+        or the response returned from :func:`ShareClient.create_snapshot`.
+    :param credential:
+        The credentials with which to authenticate. This is optional if the
+        account URL already has a SAS token. The value can be a SAS token string,
+        an instance of an AzureSasCredential or AzureNamedKeyCredential from azure.core.credentials,
+        an account shared access key, or an instance of an AsyncTokenCredential class from azure.identity.
+        If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential
+        - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError.
+        If using an instance of AzureNamedKeyCredential, "name" should be the storage account name, and "key"
+        should be the storage account key.
+    :type credential:
+        ~azure.core.credentials.AzureNamedKeyCredential or
+        ~azure.core.credentials.AzureSasCredential or
+        ~azure.core.credentials_async.AsyncTokenCredential or
+        str or dict[str, str] or None
+    :keyword token_intent:
+        Required when using `AsyncTokenCredential` for authentication and ignored for other forms of authentication.
+        Specifies the intent for all requests when using `AsyncTokenCredential` authentication. Possible values are:
+
+        backup - Specifies requests are intended for backup/admin type operations, meaning that all file/directory
+                 ACLs are bypassed and full permissions are granted. The user must also have the required RBAC permission.
+
+    :paramtype token_intent: Literal['backup']
+    :keyword bool allow_trailing_dot: If true, the trailing dot will not be trimmed from the target URI.
+    :keyword bool allow_source_trailing_dot: If true, the trailing dot will not be trimmed from the source URI.
+    :keyword str api_version:
+        The Storage API version to use for requests. Default value is the most recent service version that is
+        compatible with the current SDK. Setting to an older version may result in reduced feature compatibility.
+
+        .. versionadded:: 12.1.0
+
+    :keyword str secondary_hostname:
+        The hostname of the secondary endpoint.
+    :keyword int max_range_size: The maximum range size used for a file upload. Defaults to 4*1024*1024.
+    :keyword str audience: The audience to use when requesting tokens for Azure Active Directory
+        authentication. Only has an effect when credential is of type AsyncTokenCredential. The value could be
+        https://storage.azure.com/ (default) or https://<account>.file.core.windows.net.
+    """
+    def __init__(
+        self, account_url: str,
+        share_name: str,
+        directory_path: str,
+        snapshot: Optional[Union[str, Dict[str, Any]]] = None,
+        credential: Optional[Union[str, Dict[str, str], "AzureNamedKeyCredential", "AzureSasCredential", "AsyncTokenCredential"]] = None,  # pylint: disable=line-too-long
+        *,
+        token_intent: Optional[Literal['backup']] = None,
+        **kwargs: Any
+    ) -> None:
+        kwargs['retry_policy'] = kwargs.get('retry_policy') or ExponentialRetry(**kwargs)
+        loop = kwargs.pop('loop', None)
+        if loop and sys.version_info >= (3, 8):
+            warnings.warn("The 'loop' parameter was deprecated from asyncio's high-level"
+            "APIs in Python 3.8 and is no longer supported.", DeprecationWarning)
+        if hasattr(credential, 'get_token') and not token_intent:
+            raise ValueError("'token_intent' keyword is required when 'credential' is an AsyncTokenCredential.")
+        parsed_url = _parse_url(account_url, share_name)
+        path_snapshot, sas_token = parse_query(parsed_url.query)
+        if not sas_token and not credential:
+            raise ValueError(
+                'You need to provide either an account shared key or SAS token when creating a storage service.')
+        self.snapshot = _parse_snapshot(snapshot, path_snapshot)
+        self.share_name = share_name
+        self.directory_path = directory_path
+
+        self._query_str, credential = self._format_query_string(
+            sas_token, credential, share_snapshot=self.snapshot)
+        super(ShareDirectoryClient, self).__init__(
+            parsed_url, service='file-share', credential=credential, **kwargs)
+        self.allow_trailing_dot = kwargs.pop('allow_trailing_dot', None)
+        self.allow_source_trailing_dot = kwargs.pop('allow_source_trailing_dot', None)
+        self.file_request_intent = token_intent
+        self._client = AzureFileStorage(url=self.url, base_url=self.url, pipeline=self._pipeline,
+                                        allow_trailing_dot=self.allow_trailing_dot,
+                                        allow_source_trailing_dot=self.allow_source_trailing_dot,
+                                        file_request_intent=self.file_request_intent)
+        self._client._config.version = get_api_version(kwargs)  # type: ignore [assignment]
+
+    @classmethod
+    def from_directory_url(
+        cls, directory_url: str,
+        snapshot: Optional[Union[str, Dict[str, Any]]] = None,
+        credential: Optional[Union[str, Dict[str, str], "AzureNamedKeyCredential", "AzureSasCredential", "AsyncTokenCredential"]] = None,  # pylint: disable=line-too-long
+        **kwargs: Any
+    ) -> Self:
+        """Create a ShareDirectoryClient from a directory url.
+
+        :param str directory_url:
+            The full URI to the directory.
+        :param str snapshot:
+            An optional share snapshot on which to operate. This can be the snapshot ID string
+            or the response returned from :func:`ShareClient.create_snapshot`.
+        :param credential:
+            The credentials with which to authenticate. This is optional if the
+            account URL already has a SAS token. The value can be a SAS token string,
+            an instance of an AzureSasCredential or AzureNamedKeyCredential from azure.core.credentials,
+            an account shared access key, or an instance of an AsyncTokenCredential class from azure.identity.
+            If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential
+            - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError.
+            If using an instance of AzureNamedKeyCredential, "name" should be the storage account name, and "key"
+            should be the storage account key.
+        :type credential:
+            ~azure.core.credentials.AzureNamedKeyCredential or
+            ~azure.core.credentials.AzureSasCredential or
+            ~azure.core.credentials_async.AsyncTokenCredential or
+            str or dict[str, str] or None
+        :keyword str audience: The audience to use when requesting tokens for Azure Active Directory authentication.
+            Only has an effect when credential is of type AsyncTokenCredential. The value could be
+            https://storage.azure.com/ (default) or https://<account>.file.core.windows.net.
+        :returns: A directory client.
+        :rtype: ~azure.storage.fileshare.ShareDirectoryClient
+        """
+        account_url, share_name, directory_path, snapshot = _from_directory_url(directory_url, snapshot)
+        return cls(
+            account_url=account_url, share_name=share_name, directory_path=directory_path,
+            snapshot=snapshot, credential=credential, **kwargs)
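+
+    # Editorial example (hypothetical URL): building a client directly from a
+    # directory URL that already carries a SAS token, so no credential is passed.
+    #
+    #   client = ShareDirectoryClient.from_directory_url(
+    #       "https://myaccount.file.core.windows.net/myshare/parent/dir?<sas>")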
+
+    def _format_url(self, hostname: str) -> str:
+        """Format the endpoint URL according to the current location mode hostname.
+
+        :param str hostname:
+            The hostname of the current location mode.
+        :returns: A formatted endpoint URL including the current location mode hostname.
+        :rtype: str
+        """
+        return _format_url(self.scheme, hostname, self.share_name, self.directory_path, self._query_str)
+
+    @classmethod
+    def from_connection_string(
+        cls, conn_str: str,
+        share_name: str,
+        directory_path: str,
+        credential: Optional[Union[str, Dict[str, str], "AzureNamedKeyCredential", "AzureSasCredential", "AsyncTokenCredential"]] = None,  # pylint: disable=line-too-long
+        **kwargs: Any
+    ) -> Self:
+        """Create ShareDirectoryClient from a Connection String.
+
+        :param str conn_str:
+            A connection string to an Azure Storage account.
+        :param share_name: The name of the share.
+        :type share_name: str
+        :param str directory_path:
+            The directory path.
+        :param credential:
+            The credentials with which to authenticate. This is optional if the
+            account URL already has a SAS token. The value can be a SAS token string,
+            an instance of an AzureSasCredential or AzureNamedKeyCredential from azure.core.credentials,
+            an account shared access key, or an instance of an AsyncTokenCredential class from azure.identity.
+            If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential
+            - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError.
+            If using an instance of AzureNamedKeyCredential, "name" should be the storage account name, and "key"
+            should be the storage account key.
+        :type credential:
+            Optional[Union[str, dict[str, str], AzureNamedKeyCredential, AzureSasCredential, "AsyncTokenCredential"]]
+        :keyword str audience: The audience to use when requesting tokens for Azure Active Directory authentication.
+            Only has an effect when credential is of type AsyncTokenCredential. The value could be
+            https://storage.azure.com/ (default) or https://<account>.file.core.windows.net.
+        :returns: A directory client.
+        :rtype: ~azure.storage.fileshare.aio.ShareDirectoryClient
+        """
+        account_url, secondary, credential = parse_connection_str(conn_str, credential, 'file')
+        if 'secondary_hostname' not in kwargs:
+            kwargs['secondary_hostname'] = secondary
+        return cls(
+            account_url, share_name=share_name, directory_path=directory_path, credential=credential, **kwargs)
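+
+    # Editorial example (placeholder connection string). Passing an explicit
+    # 'secondary_hostname' kwarg overrides the value parsed from the string.
+    #
+    #   client = ShareDirectoryClient.from_connection_string(
+    #       "<connection-string>", share_name="myshare", directory_path="parent/dir")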
+
+    def get_file_client(self, file_name: str, **kwargs: Any) -> ShareFileClient:
+        """Get a client to interact with a specific file.
+
+        The file need not already exist.
+
+        :param str file_name:
+            The name of the file.
+        :returns: A File Client.
+        :rtype: ~azure.storage.fileshare.ShareFileClient
+        """
+        if self.directory_path:
+            file_name = self.directory_path.rstrip('/') + "/" + file_name
+
+        _pipeline = AsyncPipeline(
+            transport=AsyncTransportWrapper(self._pipeline._transport),  # pylint: disable=protected-access
+            policies=self._pipeline._impl_policies  # type: ignore [arg-type] # pylint: disable=protected-access
+        )
+        return ShareFileClient(
+            self.url, file_path=file_name, share_name=self.share_name, snapshot=self.snapshot,
+            credential=self.credential, api_version=self.api_version, _hosts=self._hosts, _configuration=self._config,
+            _pipeline=_pipeline, _location_mode=self._location_mode, allow_trailing_dot=self.allow_trailing_dot,
+            allow_source_trailing_dot=self.allow_source_trailing_dot, token_intent=self.file_request_intent,
+            **kwargs)
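+
+    # Editorial example: the returned ShareFileClient reuses this client's
+    # pipeline and credential, so no extra authentication is required.
+    #
+    #   file_client = directory_client.get_file_client("report.txt")
+    #   # resolves to "<directory_path>/report.txt" within the share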
+
+    def get_subdirectory_client(self, directory_name: str, **kwargs) -> "ShareDirectoryClient":
+        """Get a client to interact with a specific subdirectory.
+
+        The subdirectory need not already exist.
+
+        :param str directory_name:
+            The name of the subdirectory.
+        :returns: A Directory Client.
+        :rtype: ~azure.storage.fileshare.aio.ShareDirectoryClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/file_samples_directory_async.py
+                :start-after: [START get_subdirectory_client]
+                :end-before: [END get_subdirectory_client]
+                :language: python
+                :dedent: 16
+                :caption: Gets the subdirectory client.
+        """
+        directory_path = directory_name
+        if self.directory_path:
+            directory_path = self.directory_path.rstrip('/') + "/" + directory_name
+
+        _pipeline = AsyncPipeline(
+            transport=AsyncTransportWrapper(self._pipeline._transport),  # pylint: disable=protected-access
+            policies=self._pipeline._impl_policies  # type: ignore [arg-type] # pylint: disable=protected-access
+        )
+        return ShareDirectoryClient(
+            self.url, share_name=self.share_name, directory_path=directory_path, snapshot=self.snapshot,
+            credential=self.credential, api_version=self.api_version, _hosts=self._hosts, _configuration=self._config,
+            _pipeline=_pipeline, _location_mode=self._location_mode, allow_trailing_dot=self.allow_trailing_dot,
+            allow_source_trailing_dot=self.allow_source_trailing_dot, token_intent=self.file_request_intent,
+            **kwargs)
+
+    @distributed_trace_async
+    async def create_directory(self, **kwargs: Any) -> Dict[str, Any]:
+        """Creates a new directory under the directory referenced by the client.
+
+        :keyword file_attributes:
+            The file system attributes for files and directories.
+            If not set, the default value is "None" and the attributes will be set to "Archive".
+            Example when the var type is str: 'Temporary|Archive'.
+            The file_attributes value is not case sensitive.
+        :paramtype file_attributes: str or ~azure.storage.fileshare.NTFSAttributes or None
+        :keyword file_creation_time: Creation time for the directory.
+        :paramtype file_creation_time: str or ~datetime.datetime or None
+        :keyword file_last_write_time: Last write time for the directory.
+        :paramtype file_last_write_time: str or ~datetime.datetime or None
+        :keyword str file_permission:
+            If specified, the permission (security descriptor) shall be set
+            for the directory/file. This header can be used if Permission size is
+            <= 8KB, else file-permission-key header shall be used.
+            Default value: Inherit. If SDDL is specified as input, it must have owner, group and dacl.
+            Note: Only one of the file-permission or file-permission-key should be specified.
+        :keyword str file_permission_key:
+            Key of the permission to be set for the directory/file.
+            Note: Only one of the file-permission or file-permission-key should be specified.
+        :keyword file_permission_format:
+            Specifies the format in which the permission is returned. If not specified, SDDL will be the default.
+        :paramtype file_permission_format: Literal['sddl', 'binary']
+        :keyword file_change_time:
+            Change time for the directory. If not specified, change time will be set to the current date/time.
+
+            .. versionadded:: 12.8.0
+
+                This parameter was introduced in API version '2021-06-08'.
+
+        :paramtype file_change_time: str or ~datetime.datetime
+        :keyword metadata:
+            Name-value pairs associated with the directory as metadata.
+        :paramtype metadata: Optional[dict[str, str]]
+        :keyword str owner:
+            NFS only. The owner of the directory.
+        :keyword str group:
+            NFS only. The owning group of the directory.
+        :keyword str file_mode:
+            NFS only. The file mode of the directory.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :returns: Directory-updated property dict (Etag and last modified).
+        :rtype: dict[str, Any]
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/file_samples_directory_async.py
+                :start-after: [START create_directory]
+                :end-before: [END create_directory]
+                :language: python
+                :dedent: 16
+                :caption: Creates a directory.
+        """
+        metadata = kwargs.pop('metadata', None)
+        timeout = kwargs.pop('timeout', None)
+        headers = kwargs.pop('headers', {})
+        headers.update(add_metadata_headers(metadata))
+
+        file_attributes = kwargs.pop('file_attributes', None)
+        file_creation_time = kwargs.pop('file_creation_time', None)
+        file_last_write_time = kwargs.pop('file_last_write_time', None)
+        file_change_time = kwargs.pop('file_change_time', None)
+        file_permission = kwargs.pop('file_permission', None)
+        file_permission_key = kwargs.pop('file_permission_key', None)
+        file_permission = _get_file_permission(file_permission, file_permission_key, None)
+
+        try:
+            return cast(Dict[str, Any], await self._client.directory.create(
+                file_attributes=str(file_attributes) if file_attributes is not None else file_attributes,
+                file_creation_time=_datetime_to_str(file_creation_time),
+                file_last_write_time=_datetime_to_str(file_last_write_time),
+                file_change_time=_datetime_to_str(file_change_time),
+                file_permission=file_permission,
+                file_permission_key=file_permission_key,
+                timeout=timeout,
+                cls=return_response_headers,
+                headers=headers,
+                **kwargs))
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    @distributed_trace_async
+    async def delete_directory(self, **kwargs: Any) -> None:
+        """Marks the directory for deletion. The directory is
+        later deleted during garbage collection.
+
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :rtype: None
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/file_samples_directory_async.py
+                :start-after: [START delete_directory]
+                :end-before: [END delete_directory]
+                :language: python
+                :dedent: 16
+                :caption: Deletes a directory.
+        """
+        timeout = kwargs.pop('timeout', None)
+        try:
+            await self._client.directory.delete(timeout=timeout, **kwargs)
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    @distributed_trace_async
+    async def rename_directory(self, new_name: str, **kwargs: Any) -> "ShareDirectoryClient":
+        """
+        Rename the source directory.
+
+        :param str new_name:
+            The new directory name.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :keyword bool overwrite:
+            A boolean value specifying whether this request may overwrite the destination
+            file if it already exists. If true, the rename will succeed and will overwrite
+            the destination file. If not provided, or if false and the destination file
+            does exist, the request will not overwrite the destination file and the
+            rename will fail. If the destination file doesn't exist, the rename will succeed.
+        :keyword bool ignore_read_only:
+            A boolean value that specifies whether the ReadOnly attribute on a preexisting destination
+            file should be respected. If true, the rename will succeed, otherwise, a previous file at the
+            destination with the ReadOnly attribute set will cause the rename to fail.
+        :keyword str file_permission:
+            If specified, the permission (security descriptor) shall be set for the directory. This header
+            can be used if Permission size is <= 8KB, else file_permission_key shall be used.
+            If SDDL is specified as input, it must have owner, group and dacl.
+            A value of 'preserve' can be passed to preserve source permissions.
+            Note: Only one of the file_permission or file_permission_key should be specified.
+        :keyword str file_permission_key:
+            Key of the permission to be set for the directory.
+            Note: Only one of the file-permission or file-permission-key should be specified.
+        :keyword file_permission_format:
+            Specifies the format in which the permission is returned. If not specified, SDDL will be the default.
+        :paramtype file_permission_format: Literal['sddl', 'binary']
+        :keyword file_attributes:
+            The file system attributes for the directory.
+        :paramtype file_attributes: ~azure.storage.fileshare.NTFSAttributes or str
+        :keyword file_creation_time:
+            Creation time for the directory.
+        :paramtype file_creation_time: ~datetime.datetime or str
+        :keyword file_last_write_time:
+            Last write time for the file.
+        :paramtype file_last_write_time: ~datetime.datetime or str
+        :keyword file_change_time:
+            Change time for the directory. If not specified, change time will be set to the current date/time.
+
+            .. versionadded:: 12.8.0
+
+                This parameter was introduced in API version '2021-06-08'.
+
+        :paramtype file_change_time: str or ~datetime.datetime
+        :keyword Dict[str,str] metadata:
+            A name-value pair to associate with a file storage object.
+        :keyword destination_lease:
+            Required if the destination file has an active lease. Value can be a ShareLeaseClient object
+            or the lease ID as a string.
+        :paramtype destination_lease: ~azure.storage.fileshare.ShareLeaseClient or str
+        :returns: The new Directory Client.
+        :rtype: ~azure.storage.fileshare.ShareDirectoryClient
+        """
+        if not new_name:
+            raise ValueError("Please specify a new directory name.")
+
+        new_name = new_name.strip('/')
+        new_path_and_query = new_name.split('?')
+        new_dir_path = new_path_and_query[0]
+        if len(new_path_and_query) == 2:
+            new_dir_sas = new_path_and_query[1] or self._query_str.strip('?')
+        else:
+            new_dir_sas = self._query_str.strip('?')
+
+        new_directory_client = ShareDirectoryClient(
+            f'{self.scheme}://{self.primary_hostname}', self.share_name, new_dir_path,
+            credential=new_dir_sas or self.credential, api_version=self.api_version,
+            _hosts=self._hosts, _configuration=self._config, _pipeline=self._pipeline,
+            _location_mode=self._location_mode, allow_trailing_dot=self.allow_trailing_dot,
+            allow_source_trailing_dot=self.allow_source_trailing_dot, token_intent=self.file_request_intent
+        )
+
+        kwargs.update(get_rename_smb_properties(kwargs))
+
+        timeout = kwargs.pop('timeout', None)
+        overwrite = kwargs.pop('overwrite', None)
+        metadata = kwargs.pop('metadata', None)
+        headers = kwargs.pop('headers', {})
+        headers.update(add_metadata_headers(metadata))
+
+        destination_access_conditions = get_dest_access_conditions(kwargs.pop('destination_lease', None))
+
+        try:
+            await new_directory_client._client.directory.rename(  # pylint: disable=protected-access
+                self.url,
+                timeout=timeout,
+                replace_if_exists=overwrite,
+                destination_lease_access_conditions=destination_access_conditions,
+                headers=headers,
+                **kwargs)
+
+            return new_directory_client
+        except HttpResponseError as error:
+            process_storage_error(error)
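+
+    # Editorial example (async, placeholder names). A SAS token may be appended
+    # to new_name ("archive/2024?<sas>") to authorize the destination.
+    #
+    #   new_client = await directory_client.rename_directory("archive/2024")
+    #   print(new_client.directory_path)  # "archive/2024"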
+
+    @distributed_trace
+    def list_directories_and_files(
+        self,
+        name_starts_with: Optional[str] = None,
+        **kwargs: Any
+    ) -> AsyncItemPaged[Union["DirectoryProperties", "FileProperties"]]:
+        """Lists all the directories and files under the directory.
+
+        :param str name_starts_with:
+            Filters the results to return only entities whose names
+            begin with the specified prefix.
+        :keyword List[str] include:
+            Include this parameter to specify one or more datasets to include in the response.
+            Possible str values are "timestamps", "Etag", "Attributes", "PermissionKey".
+
+            .. versionadded:: 12.6.0
+
+            This keyword argument was introduced in API version '2020-10-02'.
+
+        :keyword bool include_extended_info:
+            If this is set to true, file id will be returned in listed results.
+
+            .. versionadded:: 12.6.0
+
+            This keyword argument was introduced in API version '2020-10-02'.
+
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :returns: An auto-paging iterable of dict-like DirectoryProperties and FileProperties
+        :rtype: ~azure.core.async_paging.AsyncItemPaged[Union[DirectoryProperties, FileProperties]]
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/file_samples_directory_async.py
+                :start-after: [START lists_directory]
+                :end-before: [END lists_directory]
+                :language: python
+                :dedent: 16
+                :caption: List directories and files.
+        """
+        timeout = kwargs.pop('timeout', None)
+        results_per_page = kwargs.pop('results_per_page', None)
+        command = functools.partial(
+            self._client.directory.list_files_and_directories_segment,
+            sharesnapshot=self.snapshot,
+            timeout=timeout,
+            **kwargs)
+        return AsyncItemPaged(
+            command, prefix=name_starts_with, results_per_page=results_per_page,
+            page_iterator_class=DirectoryPropertiesPaged)
+
+    @distributed_trace
+    def list_handles(self, recursive: bool = False, **kwargs: Any) -> AsyncItemPaged["Handle"]:
+        """Lists opened handles on a directory or a file under the directory.
+
+        :param bool recursive:
+            Boolean that specifies whether the operation should apply to the directory specified by the client,
+            its files, its subdirectories and their files. Default value is False.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :returns: An auto-paging iterable of Handle
+        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.storage.fileshare.Handle]
+        """
+        timeout = kwargs.pop('timeout', None)
+        results_per_page = kwargs.pop('results_per_page', None)
+        command = functools.partial(
+            self._client.directory.list_handles,
+            sharesnapshot=self.snapshot,
+            timeout=timeout,
+            recursive=recursive,
+            **kwargs)
+        return AsyncItemPaged(
+            command, results_per_page=results_per_page,
+            page_iterator_class=HandlesPaged)
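+
+    # Editorial example: iterating the async pager returned above.
+    #
+    #   async for handle in directory_client.list_handles(recursive=True):
+    #       print(handle.id, handle.path)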
+
+    @distributed_trace_async
+    async def exists(self, **kwargs: Any) -> bool:
+        """
+        Returns True if the directory exists, and False otherwise.
+
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :returns: True if the directory exists, False otherwise.
+        :rtype: bool
+        """
+        try:
+            await self._client.directory.get_properties(**kwargs)
+            return True
+        except HttpResponseError as error:
+            try:
+                process_storage_error(error)
+            except ResourceNotFoundError:
+                return False
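+
+    # Editorial example: exists() swallows only ResourceNotFoundError; any other
+    # storage error raised by process_storage_error propagates to the caller.
+    #
+    #   if not await directory_client.exists():
+    #       await directory_client.create_directory()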
+
+    @distributed_trace_async
+    async def close_handle(self, handle: Union[str, "Handle"], **kwargs: Any) -> Dict[str, int]:
+        """Close an open file handle.
+
+        :param handle:
+            A specific handle to close.
+        :type handle: str or ~azure.storage.fileshare.Handle
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :returns: The number of handles closed (this may be 0 if the specified handle was not found)
+            and the number of handles that failed to close, in a dict.
+        :rtype: dict[str, int]
+        """
+        if isinstance(handle, Handle):
+            handle_id = handle.id
+        else:
+            handle_id = handle
+        if handle_id == '*':
+            raise ValueError("Handle ID '*' is not supported. Use 'close_all_handles' instead.")
+        try:
+            response = await self._client.directory.force_close_handles(
+                handle_id,
+                marker=None,
+                recursive=None,
+                sharesnapshot=self.snapshot,
+                cls=return_response_headers,
+                **kwargs
+            )
+            return {
+                'closed_handles_count': response.get('number_of_handles_closed', 0),
+                'failed_handles_count': response.get('number_of_handles_failed', 0)
+            }
+        except HttpResponseError as error:
+            process_storage_error(error)
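+
+    # Editorial example: closing a single handle discovered via list_handles.
+    #
+    #   async for handle in directory_client.list_handles():
+    #       counts = await directory_client.close_handle(handle)
+    #       print(counts['closed_handles_count'], counts['failed_handles_count'])
+    #       break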
+
+    @distributed_trace_async
+    async def close_all_handles(self, recursive: bool = False, **kwargs: Any) -> Dict[str, int]:
+        """Close any open file handles.
+
+        This operation will block until the service has closed all open handles.
+
+        :param bool recursive:
+            Boolean that specifies whether the operation should apply to the directory specified by the client,
+            its files, its subdirectories and their files. Default value is False.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :returns: The total number of handles closed and the number of handles
+            that failed to close, in a dict.
+        :rtype: dict[str, int]
+        """
+        timeout = kwargs.pop('timeout', None)
+        start_time = time.time()
+
+        try_close = True
+        continuation_token = None
+        total_closed = 0
+        total_failed = 0
+        while try_close:
+            try:
+                response = await self._client.directory.force_close_handles(
+                    handle_id='*',
+                    timeout=timeout,
+                    marker=continuation_token,
+                    recursive=recursive,
+                    sharesnapshot=self.snapshot,
+                    cls=return_response_headers,
+                    **kwargs
+                )
+            except HttpResponseError as error:
+                process_storage_error(error)
+            continuation_token = response.get('marker')
+            try_close = bool(continuation_token)
+            total_closed += response.get('number_of_handles_closed', 0)
+            total_failed += response.get('number_of_handles_failed', 0)
+            if timeout:
+                timeout = max(0, timeout - (time.time() - start_time))
+        return {
+            'closed_handles_count': total_closed,
+            'failed_handles_count': total_failed
+        }
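+
+    # Editorial example: the loop above re-issues force_close_handles with the
+    # returned 'marker' until the service stops sending a continuation token.
+    #
+    #   counts = await directory_client.close_all_handles(recursive=True)
+    #   print(counts['closed_handles_count'], counts['failed_handles_count'])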
+
+    @distributed_trace_async
+    async def get_directory_properties(self, **kwargs: Any) -> "DirectoryProperties":
+        """Returns all user-defined metadata and system properties for the
+        specified directory. The data returned does not include the directory's
+        list of files.
+
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :returns: DirectoryProperties
+        :rtype: ~azure.storage.fileshare.DirectoryProperties
+        """
+        timeout = kwargs.pop('timeout', None)
+        try:
+            response = cast("DirectoryProperties", await self._client.directory.get_properties(
+                timeout=timeout,
+                cls=deserialize_directory_properties,
+                **kwargs))
+        except HttpResponseError as error:
+            process_storage_error(error)
+        return response
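+
+    # Editorial example (attribute names assumed from DirectoryProperties):
+    #
+    #   props = await directory_client.get_directory_properties()
+    #   print(props.name, props.etag, props.last_modified)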
+
+    @distributed_trace_async
+    async def set_directory_metadata(self, metadata: Dict[str, Any], **kwargs: Any) -> Dict[str, Any]:
+        """Sets the metadata for the directory.
+
+        Each call to this operation replaces all existing metadata
+        attached to the directory. To remove all metadata from the directory,
+        call this operation with an empty metadata dict.
+
+        :param metadata:
+            Name-value pairs associated with the directory as metadata.
+        :type metadata: dict[str, str]
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :returns: Directory-updated property dict (Etag and last modified).
+        :rtype: dict[str, Any]
+        """
+        timeout = kwargs.pop('timeout', None)
+        headers = kwargs.pop('headers', {})
+        headers.update(add_metadata_headers(metadata))
+        try:
+            return cast(Dict[str, Any], await self._client.directory.set_metadata(
+                timeout=timeout,
+                cls=return_response_headers,
+                headers=headers,
+                **kwargs))
+        except HttpResponseError as error:
+            process_storage_error(error)
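+
+    # Editorial example: metadata is replaced wholesale on every call.
+    #
+    #   await directory_client.set_directory_metadata({"project": "alpha"})
+    #   await directory_client.set_directory_metadata({})  # clears all metadata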
+
+    @distributed_trace_async
+    async def set_http_headers(
+        self, file_attributes: Optional[Union[str, "NTFSAttributes"]] = None,
+        file_creation_time: Optional[Union[str, datetime]] = None,
+        file_last_write_time: Optional[Union[str, datetime]] = None,
+        file_permission: Optional[str] = None,
+        permission_key: Optional[str] = None,
+        **kwargs: Any
+    ) -> Dict[str, Any]:
+        """Sets HTTP headers on the directory.
+
+        :param file_attributes:
+            The file system attributes for files and directories.
+            If not set, indicates preservation of existing values.
+            Here is an example for when the var type is str: 'Temporary|Archive'
+        :type file_attributes: str or ~azure.storage.fileshare.NTFSAttributes or None
+        :param file_creation_time: Creation time for the file
+        :type file_creation_time: str or ~datetime.datetime or None
+        :param file_last_write_time: Last write time for the file
+        :type file_last_write_time: str or ~datetime.datetime or None
+        :param file_permission: If specified, the permission (security
+            descriptor) shall be set for the directory/file. This header can be
+            used if Permission size is <= 8KB, else x-ms-file-permission-key
+            header shall be used. Default value: Inherit. If SDDL is specified as
+            input, it must have owner, group and dacl. Note: Only one of the
+            x-ms-file-permission or x-ms-file-permission-key should be specified.
+        :type file_permission: str
+        :param permission_key: Key of the permission to be set for the
+            directory/file. Note: Only one of the x-ms-file-permission or
+            x-ms-file-permission-key should be specified.
+        :type permission_key: str
+        :keyword file_permission_format:
+            Specifies the format in which the permission is returned. If not specified, SDDL will be the default.
+        :paramtype file_permission_format: Literal['sddl', 'binary']
+        :keyword file_change_time:
+            Change time for the directory. If not specified, change time will be set to the current date/time.
+
+            .. versionadded:: 12.8.0
+
+                This parameter was introduced in API version '2021-06-08'.
+
+        :paramtype file_change_time: str or ~datetime.datetime
+        :keyword str owner:
+            NFS only. The owner of the directory.
+        :keyword str group:
+            NFS only. The owning group of the directory.
+        :keyword str file_mode:
+            NFS only. The file mode of the directory.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :returns: File-updated property dict (Etag and last modified).
+        :rtype: dict[str, Any]
+        """
+        timeout = kwargs.pop('timeout', None)
+        file_permission = _get_file_permission(file_permission, permission_key, None)
+        file_change_time = kwargs.pop('file_change_time', None)
+        try:
+            return cast(Dict[str, Any], await self._client.directory.set_properties(
+                file_attributes=str(file_attributes) if file_attributes is not None else file_attributes,
+                file_creation_time=_datetime_to_str(file_creation_time),
+                file_last_write_time=_datetime_to_str(file_last_write_time),
+                file_change_time=_datetime_to_str(file_change_time),
+                file_permission=file_permission,
+                file_permission_key=permission_key,
+                timeout=timeout,
+                cls=return_response_headers,
+                **kwargs))
+        except HttpResponseError as error:
+            process_storage_error(error)
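+
+    # Editorial example (hypothetical values): unset parameters preserve the
+    # existing values, so only the attributes change here.
+    #
+    #   await directory_client.set_http_headers(file_attributes="ReadOnly")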
+
+    @distributed_trace_async
+    async def create_subdirectory(self, directory_name: str, **kwargs: Any) -> "ShareDirectoryClient":
+        """Creates a new subdirectory and returns a client to interact
+        with the subdirectory.
+
+        :param str directory_name:
+            The name of the subdirectory.
+        :keyword metadata:
+            Name-value pairs associated with the subdirectory as metadata.
+        :paramtype metadata: Optional[dict[str, str]]
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :returns: ShareDirectoryClient
+        :rtype: ~azure.storage.fileshare.aio.ShareDirectoryClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/file_samples_directory_async.py
+                :start-after: [START create_subdirectory]
+                :end-before: [END create_subdirectory]
+                :language: python
+                :dedent: 16
+                :caption: Create a subdirectory.
+        """
+        metadata = kwargs.pop('metadata', None)
+        timeout = kwargs.pop('timeout', None)
+        subdir = self.get_subdirectory_client(directory_name)
+        await subdir.create_directory(metadata=metadata, timeout=timeout, **kwargs)
+        return subdir
+
+    @distributed_trace_async
+    async def delete_subdirectory(self, directory_name: str, **kwargs: Any) -> None:
+        """Deletes a subdirectory.
+
+        :param str directory_name:
+            The name of the subdirectory.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :rtype: None
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/file_samples_directory_async.py
+                :start-after: [START delete_subdirectory]
+                :end-before: [END delete_subdirectory]
+                :language: python
+                :dedent: 16
+                :caption: Delete a subdirectory.
+        """
+        timeout = kwargs.pop('timeout', None)
+        subdir = self.get_subdirectory_client(directory_name)
+        await subdir.delete_directory(timeout=timeout, **kwargs)
+
+    @distributed_trace_async
+    async def upload_file(
+        self, file_name: str,
+        data: Union[bytes, str, Iterable[AnyStr], AsyncIterable[AnyStr], IO[AnyStr]],
+        length: Optional[int] = None,
+        **kwargs: Any
+    ) -> ShareFileClient:
+        """Creates a new file in the directory and returns a ShareFileClient
+        to interact with the file.
+
+        :param str file_name:
+            The name of the file.
+        :param data:
+            Content of the file.
+        :type data: Union[bytes, str, Iterable[AnyStr], AsyncIterable[AnyStr], IO[AnyStr]]
+        :param int length:
+            Length of the file in bytes. Specify its maximum size, up to 1 TiB.
+        :keyword metadata:
+            Name-value pairs associated with the file as metadata.
+        :paramtype metadata: Optional[dict[str, str]]
+        :keyword ~azure.storage.fileshare.ContentSettings content_settings:
+            ContentSettings object used to set file properties. Used to set content type, encoding,
+            language, disposition, md5, and cache control.
+        :keyword bool validate_content:
+            If true, calculates an MD5 hash for each range of the file. The storage
+            service checks the hash of the content that has arrived with the hash
+            that was sent. This is primarily valuable for detecting bit flips on
+            the wire when using http instead of https, as https (the default) already
+            validates content. Note that this MD5 hash is not stored with the
+            file.
+        :keyword int max_concurrency:
+            Maximum number of parallel connections to use.
+        :keyword progress_hook:
+            An async callback to track the progress of a long running upload. The signature is
+            function(current: int, total: Optional[int]) where current is the number of bytes transferred
+            so far, and total is the size of the file or None if the size is unknown.
+        :paramtype progress_hook: Callable[[int, Optional[int]], Awaitable[None]]
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :keyword str encoding:
+            Defaults to UTF-8.
+        :returns: ShareFileClient
+        :rtype: ~azure.storage.fileshare.aio.ShareFileClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/file_samples_directory_async.py
+                :start-after: [START upload_file_to_directory]
+                :end-before: [END upload_file_to_directory]
+                :language: python
+                :dedent: 16
+                :caption: Upload a file to a directory.
+        """
+        file_client = self.get_file_client(file_name)
+        await file_client.upload_file(
+            data,
+            length=length,
+            **kwargs)
+        return file_client
+
+    @distributed_trace_async
+    async def delete_file(self, file_name: str, **kwargs: Any) -> None:
+        """Marks the specified file for deletion. The file is later
+        deleted during garbage collection.
+
+        :param str file_name:
+            The name of the file to delete.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :rtype: None
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/file_samples_directory_async.py
+                :start-after: [START delete_file_in_directory]
+                :end-before: [END delete_file_in_directory]
+                :language: python
+                :dedent: 16
+                :caption: Delete a file in a directory.
+        """
+        file_client = self.get_file_client(file_name)
+        await file_client.delete_file(**kwargs)
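
To make the directory-level helpers above concrete, here is a minimal usage sketch. The connection string, share name, directory path, and file name are placeholders, not values taken from this module.

```python
import asyncio

from azure.storage.fileshare.aio import ShareDirectoryClient


async def main() -> None:
    # All names below are placeholders for illustration only.
    directory = ShareDirectoryClient.from_connection_string(
        conn_str="<your-connection-string>",
        share_name="myshare",
        directory_path="docs",
    )
    async with directory:
        # upload_file returns a ShareFileClient bound to the newly created file.
        file_client = await directory.upload_file("report.txt", b"hello world")
        print(file_client.file_name)
        # delete_file only marks the file for deletion; the service removes it
        # later during garbage collection.
        await directory.delete_file("report.txt")


asyncio.run(main())
```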
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/fileshare/aio/_download_async.py b/.venv/lib/python3.12/site-packages/azure/storage/fileshare/aio/_download_async.py
new file mode 100644
index 00000000..278c5e01
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/fileshare/aio/_download_async.py
@@ -0,0 +1,502 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+# pylint: disable=invalid-overridden-method
+# mypy: disable-error-code=override
+
+import asyncio
+import sys
+import warnings
+from io import BytesIO
+from itertools import islice
+from typing import (
+    Any, AsyncIterator, Awaitable, Callable,
+    cast, Generator, IO, Optional, Tuple,
+    TYPE_CHECKING
+)
+
+from azure.core.exceptions import HttpResponseError, ResourceModifiedError
+from .._download import _ChunkDownloader
+from .._shared.request_handlers import validate_and_format_range_headers
+from .._shared.response_handlers import parse_length_from_content_range, process_storage_error
+
+if TYPE_CHECKING:
+    from .._generated.aio.operations import FileOperations
+    from .._models import FileProperties
+    from .._shared.models import StorageConfiguration
+
+
+async def process_content(data: Any) -> bytes:
+    if data is None:
+        raise ValueError("Response cannot be None.")
+
+    try:
+        await data.response.load_body()
+        return cast(bytes, data.response.body())
+    except Exception as error:
+        raise HttpResponseError(message="Download stream interrupted.", response=data.response, error=error) from error
+
+
+class _AsyncChunkDownloader(_ChunkDownloader):
+    def __init__(self, **kwargs: Any) -> None:
+        super(_AsyncChunkDownloader, self).__init__(**kwargs)
+        self.stream_lock_async = asyncio.Lock() if kwargs.get('parallel') else None
+        self.progress_lock_async = asyncio.Lock() if kwargs.get('parallel') else None
+
+    async def process_chunk(self, chunk_start: int) -> None:
+        chunk_start, chunk_end = self._calculate_range(chunk_start)
+        chunk_data = await self._download_chunk(chunk_start, chunk_end - 1)
+        length = chunk_end - chunk_start
+        if length > 0:
+            await self._write_to_stream(chunk_data, chunk_start)
+            await self._update_progress(length)
+
+    async def yield_chunk(self, chunk_start: int) -> bytes:
+        chunk_start, chunk_end = self._calculate_range(chunk_start)
+        return await self._download_chunk(chunk_start, chunk_end - 1)
+
+    async def _update_progress(self, length: int) -> None:
+        if self.progress_lock_async:
+            async with self.progress_lock_async:
+                self.progress_total += length
+        else:
+            self.progress_total += length
+
+        if self.progress_hook:
+            await cast(Callable[[int, Optional[int]], Awaitable[Any]], self.progress_hook)(
+                self.progress_total, self.total_size)
+
+    async def _write_to_stream(self, chunk_data: bytes, chunk_start: int) -> None:
+        if self.stream_lock_async:
+            async with self.stream_lock_async:
+                self.stream.seek(self.stream_start + (chunk_start - self.start_index))
+                self.stream.write(chunk_data)
+        else:
+            self.stream.write(chunk_data)
+
+    async def _download_chunk(self, chunk_start: int, chunk_end: int) -> bytes:
+        range_header, range_validation = validate_and_format_range_headers(
+            chunk_start,
+            chunk_end,
+            check_content_md5=self.validate_content
+        )
+        try:
+            _, response = await cast(Awaitable[Any], self.client.download(
+                range=range_header,
+                range_get_content_md5=range_validation,
+                validate_content=self.validate_content,
+                data_stream_total=self.total_size,
+                download_stream_current=self.progress_total,
+                **self.request_options
+            ))
+            if response.properties.etag != self.etag:
+                raise ResourceModifiedError(message="The file has been modified while downloading.")
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+        chunk_data = await process_content(response)
+        return chunk_data
+
+
+class _AsyncChunkIterator(object):
+    """Async iterator for chunks in file download stream."""
+
+    def __init__(self, size: int, content: bytes, downloader: Optional[_AsyncChunkDownloader], chunk_size: int) -> None:
+        self.size = size
+        self._chunk_size = chunk_size
+        self._current_content = content
+        self._iter_downloader = downloader
+        self._iter_chunks: Optional[Generator[int, None, None]] = None
+        self._complete = size == 0
+
+    def __len__(self) -> int:
+        return self.size
+
+    def __iter__(self) -> None:
+        raise TypeError("Async stream must be iterated asynchronously.")
+
+    def __aiter__(self) -> AsyncIterator[bytes]:
+        return self
+
+    async def __anext__(self) -> bytes:
+        if self._complete:
+            raise StopAsyncIteration("Download complete")
+        if not self._iter_downloader:
+            # cut the data obtained from initial GET into chunks
+            if len(self._current_content) > self._chunk_size:
+                return self._get_chunk_data()
+            self._complete = True
+            return self._current_content
+
+        if not self._iter_chunks:
+            self._iter_chunks = self._iter_downloader.get_chunk_offsets()
+
+        # initial GET result still has more than _chunk_size bytes of data
+        if len(self._current_content) >= self._chunk_size:
+            return self._get_chunk_data()
+
+        try:
+            chunk = next(self._iter_chunks)
+            self._current_content += await self._iter_downloader.yield_chunk(chunk)
+        except StopIteration as exc:
+            self._complete = True
+            # it's likely that there is some data left in self._current_content
+            if self._current_content:
+                return self._current_content
+            raise StopAsyncIteration("Download complete") from exc
+
+        return self._get_chunk_data()
+
+    def _get_chunk_data(self) -> bytes:
+        chunk_data = self._current_content[: self._chunk_size]
+        self._current_content = self._current_content[self._chunk_size:]
+        return chunk_data
+
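
The iterator above serves data in two phases: first it slices whatever the initial GET returned into `_chunk_size` pieces, then it tops the buffer up from the downloader one chunk at a time. The buffer-slicing invariant behind `_get_chunk_data` can be shown in isolation; the sketch below uses made-up names and is not part of the SDK.

```python
# A minimal, self-contained sketch of the slicing pattern used by
# _AsyncChunkIterator._get_chunk_data: serve fixed-size slices from a
# buffer, keeping any remainder for the next call.
def make_chunker(content: bytes, chunk_size: int):
    buffer = content

    def next_chunk() -> bytes:
        nonlocal buffer
        chunk, buffer = buffer[:chunk_size], buffer[chunk_size:]
        return chunk

    return next_chunk


take = make_chunker(b"abcdefghij", 4)
assert take() == b"abcd"
assert take() == b"efgh"
assert take() == b"ij"  # the short tail is returned as the final chunk
```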
+
+class StorageStreamDownloader(object):  # pylint: disable=too-many-instance-attributes
+    """A streaming object to download from Azure Storage."""
+
+    name: str
+    """The name of the file being downloaded."""
+    path: str
+    """The full path of the file."""
+    share: str
+    """The name of the share where the file is."""
+    properties: "FileProperties"
+    """The properties of the file being downloaded. If only a range of the data is being
+        downloaded, this will be reflected in the properties."""
+    size: int
+    """The size of the total data in the stream. This will be the byte range if specified,
+        otherwise the total size of the file."""
+
+    def __init__(
+        self, client: "FileOperations" = None,  # type: ignore [assignment]
+        config: "StorageConfiguration" = None,  # type: ignore [assignment]
+        start_range: Optional[int] = None,
+        end_range: Optional[int] = None,
+        validate_content: bool = None,  # type: ignore [assignment]
+        max_concurrency: int = 1,
+        name: str = None,  # type: ignore [assignment]
+        path: str = None,  # type: ignore [assignment]
+        share: str = None,  # type: ignore [assignment]
+        encoding: Optional[str] = None,
+        **kwargs: Any
+    ) -> None:
+        self.name = name
+        self.path = path
+        self.share = share
+        self.size = 0
+
+        self._client = client
+        self._config = config
+        self._start_range = start_range
+        self._end_range = end_range
+        self._max_concurrency = max_concurrency
+        self._encoding = encoding
+        self._validate_content = validate_content
+        self._progress_hook = kwargs.pop('progress_hook', None)
+        self._request_options = kwargs
+        self._location_mode = None
+        self._download_complete = False
+        self._current_content = b""
+        self._file_size = 0
+        self._response = None
+        self._etag = ""
+
+        # The service only provides transactional MD5s for chunks under 4MB.
+        # If validate_content is on, get only self.MAX_CHUNK_GET_SIZE for the first
+        # chunk so a transactional MD5 can be retrieved.
+        self._first_get_size = self._config.max_single_get_size if not self._validate_content \
+            else self._config.max_chunk_get_size
+        initial_request_start = self._start_range or 0
+        if self._end_range is not None and self._end_range - initial_request_start < self._first_get_size:
+            initial_request_end = self._end_range
+        else:
+            initial_request_end = initial_request_start + self._first_get_size - 1
+
+        self._initial_range = (initial_request_start, initial_request_end)
+
+    def __len__(self) -> int:
+        return self.size
+
+    async def _setup(self) -> None:
+        self._response = await self._initial_request()
+        self.properties = self._response.properties  # type: ignore [attr-defined]
+        self.properties.name = self.name
+        self.properties.path = self.path
+        self.properties.share = self.share
+
+        # Set the content length to the download size instead of the size of
+        # the last range
+        self.properties.size = self.size
+
+        # Overwrite the content range to the user requested range
+        self.properties.content_range = f'bytes {self._start_range}-{self._end_range}/{self._file_size}'
+
+        # Overwrite the content MD5 as it is the MD5 for the last range instead
+        # of the stored MD5
+        # TODO: Set to the stored MD5 when the service returns this
+        self.properties.content_md5 = None  # type: ignore [attr-defined]
+
+        if self.size == 0:
+            self._current_content = b""
+        else:
+            self._current_content = await process_content(self._response)
+
+    async def _initial_request(self):
+        range_header, range_validation = validate_and_format_range_headers(
+            self._initial_range[0],
+            self._initial_range[1],
+            start_range_required=False,
+            end_range_required=False,
+            check_content_md5=self._validate_content)
+
+        try:
+            location_mode, response = cast(Tuple[Optional[str], Any], await self._client.download(
+                range=range_header,
+                range_get_content_md5=range_validation,
+                validate_content=self._validate_content,
+                data_stream_total=None,
+                download_stream_current=0,
+                **self._request_options
+            ))
+
+            # Check the location we read from to ensure we use the same one
+            # for subsequent requests.
+            self._location_mode = location_mode
+
+            # Parse the total file size and adjust the download size if ranges
+            # were specified
+            self._file_size = parse_length_from_content_range(response.properties.content_range)
+            if self._file_size is None:
+                raise ValueError("Required Content-Range response header is missing or malformed.")
+
+            if self._end_range is not None:
+                # Use the length unless it is over the end of the file
+                self.size = min(self._file_size, self._end_range - self._start_range + 1)
+            elif self._start_range is not None:
+                self.size = self._file_size - self._start_range
+            else:
+                self.size = self._file_size
+
+        except HttpResponseError as error:
+            if self._start_range is None and error.response and error.response.status_code == 416:
+                # Get range will fail on an empty file. If the user did not
+                # request a range, do a regular get request in order to get
+                # any properties.
+                try:
+                    _, response = cast(Tuple[Optional[Any], Any], await self._client.download(
+                        validate_content=self._validate_content,
+                        data_stream_total=0,
+                        download_stream_current=0,
+                        **self._request_options
+                    ))
+                except HttpResponseError as e:
+                    process_storage_error(e)
+
+                # Set the download size to empty
+                self.size = 0
+                self._file_size = 0
+            else:
+                process_storage_error(error)
+
+        # If the file is small, the download is complete at this point.
+        # If file size is large, download the rest of the file in chunks.
+        if response.properties.size == self.size:
+            self._download_complete = True
+        self._etag = response.properties.etag
+        return response
+
+    def chunks(self) -> AsyncIterator[bytes]:
+        """
+        Iterate over chunks in the download stream.
+
+        :return: An iterator of the chunks in the download stream.
+        :rtype: AsyncIterator[bytes]
+        """
+        if self.size == 0 or self._download_complete:
+            iter_downloader = None
+        else:
+            data_end = self._file_size
+            if self._end_range is not None:
+                # Use the length unless it is over the end of the file
+                data_end = min(self._file_size, self._end_range + 1)
+            iter_downloader = _AsyncChunkDownloader(
+                client=self._client,
+                total_size=self.size,
+                chunk_size=self._config.max_chunk_get_size,
+                current_progress=self._first_get_size,
+                start_range=self._initial_range[1] + 1,  # Start where the first download ended
+                end_range=data_end,
+                stream=None,
+                parallel=False,
+                validate_content=self._validate_content,
+                use_location=self._location_mode,
+                etag=self._etag,
+                **self._request_options)
+        return _AsyncChunkIterator(
+            size=self.size,
+            content=self._current_content,
+            downloader=iter_downloader,
+            chunk_size=self._config.max_chunk_get_size
+        )
+
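
A hedged sketch of consuming `chunks()` to stream a download without holding the whole file in memory; the connection string and paths are placeholders.

```python
import asyncio

from azure.storage.fileshare.aio import ShareFileClient


async def stream_file() -> None:
    # Placeholder connection details for illustration only.
    async with ShareFileClient.from_connection_string(
        "<your-connection-string>", share_name="myshare", file_path="docs/report.txt"
    ) as file_client:
        downloader = await file_client.download_file()
        async for chunk in downloader.chunks():
            print(f"received {len(chunk)} bytes")


asyncio.run(stream_file())
```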
+    async def readall(self) -> bytes:
+        """Download the contents of this file.
+
+        This operation is blocking until all data is downloaded.
+
+        :return: The entire file content as bytes.
+        :rtype: bytes
+        """
+        stream = BytesIO()
+        await self.readinto(stream)
+        data = stream.getvalue()
+        if self._encoding:
+            return data.decode(self._encoding)  # type: ignore [return-value]
+        return data
+
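
And a sketch of `readall()`: when the downloader is created with an `encoding`, the bytes are decoded before being returned. Connection details are placeholders.

```python
import asyncio

from azure.storage.fileshare.aio import ShareFileClient


async def read_whole_file() -> str:
    # Placeholder connection details; encoding="utf-8" makes readall()
    # return str instead of bytes.
    async with ShareFileClient.from_connection_string(
        "<your-connection-string>", share_name="myshare", file_path="docs/report.txt"
    ) as file_client:
        downloader = await file_client.download_file(
            max_concurrency=4, encoding="utf-8"
        )
        return await downloader.readall()


print(asyncio.run(read_whole_file()))
```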
+    async def content_as_bytes(self, max_concurrency=1):
+        """DEPRECATED: Download the contents of this file.
+
+        This operation is blocking until all data is downloaded.
+
+        This method is deprecated, use :func:`readall` instead.
+
+        :param int max_concurrency:
+            The number of parallel connections with which to download.
+        :return: The contents of the file as bytes.
+        :rtype: bytes
+        """
+        warnings.warn(
+            "content_as_bytes is deprecated, use readall instead",
+            DeprecationWarning
+        )
+        self._max_concurrency = max_concurrency
+        return await self.readall()
+
+    async def content_as_text(self, max_concurrency=1, encoding="UTF-8"):
+        """DEPRECATED: Download the contents of this file, and decode as text.
+
+        This operation is blocking until all data is downloaded.
+
+        This method is deprecated, use :func:`readall` instead.
+
+        :param int max_concurrency:
+            The number of parallel connections with which to download.
+        :param str encoding:
+            The encoding with which to decode the downloaded bytes. Default is UTF-8.
+        :return: The contents of the file as a str.
+        :rtype: str
+        """
+        warnings.warn(
+            "content_as_text is deprecated, use readall instead",
+            DeprecationWarning
+        )
+        self._max_concurrency = max_concurrency
+        self._encoding = encoding
+        return await self.readall()
+
+    async def readinto(self, stream: IO[bytes]) -> int:
+        """Download the contents of this file to a stream.
+
+        :param IO[bytes] stream:
+            The stream to download to. This can be an open file-handle,
+            or any writable stream. The stream must be seekable if the download
+            uses more than one parallel connection.
+        :returns: The number of bytes read.
+        :rtype: int
+        """
+        # the stream must be seekable if parallel download is required
+        parallel = self._max_concurrency > 1
+        if parallel:
+            error_message = "Target stream handle must be seekable."
+            if sys.version_info >= (3,) and not stream.seekable():
+                raise ValueError(error_message)
+
+            try:
+                stream.seek(stream.tell())
+            except (NotImplementedError, AttributeError) as exc:
+                raise ValueError(error_message) from exc
+
+        # Write the content to the user stream
+        stream.write(self._current_content)
+        if self._progress_hook:
+            await self._progress_hook(len(self._current_content), self.size)
+
+        if self._download_complete:
+            return self.size
+
+        data_end = self._file_size
+        if self._end_range is not None:
+            # Use the length unless it is over the end of the file
+            data_end = min(self._file_size, self._end_range + 1)
+
+        downloader = _AsyncChunkDownloader(
+            client=self._client,
+            total_size=self.size,
+            chunk_size=self._config.max_chunk_get_size,
+            current_progress=self._first_get_size,
+            start_range=self._initial_range[1] + 1,  # start where the first download ended
+            end_range=data_end,
+            stream=stream,
+            parallel=parallel,
+            validate_content=self._validate_content,
+            use_location=self._location_mode,
+            progress_hook=self._progress_hook,
+            etag=self._etag,
+            **self._request_options)
+
+        dl_tasks = downloader.get_chunk_offsets()
+        running_futures = {
+            asyncio.ensure_future(downloader.process_chunk(d))
+            for d in islice(dl_tasks, 0, self._max_concurrency)
+        }
+        while running_futures:
+            # Wait for some download to finish before adding a new one
+            done, running_futures = await asyncio.wait(
+                running_futures, return_when=asyncio.FIRST_COMPLETED)
+            try:
+                for task in done:
+                    task.result()
+            except HttpResponseError as error:
+                process_storage_error(error)
+            try:
+                for _ in range(0, len(done)):
+                    next_chunk = next(dl_tasks)
+                    running_futures.add(asyncio.ensure_future(downloader.process_chunk(next_chunk)))
+            except StopIteration:
+                break
+
+        if running_futures:
+            # Wait for the remaining downloads to finish
+            done, _running_futures = await asyncio.wait(running_futures)
+            try:
+                for task in done:
+                    task.result()
+            except HttpResponseError as error:
+                process_storage_error(error)
+        return self.size
+
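
A sketch of `readinto()` with a progress hook, writing to a local file handle (seekable, as the parallel path requires). All names are placeholders.

```python
import asyncio
from typing import Optional

from azure.storage.fileshare.aio import ShareFileClient


async def download_to_disk() -> None:
    async def on_progress(current: int, total: Optional[int]) -> None:
        # Called as (bytes transferred so far, total download size).
        print(f"{current}/{total} bytes")

    # Placeholder connection details for illustration only.
    async with ShareFileClient.from_connection_string(
        "<your-connection-string>", share_name="myshare", file_path="docs/big.bin"
    ) as file_client:
        downloader = await file_client.download_file(
            max_concurrency=4, progress_hook=on_progress
        )
        with open("big.bin", "wb") as handle:
            await downloader.readinto(handle)


asyncio.run(download_to_disk())
```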
+    async def download_to_stream(self, stream, max_concurrency=1):
+        """Download the contents of this file to a stream.
+
+        This method is deprecated, use :func:`readinto` instead.
+
+        :param IO stream:
+            The stream to download to. This can be an open file-handle,
+            or any writable stream. The stream must be seekable if the download
+            uses more than one parallel connection.
+        :param int max_concurrency:
+            The number of parallel connections with which to download.
+        :returns: The properties of the downloaded file.
+        :rtype: Any
+        """
+        warnings.warn(
+            "download_to_stream is deprecated, use readinto instead",
+            DeprecationWarning
+        )
+        self._max_concurrency = max_concurrency
+        await self.readinto(stream)
+        return self.properties
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/fileshare/aio/_file_client_async.py b/.venv/lib/python3.12/site-packages/azure/storage/fileshare/aio/_file_client_async.py
new file mode 100644
index 00000000..6272949b
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/fileshare/aio/_file_client_async.py
@@ -0,0 +1,1740 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+# pylint: disable=too-many-lines, too-many-public-methods, docstring-keyword-should-match-keyword-only
+
+import functools
+import sys
+import time
+import warnings
+from datetime import datetime
+from io import BytesIO
+from typing import (
+    Any, AnyStr, AsyncGenerator, AsyncIterable, Callable, cast,
+    Dict, IO, Iterable, List, Optional, Tuple, Union,
+    TYPE_CHECKING
+)
+from typing_extensions import Self
+
+from azure.core.async_paging import AsyncItemPaged
+from azure.core.exceptions import HttpResponseError, ResourceNotFoundError
+from azure.core.tracing.decorator import distributed_trace
+from azure.core.tracing.decorator_async import distributed_trace_async
+from .._deserialize import deserialize_file_properties, deserialize_file_stream, get_file_ranges_result
+from .._file_client_helpers import (
+    _format_url,
+    _from_file_url,
+    _get_ranges_options,
+    _parse_url,
+    _upload_range_from_url_options
+)
+from .._generated.aio import AzureFileStorage
+from .._generated.models import FileHTTPHeaders
+from .._parser import _datetime_to_str, _get_file_permission, _parse_snapshot
+from .._serialize import (
+    get_access_conditions,
+    get_api_version,
+    get_dest_access_conditions,
+    get_rename_smb_properties,
+    get_smb_properties,
+    get_source_access_conditions
+)
+from .._shared.base_client import StorageAccountHostsMixin, parse_query
+from .._shared.base_client_async import AsyncStorageAccountHostsMixin, parse_connection_str
+from .._shared.policies_async import ExponentialRetry
+from .._shared.request_handlers import add_metadata_headers, get_length
+from .._shared.response_handlers import process_storage_error, return_response_headers
+from .._shared.uploads_async import AsyncIterStreamer, FileChunkUploader, IterStreamer, upload_data_chunks
+from ._download_async import StorageStreamDownloader
+from ._lease_async import ShareLeaseClient
+from ._models import FileProperties, Handle, HandlesPaged
+
+if sys.version_info >= (3, 8):
+    from typing import Literal
+else:
+    from typing_extensions import Literal
+
+if TYPE_CHECKING:
+    from azure.core.credentials import AzureNamedKeyCredential, AzureSasCredential
+    from azure.core.credentials_async import AsyncTokenCredential
+    from .._models import ContentSettings, NTFSAttributes
+    from .._shared.base_client import StorageConfiguration
+
+
+async def _upload_file_helper(
+    client: "ShareFileClient",
+    stream: Any,
+    size: Optional[int],
+    metadata: Optional[Dict[str, str]],
+    content_settings: Optional["ContentSettings"],
+    validate_content: bool,
+    timeout: Optional[int],
+    max_concurrency: int,
+    file_settings: "StorageConfiguration",
+    file_attributes: Optional[Union[str, "NTFSAttributes"]] = None,
+    file_creation_time: Optional[Union[str, datetime]] = None,
+    file_last_write_time: Optional[Union[str, datetime]] = None,
+    file_permission: Optional[str] = None,
+    file_permission_key: Optional[str] = None,
+    progress_hook: Optional[Callable[[int, Optional[int]], None]] = None,
+    **kwargs: Any
+) -> Dict[str, Any]:
+    try:
+        if size is None or size < 0:
+            raise ValueError("A content size must be specified for a File.")
+        response = await client.create_file(
+            size, content_settings=content_settings, metadata=metadata,
+            file_attributes=file_attributes,
+            file_creation_time=file_creation_time,
+            file_last_write_time=file_last_write_time,
+            file_permission=file_permission,
+            permission_key=file_permission_key,
+            timeout=timeout,
+            **kwargs
+        )
+        if size == 0:
+            return response
+
+        responses = await upload_data_chunks(
+            service=client,
+            uploader_class=FileChunkUploader,
+            total_size=size,
+            chunk_size=file_settings.max_range_size,
+            stream=stream,
+            max_concurrency=max_concurrency,
+            validate_content=validate_content,
+            progress_hook=progress_hook,
+            timeout=timeout,
+            **kwargs
+        )
+        return cast(Dict[str, Any], sorted(responses, key=lambda r: r.get('last_modified'))[-1])
+    except HttpResponseError as error:
+        process_storage_error(error)
+
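
The helper above follows a create-then-fill protocol: the file is created at its final size first, then the content is written range by range. Below is a stripped-down sketch of the same protocol; it uses the real `create_file`/`upload_range` client methods, but `upload_in_ranges` itself is an illustrative name, not SDK API.

```python
from typing import Any


async def upload_in_ranges(file_client: Any, data: bytes, range_size: int) -> None:
    # Create the file at its final size (the content starts zero-filled) ...
    await file_client.create_file(size=len(data))
    # ... then write each range with a separate PUT, sequentially here for
    # clarity; the helper above fans out with upload_data_chunks instead.
    for offset in range(0, len(data), range_size):
        chunk = data[offset:offset + range_size]
        await file_client.upload_range(chunk, offset=offset, length=len(chunk))
```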
+
+class ShareFileClient(AsyncStorageAccountHostsMixin, StorageAccountHostsMixin):  # type: ignore [misc]
+    """A client to interact with a specific file, although that file may not yet exist.
+
+    :param str account_url:
+        The URI to the storage account. In order to create a client given the full URI to the
+        file, use the :func:`from_file_url` classmethod.
+    :param share_name:
+        The name of the share for the file.
+    :type share_name: str
+    :param str file_path:
+        The file path to the file with which to interact. If specified, this value will override
+        a file value specified in the file URL.
+    :param str snapshot:
+        An optional file snapshot on which to operate. This can be the snapshot ID string
+        or the response returned from :func:`ShareClient.create_snapshot`.
+    :param credential:
+        The credentials with which to authenticate. This is optional if the
+        account URL already has a SAS token. The value can be a SAS token string,
+        an instance of an AzureSasCredential or AzureNamedKeyCredential from azure.core.credentials,
+        an account shared access key, or an instance of an AsyncTokenCredential class from azure.identity.
+        If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential
+        - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError.
+        If using an instance of AzureNamedKeyCredential, "name" should be the storage account name, and "key"
+        should be the storage account key.
+    :type credential:
+        ~azure.core.credentials.AzureNamedKeyCredential or
+        ~azure.core.credentials.AzureSasCredential or
+        ~azure.core.credentials_async.AsyncTokenCredential or
+        str or dict[str, str] or None
+    :keyword token_intent:
+        Required when using `AsyncTokenCredential` for authentication and ignored for other forms of authentication.
+        Specifies the intent for all requests when using `AsyncTokenCredential` authentication. Possible values are:
+
+        backup - Specifies requests are intended for backup/admin type operations, meaning that all file/directory
+                 ACLs are bypassed and full permissions are granted. User must also have required RBAC permission.
+
+    :paramtype token_intent: Literal['backup']
+    :keyword bool allow_trailing_dot: If true, the trailing dot will not be trimmed from the target URI.
+    :keyword bool allow_source_trailing_dot: If true, the trailing dot will not be trimmed from the source URI.
+    :keyword str api_version:
+        The Storage API version to use for requests. Default value is the most recent service version that is
+        compatible with the current SDK. Setting to an older version may result in reduced feature compatibility.
+
+        .. versionadded:: 12.1.0
+
+    :keyword str secondary_hostname:
+        The hostname of the secondary endpoint.
+    :keyword int max_range_size: The maximum range size used for a file upload. Defaults to 4*1024*1024.
+    :keyword str audience: The audience to use when requesting tokens for Azure Active Directory
+        authentication. Only has an effect when credential is of type AsyncTokenCredential. The value could be
+        https://storage.azure.com/ (default) or https://<account>.file.core.windows.net.
+    """
+    def __init__(
+        self, account_url: str,
+        share_name: str,
+        file_path: str,
+        snapshot: Optional[Union[str, Dict[str, Any]]] = None,
+        credential: Optional[Union[str, Dict[str, str], "AzureNamedKeyCredential", "AzureSasCredential", "AsyncTokenCredential"]] = None,  # pylint: disable=line-too-long
+        *,
+        token_intent: Optional[Literal['backup']] = None,
+        **kwargs: Any
+    ) -> None:
+        kwargs["retry_policy"] = kwargs.get("retry_policy") or ExponentialRetry(**kwargs)
+        loop = kwargs.pop('loop', None)
+        if loop and sys.version_info >= (3, 8):
+            warnings.warn("The 'loop' parameter was deprecated from asyncio's high-level"
+                          "APIs in Python 3.8 and is no longer supported.", DeprecationWarning)
+        if hasattr(credential, 'get_token') and not token_intent:
+            raise ValueError("'token_intent' keyword is required when 'credential' is an AsyncTokenCredential.")
+        parsed_url = _parse_url(account_url, share_name, file_path)
+        path_snapshot, sas_token = parse_query(parsed_url.query)
+        if not sas_token and not credential:
+            raise ValueError(
+                'You need to provide either an account shared key or SAS token when creating a storage service.')
+        self.snapshot = _parse_snapshot(snapshot, path_snapshot)
+        self.share_name = share_name
+        self.file_path = file_path.split('/')
+        self.file_name = self.file_path[-1]
+        self.directory_path = "/".join(self.file_path[:-1])
+
+        self._query_str, credential = self._format_query_string(
+            sas_token, credential, share_snapshot=self.snapshot)
+        super(ShareFileClient, self).__init__(
+            parsed_url, service='file-share', credential=credential, **kwargs)
+        self.allow_trailing_dot = kwargs.pop('allow_trailing_dot', None)
+        self.allow_source_trailing_dot = kwargs.pop('allow_source_trailing_dot', None)
+        self.file_request_intent = token_intent
+        self._client = AzureFileStorage(url=self.url, base_url=self.url, pipeline=self._pipeline,
+                                        allow_trailing_dot=self.allow_trailing_dot,
+                                        allow_source_trailing_dot=self.allow_source_trailing_dot,
+                                        file_request_intent=self.file_request_intent)
+        self._client._config.version = get_api_version(kwargs)  # type: ignore [assignment]
+
+    @classmethod
+    def from_file_url(
+        cls, file_url: str,
+        snapshot: Optional[Union[str, Dict[str, Any]]] = None,
+        credential: Optional[Union[str, Dict[str, str], "AzureNamedKeyCredential", "AzureSasCredential", "AsyncTokenCredential"]] = None,  # pylint: disable=line-too-long
+        **kwargs: Any
+    ) -> Self:
+        """A client to interact with a specific file, although that file may not yet exist.
+
+        :param str file_url: The full URI to the file.
+        :param str snapshot:
+            An optional file snapshot on which to operate. This can be the snapshot ID string
+            or the response returned from :func:`ShareClient.create_snapshot`.
+        :param credential:
+            The credentials with which to authenticate. This is optional if the
+            account URL already has a SAS token. The value can be a SAS token string,
+            an instance of an AzureSasCredential or AzureNamedKeyCredential from azure.core.credentials,
+            an account shared access key, or an instance of an AsyncTokenCredential class from azure.identity.
+            If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential
+            - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError.
+            If using an instance of AzureNamedKeyCredential, "name" should be the storage account name, and "key"
+            should be the storage account key.
+        :type credential:
+            ~azure.core.credentials.AzureNamedKeyCredential or
+            ~azure.core.credentials.AzureSasCredential or
+            ~azure.core.credentials_async.AsyncTokenCredential or
+            str or dict[str, str] or None
+        :keyword str audience: The audience to use when requesting tokens for Azure Active Directory
+            authentication. Only has an effect when credential is of type AsyncTokenCredential. The value could be
+            https://storage.azure.com/ (default) or https://<account>.file.core.windows.net.
+        :returns: A File client.
+        :rtype: ~azure.storage.fileshare.ShareFileClient
+        """
+        account_url, share_name, file_path, snapshot = _from_file_url(file_url, snapshot)
+        return cls(account_url, share_name, file_path, snapshot, credential, **kwargs)
+
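
A hedged sketch of constructing a client from a full file URL and probing for existence; the URL and SAS token are placeholders.

```python
import asyncio

from azure.storage.fileshare.aio import ShareFileClient


async def check_exists() -> None:
    # Placeholder URL and credential for illustration only.
    file_client = ShareFileClient.from_file_url(
        "https://<account>.file.core.windows.net/myshare/docs/report.txt",
        credential="<sas-token>",
    )
    async with file_client:
        print(await file_client.exists())


asyncio.run(check_exists())
```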
+    def _format_url(self, hostname: str):
+        return _format_url(self.scheme, hostname, self.share_name, self.file_path, self._query_str)
+
+    @classmethod
+    def from_connection_string(
+        cls, conn_str: str,
+        share_name: str,
+        file_path: str,
+        snapshot: Optional[Union[str, Dict[str, Any]]] = None,
+        credential: Optional[Union[str, Dict[str, str], "AzureNamedKeyCredential", "AzureSasCredential", "AsyncTokenCredential"]] = None,  # pylint: disable=line-too-long
+        **kwargs: Any
+    ) -> Self:
+        """Create ShareFileClient from a Connection String.
+
+        :param str conn_str:
+            A connection string to an Azure Storage account.
+        :param share_name: The name of the share.
+        :type share_name: str
+        :param str file_path:
+            The file path.
+        :param str snapshot:
+            An optional file snapshot on which to operate. This can be the snapshot ID string
+            or the response returned from :func:`ShareClient.create_snapshot`.
+        :param credential:
+            The credentials with which to authenticate. This is optional if the
+            account URL already has a SAS token. The value can be a SAS token string,
+            an instance of an AzureSasCredential or AzureNamedKeyCredential from azure.core.credentials,
+            an account shared access key, or an instance of an AsyncTokenCredential class from azure.identity.
+            If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential
+            - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError.
+            If using an instance of AzureNamedKeyCredential, "name" should be the storage account name, and "key"
+            should be the storage account key.
+        :type credential:
+            ~azure.core.credentials.AzureNamedKeyCredential or
+            ~azure.core.credentials.AzureSasCredential or
+            ~azure.core.credentials_async.AsyncTokenCredential or
+            str or dict[str, str] or None
+        :keyword str audience: The audience to use when requesting tokens for Azure Active Directory
+            authentication. Only has an effect when credential is of type AsyncTokenCredential. The value could be
+            https://storage.azure.com/ (default) or https://<account>.file.core.windows.net.
+        :returns: A File client.
+        :rtype: ~azure.storage.fileshare.ShareFileClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/file_samples_hello_world_async.py
+                :start-after: [START create_file_client]
+                :end-before: [END create_file_client]
+                :language: python
+                :dedent: 12
+                :caption: Creates the file client with connection string.
+        """
+        account_url, secondary, credential = parse_connection_str(conn_str, credential, 'file')
+        if 'secondary_hostname' not in kwargs:
+            kwargs['secondary_hostname'] = secondary
+        return cls(
+            account_url, share_name=share_name, file_path=file_path, snapshot=snapshot, credential=credential, **kwargs)
+
+    @distributed_trace_async
+    async def acquire_lease(self, lease_id: Optional[str] = None, **kwargs: Any) -> ShareLeaseClient:
+        """Requests a new lease.
+
+        If the file does not have an active lease, the File
+        Service creates a lease on the file and returns a new lease.
+
+        :param str lease_id:
+            Proposed lease ID, in a GUID string format. The File Service
+            returns 400 (Invalid request) if the proposed lease ID is not
+            in the correct format.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :returns: A ShareLeaseClient object.
+        :rtype: ~azure.storage.fileshare.aio.ShareLeaseClient
+        """
+        kwargs['lease_duration'] = -1
+        lease = ShareLeaseClient(self, lease_id=lease_id)
+        await lease.acquire(**kwargs)
+        return lease
+
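
A sketch of guarding a write with a lease: acquire (infinite duration by default), pass the lease to the mutating call, and release in a finally block. Names and connection details are placeholders.

```python
import asyncio

from azure.storage.fileshare.aio import ShareFileClient


async def guarded_write() -> None:
    # Placeholder connection details for illustration only.
    async with ShareFileClient.from_connection_string(
        "<your-connection-string>", share_name="myshare", file_path="docs/report.txt"
    ) as file_client:
        lease = await file_client.acquire_lease()
        try:
            # The lease must accompany writes while it is active.
            await file_client.upload_file(b"guarded write", lease=lease)
        finally:
            await lease.release()


asyncio.run(guarded_write())
```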
+    @distributed_trace_async
+    async def exists(self, **kwargs: Any) -> bool:
+        """
+        Returns True if the file exists, False otherwise.
+
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :returns: True if the file exists, False otherwise.
+        :rtype: bool
+        """
+        try:
+            await self._client.file.get_properties(**kwargs)
+            return True
+        except HttpResponseError as error:
+            try:
+                process_storage_error(error)
+            except ResourceNotFoundError:
+                return False
+
+    @distributed_trace_async
+    async def create_file(
+        self, size: int,
+        file_attributes: Optional[Union[str, "NTFSAttributes"]] = None,
+        file_creation_time: Optional[Union[str, datetime]] = None,
+        file_last_write_time: Optional[Union[str, datetime]] = None,
+        file_permission: Optional[str] = None,
+        permission_key: Optional[str] = None,
+        **kwargs: Any
+    ) -> Dict[str, Any]:
+        """Creates a new file.
+
+        Note that it only initializes the file with no content.
+
+        :param int size: Specifies the maximum size for the file,
+            up to 1 TB.
+        :param file_attributes:
+            The file system attributes for files and directories.
+            If not set, the default value would be "None" and the attributes will be set to "Archive".
+            Here is an example for when the var type is str: 'Temporary|Archive'.
+            file_attributes value is not case sensitive.
+        :type file_attributes: str or ~azure.storage.fileshare.NTFSAttributes or None
+        :param file_creation_time: Creation time for the file
+        :type file_creation_time: str or ~datetime.datetime or None
+        :param file_last_write_time: Last write time for the file
+        :type file_last_write_time: str or ~datetime.datetime or None
+        :param file_permission: If specified the permission (security
+            descriptor) shall be set for the directory/file. This header can be
+            used if Permission size is <= 8KB, else x-ms-file-permission-key
+            header shall be used. Default value: Inherit. If SDDL is specified as
+            input, it must have owner, group and dacl. Note: Only one of the
+            x-ms-file-permission or x-ms-file-permission-key should be specified.
+        :type file_permission: str
+        :param permission_key: Key of the permission to be set for the
+            directory/file. Note: Only one of the x-ms-file-permission or
+            x-ms-file-permission-key should be specified.
+        :type permission_key: str
+        :keyword file_permission_format:
+            Specifies the format in which the permission is returned. If not specified, SDDL will be the default.
+        :paramtype file_permission_format: Literal['sddl', 'binary']
+        :keyword file_change_time:
+            Change time for the file. If not specified, change time will be set to the current date/time.
+
+            .. versionadded:: 12.8.0
+
+                This parameter was introduced in API version '2021-06-08'.
+
+        :paramtype file_change_time: str or ~datetime.datetime
+        :keyword ~azure.storage.fileshare.ContentSettings content_settings:
+            ContentSettings object used to set file properties. Used to set content type, encoding,
+            language, disposition, md5, and cache control.
+        :keyword metadata:
+            Name-value pairs associated with the file as metadata.
+        :paramtype metadata: Optional[dict[str, str]]
+        :keyword lease:
+            Required if the file has an active lease. Value can be a ShareLeaseClient object
+            or the lease ID as a string.
+
+            .. versionadded:: 12.1.0
+
+        :paramtype lease: ~azure.storage.fileshare.aio.ShareLeaseClient or str
+        :keyword str owner:
+            NFS only. The owner of the file.
+        :keyword str group:
+            NFS only. The owning group of the file.
+        :keyword str file_mode:
+            NFS only. The file mode of the file.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :returns: File-updated property dict (Etag and last modified).
+        :rtype: dict[str, Any]
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/file_samples_client_async.py
+                :start-after: [START create_file]
+                :end-before: [END create_file]
+                :language: python
+                :dedent: 16
+                :caption: Create a file.
+        """
+        access_conditions = get_access_conditions(kwargs.pop('lease', None))
+        content_settings = kwargs.pop('content_settings', None)
+        metadata = kwargs.pop('metadata', None)
+        timeout = kwargs.pop('timeout', None)
+        headers = kwargs.pop("headers", {})
+        headers.update(add_metadata_headers(metadata))
+        file_http_headers = None
+        if content_settings:
+            file_http_headers = FileHTTPHeaders(
+                file_cache_control=content_settings.cache_control,
+                file_content_type=content_settings.content_type,
+                file_content_md5=bytearray(content_settings.content_md5) if content_settings.content_md5 else None,
+                file_content_encoding=content_settings.content_encoding,
+                file_content_language=content_settings.content_language,
+                file_content_disposition=content_settings.content_disposition,
+            )
+        file_permission = _get_file_permission(file_permission, permission_key, None)
+        file_change_time = kwargs.pop('file_change_time', None)
+        try:
+            return cast(Dict[str, Any], await self._client.file.create(
+                file_content_length=size,
+                metadata=metadata,
+                file_attributes=str(file_attributes) if file_attributes is not None else file_attributes,
+                file_creation_time=_datetime_to_str(file_creation_time),
+                file_last_write_time=_datetime_to_str(file_last_write_time),
+                file_change_time=_datetime_to_str(file_change_time),
+                file_permission=file_permission,
+                file_permission_key=permission_key,
+                file_http_headers=file_http_headers,
+                lease_access_conditions=access_conditions,
+                headers=headers,
+                timeout=timeout,
+                cls=return_response_headers,
+                **kwargs
+            ))
+        except HttpResponseError as error:
+            process_storage_error(error)
+
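
A sketch of `create_file()`: it only reserves the file at the given size (zero-filled) and sets its properties; content is written separately. Placeholders throughout.

```python
import asyncio

from azure.storage.fileshare import ContentSettings
from azure.storage.fileshare.aio import ShareFileClient


async def create_empty_file() -> None:
    # Placeholder connection details for illustration only.
    async with ShareFileClient.from_connection_string(
        "<your-connection-string>", share_name="myshare", file_path="docs/new.txt"
    ) as file_client:
        await file_client.create_file(
            size=1024,  # reserves 1 KiB; no content is uploaded here
            content_settings=ContentSettings(content_type="text/plain"),
        )


asyncio.run(create_empty_file())
```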
+    @distributed_trace_async
+    async def upload_file(
+        self, data: Union[bytes, str, Iterable[AnyStr], AsyncIterable[AnyStr], IO[AnyStr]],
+        length: Optional[int] = None,
+        file_attributes: Optional[Union[str, "NTFSAttributes"]] = None,
+        file_creation_time: Optional[Union[str, datetime]] = None,
+        file_last_write_time: Optional[Union[str, datetime]] = None,
+        file_permission: Optional[str] = None,
+        permission_key: Optional[str] = None,
+        **kwargs
+    ) -> Dict[str, Any]:
+        """Uploads a new file.
+
+        :param data:
+            Content of the file.
+        :type data: Union[bytes, str, Iterable[AnyStr], AsyncIterable[AnyStr], IO[AnyStr]]
+        :param int length:
+            Length of the file in bytes. Specify its maximum size, up to 1 TiB.
+        :param file_attributes:
+            The file system attributes for files and directories.
+            If not set, the default value would be "None" and the attributes will be set to "Archive".
+            Here is an example for when the var type is str: 'Temporary|Archive'.
+            file_attributes value is not case sensitive.
+        :type file_attributes: str or ~azure.storage.fileshare.NTFSAttributes or None
+        :param file_creation_time: Creation time for the file
+        :type file_creation_time: str or ~datetime.datetime or None
+        :param file_last_write_time: Last write time for the file
+        :type file_last_write_time: str or ~datetime.datetime or None
+        :param file_permission: If specified the permission (security
+            descriptor) shall be set for the directory/file. This header can be
+            used if Permission size is <= 8KB, else x-ms-file-permission-key
+            header shall be used. Default value: Inherit. If SDDL is specified as
+            input, it must have owner, group and dacl. Note: Only one of the
+            x-ms-file-permission or x-ms-file-permission-key should be specified.
+        :type file_permission: str
+        :param permission_key: Key of the permission to be set for the
+            directory/file. Note: Only one of the x-ms-file-permission or
+            x-ms-file-permission-key should be specified.
+        :type permission_key: str
+        :keyword file_change_time:
+            Change time for the file. If not specified, change time will be set to the current date/time.
+
+            .. versionadded:: 12.8.0
+
+                This parameter was introduced in API version '2021-06-08'.
+
+        :paramtype file_change_time: str or ~datetime.datetime
+        :keyword metadata:
+            Name-value pairs associated with the file as metadata.
+        :paramtype metadata: Optional[dict[str, str]]
+        :keyword ~azure.storage.fileshare.ContentSettings content_settings:
+            ContentSettings object used to set file properties. Used to set content type, encoding,
+            language, disposition, md5, and cache control.
+        :keyword bool validate_content:
+            If true, calculates an MD5 hash for each range of the file. The storage
+            service checks the hash of the content that has arrived with the hash
+            that was sent. This is primarily valuable for detecting bit flips on
+            the wire when using HTTP instead of HTTPS, as HTTPS (the default)
+            already validates. Note that this MD5 hash is not stored with the
+            file.
+        :keyword int max_concurrency:
+            Maximum number of parallel connections to use when transferring the file in chunks.
+            This option does not affect the underlying connection pool, and may
+            require a separate configuration of the connection pool.
+        :keyword str encoding:
+            The encoding used when data is provided as text. Defaults to UTF-8.
+        :keyword lease:
+            Required if the file has an active lease. Value can be a ShareLeaseClient object
+            or the lease ID as a string.
+
+            .. versionadded:: 12.1.0
+
+        :paramtype lease: ~azure.storage.fileshare.aio.ShareLeaseClient or str
+        :keyword progress_hook:
+            An async callback to track the progress of a long running upload. The signature is
+            function(current: int, total: Optional[int]) where current is the number of bytes transferred
+            so far, and total is the size of the file or None if the size is unknown.
+        :paramtype progress_hook: Callable[[int, Optional[int]], Awaitable[None]]
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :returns: File-updated property dict (Etag and last modified).
+        :rtype: dict[str, Any]
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/file_samples_client_async.py
+                :start-after: [START upload_file]
+                :end-before: [END upload_file]
+                :language: python
+                :dedent: 16
+                :caption: Upload a file.
+        """
+        metadata = kwargs.pop('metadata', None)
+        content_settings = kwargs.pop('content_settings', None)
+        max_concurrency = kwargs.pop('max_concurrency', 1)
+        validate_content = kwargs.pop('validate_content', False)
+        progress_hook = kwargs.pop('progress_hook', None)
+        timeout = kwargs.pop('timeout', None)
+        encoding = kwargs.pop('encoding', 'UTF-8')
+
+        if isinstance(data, str):
+            data = data.encode(encoding)
+        if length is None:
+            length = get_length(data)
+        if isinstance(data, bytes):
+            data = data[:length]
+
+        stream: Optional[Any] = None
+        if isinstance(data, bytes):
+            stream = BytesIO(data)
+        elif hasattr(data, "read"):
+            stream = data
+        elif hasattr(data, "__iter__"):
+            stream = IterStreamer(data, encoding=encoding)
+        elif hasattr(data, '__aiter__'):
+            stream = AsyncIterStreamer(cast(AsyncGenerator, data), encoding=encoding)
+        else:
+            raise TypeError(f"Unsupported data type: {type(data)}")
+        return await _upload_file_helper(
+            self,
+            stream,
+            length,
+            metadata,
+            content_settings,
+            validate_content,
+            timeout,
+            max_concurrency,
+            self._config,
+            file_attributes=file_attributes,
+            file_creation_time=file_creation_time,
+            file_last_write_time=file_last_write_time,
+            file_permission=file_permission,
+            file_permission_key=permission_key,
+            progress_hook=progress_hook,
+            **kwargs)
+
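
A sketch of uploading from an open binary stream with per-range MD5 validation and a progress callback; the local file name and connection details are placeholders.

```python
import asyncio
from typing import Optional

from azure.storage.fileshare.aio import ShareFileClient


async def upload_stream() -> None:
    async def on_progress(current: int, total: Optional[int]) -> None:
        print(f"uploaded {current} of {total} bytes")

    # Placeholder connection details and local path for illustration only.
    async with ShareFileClient.from_connection_string(
        "<your-connection-string>", share_name="myshare", file_path="docs/data.bin"
    ) as file_client:
        with open("data.bin", "rb") as source:
            await file_client.upload_file(
                source,
                validate_content=True,  # MD5 per range on the wire
                max_concurrency=4,
                progress_hook=on_progress,
            )


asyncio.run(upload_stream())
```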
+    @distributed_trace_async
+    async def start_copy_from_url(self, source_url: str, **kwargs: Any) -> Dict[str, Any]:
+        """Initiates the copying of data from a source URL into the file
+        referenced by the client.
+
+        The status of this copy operation can be found using the `get_properties`
+        method.
+
+        :param str source_url:
+            Specifies the URL of the source file.
+        :keyword str file_permission:
+            If specified the permission (security descriptor) shall be set for the directory/file.
+            This value can be set to "source" to copy the security descriptor from the source file.
+            Otherwise if set, this value will be used to override the source value. If not set, permission value
+            is inherited from the parent directory of the target file. This setting can be
+            used if Permission size is <= 8KB, otherwise permission_key shall be used.
+            If SDDL is specified as input, it must have owner, group and dacl.
+            Note: Only one of the file_permission or permission_key should be specified.
+
+            .. versionadded:: 12.1.0
+
+                This parameter was introduced in API version '2019-07-07'.
+
+        :keyword str permission_key:
+            Key of the permission to be set for the directory/file.
+            This value can be set to "source" to copy the security descriptor from the source file.
+            Otherwise if set, this value will be used to override the source value. If not set, permission value
+            is inherited from the parent directory of the target file.
+            Note: Only one of the file_permission or permission_key should be specified.
+
+            .. versionadded:: 12.1.0
+
+                This parameter was introduced in API version '2019-07-07'.
+
+        :keyword file_permission_format:
+            Specifies the format in which the permission is returned. If not specified, SDDL will be the default.
+        :paramtype file_permission_format: Literal['sddl', 'binary']
+        :keyword file_attributes:
+            This value can be set to "source" to copy file attributes from the source file to the target file,
+            or to clear all attributes, it can be set to "None". Otherwise it can be set to a list of attributes
+            to set on the target file. If this is not set, the default value is "Archive".
+
+            .. versionadded:: 12.1.0
+
+                This parameter was introduced in API version '2019-07-07'.
+
+        :paramtype file_attributes: str or ~azure.storage.fileshare.NTFSAttributes
+        :keyword file_creation_time:
+            This value can be set to "source" to copy the creation time from the source file to the target file,
+            or a datetime to set as creation time on the target file. This could also be a string in ISO 8601 format.
+            If this is not set, creation time will be set to the date time value of the creation
+            (or when it was overwritten) of the target file by copy engine.
+
+            .. versionadded:: 12.1.0
+
+                This parameter was introduced in API version '2019-07-07'.
+
+        :paramtype file_creation_time: str or ~datetime.datetime
+        :keyword file_last_write_time:
+            This value can be set to "source" to copy the last write time from the source file to the target file, or
+            a datetime to set as the last write time on the target file. This could also be a string in ISO 8601 format.
+            If this is not set, value will be the last write time to the file by the copy engine.
+
+            .. versionadded:: 12.1.0
+
+                This parameter was introduced in API version '2019-07-07'.
+
+        :paramtype file_last_write_time: str or ~datetime.datetime
+        :keyword file_change_time:
+            Change time for the file. If not specified, change time will be set to the current date/time.
+
+            .. versionadded:: 12.9.0
+
+                This parameter was introduced in API version '2021-06-08'.
+
+        :paramtype file_change_time: str or ~datetime.datetime
+        :keyword bool ignore_read_only:
+            Specifies the option to overwrite the target file if it already exists and has read-only attribute set.
+
+            .. versionadded:: 12.1.0
+
+                This parameter was introduced in API version '2019-07-07'.
+
+        :keyword bool set_archive_attribute:
+            Specifies the option to set the archive attribute on the target file.
+            True means the archive attribute will be set on the target file despite attribute
+            overrides or the source file state.
+
+            .. versionadded:: 12.1.0
+
+                This parameter was introduced in API version '2019-07-07'.
+
+        :keyword metadata:
+            Name-value pairs associated with the file as metadata.
+        :paramtype metadata: Optional[dict[str, str]]
+        :keyword lease:
+            Required if the file has an active lease. Value can be a ShareLeaseClient object
+            or the lease ID as a string.
+
+            .. versionadded:: 12.1.0
+
+        :paramtype lease: ~azure.storage.fileshare.aio.ShareLeaseClient or str
+        :keyword str owner:
+            NFS only. The owner of the file.
+        :keyword str group:
+            NFS only. The owning group of the file.
+        :keyword str file_mode:
+            NFS only. The file mode of the file.
+        :keyword file_mode_copy_mode:
+            NFS only. Applicable only when the copy source is a File. Determines the copy behavior
+            of the mode bits of the file. Possible values are:
+
+            source - The mode on the destination file is copied from the source file.
+            override - The mode on the destination file is determined via the file_mode keyword.
+        :paramtype file_mode_copy_mode: Literal['source', 'override']
+        :keyword owner_copy_mode:
+            NFS only. Applicable only when the copy source is a File. Determines the copy behavior
+            of the owner and group of the file. Possible values are:
+
+            source - The owner and group on the destination file is copied from the source file.
+            override - The owner and group on the destination file is determined via the owner and group keywords.
+        :paramtype owner_copy_mode: Literal['source', 'override']
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :returns: Response after the data copying operation has been initiated.
+        :rtype: dict[str, Any]
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/file_samples_client_async.py
+                :start-after: [START copy_file_from_url]
+                :end-before: [END copy_file_from_url]
+                :language: python
+                :dedent: 16
+                :caption: Copy a file from a URL
+        """
+        metadata = kwargs.pop('metadata', None)
+        access_conditions = get_access_conditions(kwargs.pop('lease', None))
+        timeout = kwargs.pop('timeout', None)
+        owner = kwargs.pop('owner', None)
+        group = kwargs.pop('group', None)
+        file_mode = kwargs.pop('file_mode', None)
+        file_mode_copy_mode = kwargs.pop('file_mode_copy_mode', None)
+        file_owner_copy_mode = kwargs.pop('owner_copy_mode', None)
+        headers = kwargs.pop("headers", {})
+        headers.update(add_metadata_headers(metadata))
+        kwargs.update(get_smb_properties(kwargs))
+        try:
+            return cast(Dict[str, Any], await self._client.file.start_copy(
+                source_url,
+                metadata=metadata,
+                lease_access_conditions=access_conditions,
+                owner=owner,
+                group=group,
+                file_mode=file_mode,
+                file_mode_copy_mode=file_mode_copy_mode,
+                file_owner_copy_mode=file_owner_copy_mode,
+                headers=headers,
+                cls=return_response_headers,
+                timeout=timeout,
+                **kwargs
+            ))
+        except HttpResponseError as error:
+            process_storage_error(error)
+
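+    # --- Editorial usage sketch (not part of this patch) ---
+    # Assuming `file_client` is an authenticated aio ShareFileClient and
+    # `src_url` is a readable source file URL (both hypothetical names),
+    # a server-side copy can be started and then polled via get_file_properties:
+    #
+    #     copy = await file_client.start_copy_from_url(src_url)
+    #     props = await file_client.get_file_properties()
+    #     print(props.copy.status)  # e.g. 'pending' or 'success'
+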
+    @distributed_trace_async
+    async def abort_copy(self, copy_id: Union[str, FileProperties], **kwargs: Any) -> None:
+        """Abort an ongoing copy operation.
+
+        This will leave a destination file with zero length and full metadata.
+        This will raise an error if the copy operation has already ended.
+
+        :param copy_id:
+            The copy operation to abort. This can be either an ID, or an
+            instance of FileProperties.
+        :type copy_id: str or ~azure.storage.fileshare.FileProperties
+        :keyword lease:
+            Required if the file has an active lease. Value can be a ShareLeaseClient object
+            or the lease ID as a string.
+
+            .. versionadded:: 12.1.0
+
+        :paramtype lease: ~azure.storage.fileshare.aio.ShareLeaseClient or str
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :rtype: None
+        """
+        access_conditions = get_access_conditions(kwargs.pop('lease', None))
+        timeout = kwargs.pop('timeout', None)
+        if isinstance(copy_id, FileProperties):
+            copy_id = copy_id.copy.id
+        elif isinstance(copy_id, Dict):
+            copy_id = copy_id['copy_id']
+        try:
+            await self._client.file.abort_copy(copy_id=copy_id,
+                                               lease_access_conditions=access_conditions,
+                                               timeout=timeout, **kwargs)
+        except HttpResponseError as error:
+            process_storage_error(error)
+
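+    # Editorial sketch (not part of this patch), reusing the hypothetical
+    # `file_client` and the `copy` dict returned by start_copy_from_url above:
+    #
+    #     await file_client.abort_copy(copy['copy_id'])
+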
+    @distributed_trace_async
+    async def download_file(
+        self, offset: Optional[int] = None,
+        length: Optional[int] = None,
+        **kwargs: Any
+    ) -> StorageStreamDownloader:
+        """Downloads a file to the StorageStreamDownloader. The readall() method must
+        be used to read all the content or readinto() must be used to download the file into
+        a stream. Using chunks() returns an async iterator which allows the user to iterate over the content in chunks.
+
+        :param int offset:
+            Start of byte range to use for downloading a section of the file.
+            Must be set if length is provided.
+        :param int length:
+            Number of bytes to read from the stream. This is optional, but
+            should be supplied for optimal performance.
+        :keyword int max_concurrency:
+            Maximum number of parallel connections to use when transferring the file in chunks.
+            This option does not affect the underlying connection pool, and may
+            require a separate configuration of the connection pool.
+        :keyword bool validate_content:
+            If true, calculates an MD5 hash for each chunk of the file. The storage
+            service checks the hash of the content that has arrived with the hash
+            that was sent. This is primarily valuable for detecting bitflips on
+            the wire if using http instead of https as https (the default) will
+            already validate. Note that this MD5 hash is not stored with the
+            file. Also note that if enabled, the memory-efficient upload algorithm
+            will not be used, because computing the MD5 hash requires buffering
+            entire blocks, and doing so defeats the purpose of the memory-efficient algorithm.
+        :keyword lease:
+            Required if the file has an active lease. Value can be a ShareLeaseClient object
+            or the lease ID as a string.
+
+            .. versionadded:: 12.1.0
+
+        :paramtype lease: ~azure.storage.fileshare.aio.ShareLeaseClient or str
+        :keyword progress_hook:
+            An async callback to track the progress of a long running download. The signature is
+            function(current: int, total: int) where current is the number of bytes transferred
+            so far, and total is the size of the file or None if the size is unknown.
+        :paramtype progress_hook: Callable[[int, int], Awaitable[None]]
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :returns: A streaming object (StorageStreamDownloader)
+        :rtype: ~azure.storage.fileshare.aio.StorageStreamDownloader
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/file_samples_client_async.py
+                :start-after: [START download_file]
+                :end-before: [END download_file]
+                :language: python
+                :dedent: 16
+                :caption: Download a file.
+        """
+        range_end = None
+        if length is not None:
+            if offset is None:
+                raise ValueError("Offset value must not be None if length is set.")
+            range_end = offset + length - 1  # Service actually uses an end-range inclusive index
+
+        access_conditions = get_access_conditions(kwargs.pop('lease', None))
+
+        downloader = StorageStreamDownloader(
+            client=self._client.file,
+            config=self._config,
+            start_range=offset,
+            end_range=range_end,
+            name=self.file_name,
+            path='/'.join(self.file_path),
+            share=self.share_name,
+            lease_access_conditions=access_conditions,
+            cls=deserialize_file_stream,
+            **kwargs
+        )
+        await downloader._setup()  # pylint: disable=protected-access
+        return downloader
+
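+    # Editorial sketch (not part of this patch), assuming a hypothetical
+    # `file_client`: the downloader itself is awaited first, then read in
+    # full or consumed in chunks:
+    #
+    #     stream = await file_client.download_file()
+    #     data = await stream.readall()
+    #     # or: async for chunk in stream.chunks(): ...
+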
+    @distributed_trace_async
+    async def delete_file(self, **kwargs: Any) -> None:
+        """Marks the specified file for deletion. The file is
+        later deleted during garbage collection.
+
+        :keyword lease:
+            Required if the file has an active lease. Value can be a ShareLeaseClient object
+            or the lease ID as a string.
+
+            .. versionadded:: 12.1.0
+
+        :paramtype lease: ~azure.storage.fileshare.aio.ShareLeaseClient or str
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :rtype: None
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/file_samples_client_async.py
+                :start-after: [START delete_file]
+                :end-before: [END delete_file]
+                :language: python
+                :dedent: 16
+                :caption: Delete a file.
+        """
+        access_conditions = get_access_conditions(kwargs.pop('lease', None))
+        timeout = kwargs.pop('timeout', None)
+        try:
+            await self._client.file.delete(lease_access_conditions=access_conditions, timeout=timeout, **kwargs)
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    @distributed_trace_async
+    async def rename_file(self, new_name: str, **kwargs: Any) -> "ShareFileClient":
+        """
+        Rename the source file.
+
+        :param str new_name:
+            The new file name.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :keyword bool overwrite:
+            A boolean value indicating whether this request should overwrite the destination
+            file if it already exists. If true, the rename will succeed and will overwrite the
+            destination file. If not provided or if false and the destination file does exist, the
+            request will not overwrite the destination file. If provided and the destination file
+            doesn't exist, the rename will succeed.
+        :keyword bool ignore_read_only:
+            A boolean value that specifies whether the ReadOnly attribute on a preexisting destination
+            file should be respected. If true, the rename will succeed, otherwise, a previous file at the
+            destination with the ReadOnly attribute set will cause the rename to fail.
+        :keyword str file_permission:
+            If specified, the permission (security descriptor) shall be set for the file. This header
+            can be used if the permission size is <= 8KB; otherwise file_permission_key shall be used.
+            If SDDL is specified as input, it must have owner, group and dacl.
+            A value of 'preserve' can be passed to preserve source permissions.
+            Note: Only one of the file_permission or file_permission_key should be specified.
+        :keyword str file_permission_key:
+            Key of the permission to be set for the file.
+            Note: Only one of the file_permission or file_permission_key should be specified.
+        :keyword file_permission_format:
+            Specifies the format in which the permission is returned. If not specified, SDDL will be the default.
+        :paramtype file_permission_format: Literal['sddl', 'binary']
+        :keyword file_attributes:
+            The file system attributes for the file.
+        :paramtype file_attributes: ~azure.storage.fileshare.NTFSAttributes or str
+        :keyword file_creation_time:
+            Creation time for the file.
+        :paramtype file_creation_time: ~datetime.datetime or str
+        :keyword file_last_write_time:
+            Last write time for the file.
+        :paramtype file_last_write_time: ~datetime.datetime or str
+        :keyword file_change_time:
+            Change time for the file. If not specified, change time will be set to the current date/time.
+
+            .. versionadded:: 12.8.0
+
+                This parameter was introduced in API version '2021-06-08'.
+
+        :paramtype file_change_time: str or ~datetime.datetime
+        :keyword str content_type:
+            The Content Type of the new file.
+
+            .. versionadded:: 12.8.0
+
+                This parameter was introduced in API version '2021-06-08'.
+
+        :keyword Dict[str,str] metadata:
+            A name-value pair to associate with a file storage object.
+        :keyword source_lease:
+            Required if the source file has an active lease. Value can be a ShareLeaseClient object
+            or the lease ID as a string.
+        :paramtype source_lease: ~azure.storage.fileshare.ShareLeaseClient or str
+        :keyword destination_lease:
+            Required if the destination file has an active lease. Value can be a ShareLeaseClient object
+            or the lease ID as a string.
+        :paramtype destination_lease: ~azure.storage.fileshare.ShareLeaseClient or str
+        :returns: The new File Client.
+        :rtype: ~azure.storage.fileshare.ShareFileClient
+        """
+        if not new_name:
+            raise ValueError("Please specify a new file name.")
+
+        new_name = new_name.strip('/')
+        new_path_and_query = new_name.split('?')
+        new_file_path = new_path_and_query[0]
+        if len(new_path_and_query) == 2:
+            new_file_sas = new_path_and_query[1] or self._query_str.strip('?')
+        else:
+            new_file_sas = self._query_str.strip('?')
+
+        new_file_client = ShareFileClient(
+            f'{self.scheme}://{self.primary_hostname}', self.share_name, new_file_path,
+            credential=new_file_sas or self.credential, api_version=self.api_version,
+            _hosts=self._hosts, _configuration=self._config, _pipeline=self._pipeline,
+            _location_mode=self._location_mode, allow_trailing_dot=self.allow_trailing_dot,
+            allow_source_trailing_dot=self.allow_source_trailing_dot, token_intent=self.file_request_intent
+        )
+
+        kwargs.update(get_rename_smb_properties(kwargs))
+
+        file_http_headers = None
+        content_type = kwargs.pop('content_type', None)
+        if content_type:
+            file_http_headers = FileHTTPHeaders(
+                file_content_type=content_type
+            )
+
+        timeout = kwargs.pop('timeout', None)
+        overwrite = kwargs.pop('overwrite', None)
+        metadata = kwargs.pop('metadata', None)
+        headers = kwargs.pop('headers', {})
+        headers.update(add_metadata_headers(metadata))
+
+        source_access_conditions = get_source_access_conditions(kwargs.pop('source_lease', None))
+        dest_access_conditions = get_dest_access_conditions(kwargs.pop('destination_lease', None))
+
+        try:
+            await new_file_client._client.file.rename(  # pylint: disable=protected-access
+                self.url,
+                timeout=timeout,
+                replace_if_exists=overwrite,
+                file_http_headers=file_http_headers,
+                source_lease_access_conditions=source_access_conditions,
+                destination_lease_access_conditions=dest_access_conditions,
+                headers=headers,
+                **kwargs)
+
+            return new_file_client
+        except HttpResponseError as error:
+            process_storage_error(error)
+
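+    # Editorial sketch (not part of this patch), assuming a hypothetical
+    # `file_client`; the destination path 'dir2/renamed.txt' is illustrative
+    # and the returned value is a client for the new location:
+    #
+    #     new_client = await file_client.rename_file('dir2/renamed.txt', overwrite=True)
+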
+    @distributed_trace_async
+    async def get_file_properties(self, **kwargs: Any) -> FileProperties:
+        """Returns all user-defined metadata, standard HTTP properties, and
+        system properties for the file.
+
+        :keyword lease:
+            Required if the file has an active lease. Value can be a ShareLeaseClient object
+            or the lease ID as a string.
+
+            .. versionadded:: 12.1.0
+
+        :paramtype lease: ~azure.storage.fileshare.aio.ShareLeaseClient or str
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :returns: FileProperties
+        :rtype: ~azure.storage.fileshare.FileProperties
+        """
+        access_conditions = get_access_conditions(kwargs.pop('lease', None))
+        timeout = kwargs.pop('timeout', None)
+        try:
+            file_props = cast(FileProperties, await self._client.file.get_properties(
+                sharesnapshot=self.snapshot,
+                lease_access_conditions=access_conditions,
+                timeout=timeout,
+                cls=deserialize_file_properties,
+                **kwargs
+            ))
+        except HttpResponseError as error:
+            process_storage_error(error)
+        file_props.name = self.file_name
+        file_props.share = self.share_name
+        file_props.snapshot = self.snapshot
+        file_props.path = "/".join(self.file_path)
+        return file_props
+
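+    # Editorial sketch (not part of this patch), assuming a hypothetical
+    # `file_client`; name, share, and path are filled in client-side:
+    #
+    #     props = await file_client.get_file_properties()
+    #     print(props.name, props.size, props.last_modified)
+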
+    @distributed_trace_async
+    async def set_http_headers(
+        self, content_settings: "ContentSettings",
+        file_attributes: Optional[Union[str, "NTFSAttributes"]] = None,
+        file_creation_time: Optional[Union[str, datetime]] = None,
+        file_last_write_time: Optional[Union[str, datetime]] = None,
+        file_permission: Optional[str] = None,
+        permission_key: Optional[str] = None,
+        **kwargs: Any
+    ) -> Dict[str, Any]:
+        """Sets HTTP headers on the file.
+
+        :param ~azure.storage.fileshare.ContentSettings content_settings:
+            ContentSettings object used to set file properties. Used to set content type, encoding,
+            language, disposition, md5, and cache control.
+        :param file_attributes:
+            The file system attributes for files and directories.
+            If not set, indicates preservation of existing values.
+            For example, when given as a string: 'Temporary|Archive'.
+        :type file_attributes: str or ~azure.storage.fileshare.NTFSAttributes or None
+        :param file_creation_time: Creation time for the file
+        :type file_creation_time: str or ~datetime.datetime or None
+        :param file_last_write_time: Last write time for the file
+        :type file_last_write_time: str or ~datetime.datetime or None
+        :param file_permission: If specified, the permission (security
+            descriptor) shall be set for the directory/file. This header can be
+            used if the permission size is <= 8KB; otherwise the x-ms-file-permission-key
+            header shall be used. Default value: Inherit. If SDDL is specified as
+            input, it must have owner, group and dacl. Note: Only one of the
+            x-ms-file-permission or x-ms-file-permission-key should be specified.
+        :type file_permission: str
+        :param permission_key: Key of the permission to be set for the
+            directory/file. Note: Only one of the x-ms-file-permission or
+            x-ms-file-permission-key should be specified.
+        :type permission_key: str
+        :keyword file_permission_format:
+            Specifies the format in which the permission is returned. If not specified, SDDL will be the default.
+        :paramtype file_permission_format: Literal['sddl', 'binary']
+        :keyword file_change_time:
+            Change time for the file. If not specified, change time will be set to the current date/time.
+
+            .. versionadded:: 12.8.0
+
+                This parameter was introduced in API version '2021-06-08'.
+
+        :paramtype file_change_time: str or ~datetime.datetime
+        :keyword lease:
+            Required if the file has an active lease. Value can be a ShareLeaseClient object
+            or the lease ID as a string.
+
+            .. versionadded:: 12.1.0
+
+        :paramtype lease: ~azure.storage.fileshare.aio.ShareLeaseClient or str
+        :keyword str owner:
+            NFS only. The owner of the file.
+        :keyword str group:
+            NFS only. The owning group of the file.
+        :keyword str file_mode:
+            NFS only. The file mode of the file.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :returns: File-updated property dict (Etag and last modified).
+        :rtype: dict[str, Any]
+        """
+        access_conditions = get_access_conditions(kwargs.pop('lease', None))
+        timeout = kwargs.pop('timeout', None)
+        file_content_length = kwargs.pop("size", None)
+        file_http_headers = FileHTTPHeaders(
+            file_cache_control=content_settings.cache_control,
+            file_content_type=content_settings.content_type,
+            file_content_md5=bytearray(content_settings.content_md5) if content_settings.content_md5 else None,
+            file_content_encoding=content_settings.content_encoding,
+            file_content_language=content_settings.content_language,
+            file_content_disposition=content_settings.content_disposition,
+        )
+        file_permission = _get_file_permission(file_permission, permission_key, None)
+        file_change_time = kwargs.pop('file_change_time', None)
+        try:
+            return cast(Dict[str, Any], await self._client.file.set_http_headers(
+                file_content_length=file_content_length,
+                file_http_headers=file_http_headers,
+                file_attributes=str(file_attributes) if file_attributes is not None else file_attributes,
+                file_creation_time=_datetime_to_str(file_creation_time),
+                file_last_write_time=_datetime_to_str(file_last_write_time),
+                file_change_time=_datetime_to_str(file_change_time),
+                file_permission=file_permission,
+                file_permission_key=permission_key,
+                lease_access_conditions=access_conditions,
+                timeout=timeout,
+                cls=return_response_headers,
+                **kwargs
+            ))
+        except HttpResponseError as error:
+            process_storage_error(error)
+
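+    # Editorial sketch (not part of this patch), assuming a hypothetical
+    # `file_client`; ContentSettings is the public model from
+    # azure.storage.fileshare:
+    #
+    #     from azure.storage.fileshare import ContentSettings
+    #     settings = ContentSettings(content_type='text/plain', cache_control='no-cache')
+    #     await file_client.set_http_headers(settings)
+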
+    @distributed_trace_async
+    async def set_file_metadata(self, metadata: Optional[Dict[str, Any]] = None, **kwargs: Any) -> Dict[str, Any]:
+        """Sets user-defined metadata for the specified file as one or more
+        name-value pairs.
+
+        Each call to this operation replaces all existing metadata
+        attached to the file. To remove all metadata from the file,
+        call this operation with no metadata dict.
+
+        :param metadata:
+            Name-value pairs associated with the file as metadata.
+        :type metadata: dict[str, str]
+        :keyword lease:
+            Required if the file has an active lease. Value can be a ShareLeaseClient object
+            or the lease ID as a string.
+
+            .. versionadded:: 12.1.0
+
+        :paramtype lease: ~azure.storage.fileshare.aio.ShareLeaseClient or str
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :returns: File-updated property dict (Etag and last modified).
+        :rtype: dict[str, Any]
+        """
+        access_conditions = get_access_conditions(kwargs.pop('lease', None))
+        timeout = kwargs.pop('timeout', None)
+        headers = kwargs.pop("headers", {})
+        headers.update(add_metadata_headers(metadata))
+        try:
+            return cast(Dict[str, Any], await self._client.file.set_metadata(
+                metadata=metadata, lease_access_conditions=access_conditions,
+                timeout=timeout, cls=return_response_headers, headers=headers, **kwargs
+            ))
+        except HttpResponseError as error:
+            process_storage_error(error)
+
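+    # Editorial sketch (not part of this patch), assuming a hypothetical
+    # `file_client`; note the call replaces any existing metadata wholesale:
+    #
+    #     await file_client.set_file_metadata({'category': 'reports'})
+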
+    @distributed_trace_async
+    async def upload_range(
+        self, data: bytes,
+        offset: int,
+        length: int,
+        **kwargs: Any
+    ) -> Dict[str, Any]:
+        """Upload a range of bytes to a file.
+
+        :param bytes data:
+            The data to upload.
+        :param int offset:
+            Start of byte range to use for uploading a section of the file.
+            The range can be up to 4 MB in size.
+        :param int length:
+            Number of bytes to use for uploading a section of the file.
+            The range can be up to 4 MB in size.
+        :keyword bool validate_content:
+            If true, calculates an MD5 hash of the range content. The storage
+            service checks the hash of the content that has arrived
+            with the hash that was sent. This is primarily valuable for detecting
+            bitflips on the wire if using http instead of https as https (the default)
+            will already validate. Note that this MD5 hash is not stored with the
+            file.
+        :keyword file_last_write_mode:
+            If the file last write time should be preserved or overwritten. Possible values
+            are "preserve" or "now". If not specified, file last write time will be changed to
+            the current date/time.
+
+            .. versionadded:: 12.8.0
+
+                This parameter was introduced in API version '2021-06-08'.
+
+        :paramtype file_last_write_mode: Literal["preserve", "now"]
+        :keyword lease:
+            Required if the file has an active lease. Value can be a ShareLeaseClient object
+            or the lease ID as a string.
+
+            .. versionadded:: 12.1.0
+
+        :paramtype lease: ~azure.storage.fileshare.aio.ShareLeaseClient or str
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :keyword str encoding:
+            Defaults to UTF-8.
+        :returns: File-updated property dict (Etag and last modified).
+        :rtype: Dict[str, Any]
+        """
+        validate_content = kwargs.pop('validate_content', False)
+        timeout = kwargs.pop('timeout', None)
+        encoding = kwargs.pop('encoding', 'UTF-8')
+        file_last_write_mode = kwargs.pop('file_last_write_mode', None)
+        if isinstance(data, str):
+            data = data.encode(encoding)
+        end_range = offset + length - 1  # Reformat to an inclusive range index
+        content_range = f'bytes={offset}-{end_range}'
+        access_conditions = get_access_conditions(kwargs.pop('lease', None))
+        try:
+            return cast(Dict[str, Any], await self._client.file.upload_range(
+                range=content_range,
+                content_length=length,
+                optionalbody=data,
+                timeout=timeout,
+                validate_content=validate_content,
+                file_last_written_mode=file_last_write_mode,
+                lease_access_conditions=access_conditions,
+                cls=return_response_headers,
+                **kwargs
+            ))
+        except HttpResponseError as error:
+            process_storage_error(error)
+
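+    # Editorial sketch (not part of this patch), assuming a hypothetical
+    # `file_client` whose file is already at least 1024 bytes long (this
+    # operation writes into an existing file; it does not resize it):
+    #
+    #     await file_client.upload_range(b'a' * 1024, offset=0, length=1024)
+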
+    @distributed_trace_async
+    async def upload_range_from_url(
+        self, source_url: str,
+        offset: int,
+        length: int,
+        source_offset: int,
+        **kwargs: Any
+    ) -> Dict[str, Any]:
+        """
+        Writes the bytes from one Azure File endpoint into the specified range of another Azure File endpoint.
+
+        :param int offset:
+            Start of byte range to use for updating a section of the file.
+            The range can be up to 4 MB in size.
+        :param int length:
+            Number of bytes to use for updating a section of the file.
+            The range can be up to 4 MB in size.
+        :param str source_url:
+            A URL of up to 2 KB in length that specifies an Azure file or blob.
+            The value should be URL-encoded as it would appear in a request URI.
+            If the source is in another account, the source must either be public
+            or must be authenticated via a shared access signature. If the source
+            is public, no authentication is required.
+            Examples:
+            https://myaccount.file.core.windows.net/myshare/mydir/myfile
+            https://otheraccount.file.core.windows.net/myshare/mydir/myfile?sastoken
+        :param int source_offset:
+            This indicates the start of the range of bytes (inclusive) that has to be taken from the copy source.
+            The service will read the same number of bytes as the destination range (length-offset).
+        :keyword ~datetime.datetime source_if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this conditional header to copy the blob only if the source
+            blob has been modified since the specified date/time.
+        :keyword ~datetime.datetime source_if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this conditional header to copy the blob only if the source blob
+            has not been modified since the specified date/time.
+        :keyword str source_etag:
+            The source ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions source_match_condition:
+            The source match condition to use upon the etag.
+        :keyword file_last_write_mode:
+            If the file last write time should be preserved or overwritten. Possible values
+            are "preserve" or "now". If not specified, file last write time will be changed to
+            the current date/time.
+
+            .. versionadded:: 12.8.0
+
+                This parameter was introduced in API version '2021-06-08'.
+
+        :paramtype file_last_write_mode: Literal["preserve", "now"]
+        :keyword lease:
+            Required if the file has an active lease. Value can be a ShareLeaseClient object
+            or the lease ID as a string.
+
+            .. versionadded:: 12.1.0
+
+        :paramtype lease: ~azure.storage.fileshare.aio.ShareLeaseClient or str
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :keyword str source_authorization:
+            Authenticate as a service principal using a client secret to access a source blob. Ensure "bearer " is
+            the prefix of the source_authorization string.
+        :returns: Result after writing to the specified range of the destination Azure File endpoint.
+        :rtype: dict[str, Any]
+        """
+        options = _upload_range_from_url_options(
+            source_url=source_url,
+            offset=offset,
+            length=length,
+            source_offset=source_offset,
+            **kwargs
+        )
+        try:
+            return cast(Dict[str, Any], await self._client.file.upload_range_from_url(**options))
+        except HttpResponseError as error:
+            process_storage_error(error)
+
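+    # Editorial sketch (not part of this patch), assuming hypothetical
+    # `file_client` and readable `src_url`: copy 512 bytes from the start of
+    # the source into the destination range starting at offset 0:
+    #
+    #     await file_client.upload_range_from_url(
+    #         src_url, offset=0, length=512, source_offset=0)
+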
+    @distributed_trace_async
+    async def get_ranges(
+        self, offset: Optional[int] = None,
+        length: Optional[int] = None,
+        **kwargs: Any
+    ) -> List[Dict[str, int]]:
+        """Returns the list of valid page ranges for a file or snapshot
+        of a file.
+
+        :param int offset:
+            Specifies the start offset of bytes over which to get ranges.
+        :param int length:
+            Number of bytes over which to get ranges.
+        :keyword lease:
+            Required if the file has an active lease. Value can be a ShareLeaseClient object
+            or the lease ID as a string.
+
+            .. versionadded:: 12.1.0
+
+        :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :returns:
+            A list of valid ranges.
+        :rtype: List[dict[str, int]]
+        """
+        options = _get_ranges_options(
+            snapshot=self.snapshot,
+            offset=offset,
+            length=length,
+            **kwargs)
+        try:
+            ranges = await self._client.file.get_range_list(**options)
+        except HttpResponseError as error:
+            process_storage_error(error)
+        return [{'start': file_range.start, 'end': file_range.end} for file_range in ranges.ranges]
+
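+    # Editorial sketch (not part of this patch), assuming a hypothetical
+    # `file_client`; each returned range uses inclusive start/end offsets:
+    #
+    #     ranges = await file_client.get_ranges()
+    #     # e.g. [{'start': 0, 'end': 511}]
+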
+    @distributed_trace_async
+    async def get_ranges_diff(
+        self, previous_sharesnapshot: Union[str, Dict[str, Any]],
+        offset: Optional[int] = None,
+        length: Optional[int] = None,
+        *,
+        include_renames: Optional[bool] = None,
+        **kwargs: Any
+    ) -> Tuple[List[Dict[str, int]], List[Dict[str, int]]]:
+        """Returns the list of valid page ranges for a file or snapshot
+        of a file.
+
+        .. versionadded:: 12.6.0
+
+        :param int offset:
+            Specifies the start offset of bytes over which to get ranges.
+        :param int length:
+            Number of bytes over which to get ranges.
+        :param str previous_sharesnapshot:
+            The snapshot diff parameter that contains an opaque DateTime value that
+            specifies a previous file snapshot to be compared
+            against a more recent snapshot or the current file.
+        :keyword Optional[bool] include_renames:
+            Only valid if previous_sharesnapshot parameter is provided. Specifies whether the changed ranges for
+            a file that has been renamed or moved between the target snapshot (or live file) and the previous
+            snapshot should be listed. If set to True, the valid changed ranges for the file will be returned.
+            If set to False, the operation will result in a 409 (Conflict) response.
+        :keyword lease:
+            Required if the file has an active lease. Value can be a ShareLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :returns:
+            A tuple of two lists of file ranges as dictionaries with 'start' and 'end' keys.
+            The first element is the list of filled file ranges, the second is the list of cleared file ranges.
+        :rtype: tuple[list[dict[str, int]], list[dict[str, int]]]
+        """
+        options = _get_ranges_options(
+            snapshot=self.snapshot,
+            offset=offset,
+            length=length,
+            previous_sharesnapshot=previous_sharesnapshot,
+            support_rename=include_renames,
+            **kwargs)
+        try:
+            ranges = await self._client.file.get_range_list(**options)
+        except HttpResponseError as error:
+            process_storage_error(error)
+        return get_file_ranges_result(ranges)
+
+    @distributed_trace_async
+    async def clear_range(self, offset: int, length: int, **kwargs: Any) -> Dict[str, Any]:
+        """Clears the specified range and releases the space used in storage for
+        that range.
+
+        :param int offset:
+            Start of byte range to use for clearing a section of the file.
+            The range can be up to 4 MB in size.
+        :param int length:
+            Number of bytes to use for clearing a section of the file.
+            The range can be up to 4 MB in size.
+        :keyword lease:
+            Required if the file has an active lease. Value can be a ShareLeaseClient object
+            or the lease ID as a string.
+
+            .. versionadded:: 12.1.0
+
+        :paramtype lease: ~azure.storage.fileshare.aio.ShareLeaseClient or str
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :returns: File-updated property dict (Etag and last modified).
+        :rtype: Dict[str, Any]
+        """
+        access_conditions = get_access_conditions(kwargs.pop('lease', None))
+        timeout = kwargs.pop('timeout', None)
+
+        if offset is None or offset % 512 != 0:
+            raise ValueError("offset must be an integer that is aligned to a 512-byte boundary")
+        if length is None or length % 512 != 0:
+            raise ValueError("length must be an integer that is aligned to a 512-byte boundary")
+        end_range = length + offset - 1  # Reformat to an inclusive range index
+        content_range = f"bytes={offset}-{end_range}"
+        try:
+            return cast(Dict[str, Any], await self._client.file.upload_range(
+                timeout=timeout,
+                cls=return_response_headers,
+                content_length=0,
+                optionalbody=None,
+                file_range_write="clear",
+                range=content_range,
+                lease_access_conditions=access_conditions,
+                **kwargs
+            ))
+        except HttpResponseError as error:
+            process_storage_error(error)
+
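+    # Editorial sketch (not part of this patch), assuming a hypothetical
+    # `file_client`; both offset and length must be 512-byte aligned:
+    #
+    #     await file_client.clear_range(offset=0, length=1024)
+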
+    @distributed_trace_async
+    async def resize_file(self, size: int, **kwargs: Any) -> Dict[str, Any]:
+        """Resizes a file to the specified size.
+
+        :param int size:
+            Size to resize file to (in bytes)
+        :keyword lease:
+            Required if the file has an active lease. Value can be a ShareLeaseClient object
+            or the lease ID as a string.
+
+            .. versionadded:: 12.1.0
+
+        :paramtype lease: ~azure.storage.fileshare.aio.ShareLeaseClient or str
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :returns: File-updated property dict (Etag and last modified).
+        :rtype: Dict[str, Any]
+        """
+        access_conditions = get_access_conditions(kwargs.pop('lease', None))
+        timeout = kwargs.pop('timeout', None)
+        try:
+            return cast(Dict[str, Any], await self._client.file.set_http_headers(
+                file_content_length=size,
+                file_attributes=None,
+                file_creation_time=None,
+                file_last_write_time=None,
+                file_permission="preserve",
+                lease_access_conditions=access_conditions,
+                cls=return_response_headers,
+                timeout=timeout,
+                **kwargs
+            ))
+        except HttpResponseError as error:
+            process_storage_error(error)
+
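+    # Editorial sketch (not part of this patch), assuming a hypothetical
+    # `file_client`: resize to an exact byte size (shrinking discards data
+    # beyond the new size):
+    #
+    #     await file_client.resize_file(2048)
+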
+    @distributed_trace
+    def list_handles(self, **kwargs: Any) -> AsyncItemPaged[Handle]:
+        """Lists handles for file.
+
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :returns: An auto-paging iterable of Handle
+        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.storage.fileshare.Handle]
+        """
+        timeout = kwargs.pop('timeout', None)
+        results_per_page = kwargs.pop("results_per_page", None)
+        command = functools.partial(
+            self._client.file.list_handles,
+            sharesnapshot=self.snapshot,
+            timeout=timeout,
+            **kwargs)
+        return AsyncItemPaged(
+            command, results_per_page=results_per_page,
+            page_iterator_class=HandlesPaged)
+
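+    # Editorial sketch (not part of this patch), assuming a hypothetical
+    # `file_client`; note the method itself is not awaited, the pager is
+    # iterated asynchronously:
+    #
+    #     async for handle in file_client.list_handles():
+    #         print(handle.id)
+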
+    @distributed_trace_async
+    async def close_handle(self, handle: Union[str, Handle], **kwargs: Any) -> Dict[str, int]:
+        """Close an open file handle.
+
+        :param handle:
+            A specific handle to close.
+        :type handle: str or ~azure.storage.fileshare.Handle
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :returns:
+            The number of handles closed (this may be 0 if the specified handle was not found)
+            and the number of handles that failed to close, in a dict.
+        :rtype: dict[str, int]
+        """
+        if isinstance(handle, Handle):
+            handle_id = handle.id
+        else:
+            handle_id = handle
+        if handle_id == '*':
+            raise ValueError("Handle ID '*' is not supported. Use 'close_all_handles' instead.")
+        try:
+            response = await self._client.file.force_close_handles(
+                handle_id,
+                marker=None,
+                sharesnapshot=self.snapshot,
+                cls=return_response_headers,
+                **kwargs
+            )
+            return {
+                'closed_handles_count': response.get('number_of_handles_closed', 0),
+                'failed_handles_count': response.get('number_of_handles_failed', 0)
+            }
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    @distributed_trace_async
+    async def close_all_handles(self, **kwargs: Any) -> Dict[str, int]:
+        """Close any open file handles.
+
+        This operation will block until the service has closed all open handles.
+
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :returns:
+            The total number of handles closed and the number of handles that
+            failed to close, in a dict.
+        :rtype: dict[str, int]
+        """
+        timeout = kwargs.pop('timeout', None)
+        start_time = time.time()
+
+        try_close = True
+        continuation_token = None
+        total_closed = 0
+        total_failed = 0
+        while try_close:
+            try:
+                response = await self._client.file.force_close_handles(
+                    handle_id='*',
+                    timeout=timeout,
+                    marker=continuation_token,
+                    sharesnapshot=self.snapshot,
+                    cls=return_response_headers,
+                    **kwargs
+                )
+            except HttpResponseError as error:
+                process_storage_error(error)
+            continuation_token = response.get('marker')
+            try_close = bool(continuation_token)
+            total_closed += response.get('number_of_handles_closed', 0)
+            total_failed += response.get('number_of_handles_failed', 0)
+            if timeout:
+                timeout = max(0, timeout - (time.time() - start_time))
+        return {
+            'closed_handles_count': total_closed,
+            'failed_handles_count': total_failed
+        }
+
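+    # Editorial sketch (not part of this patch), assuming a hypothetical
+    # `file_client`:
+    #
+    #     counts = await file_client.close_all_handles()
+    #     print(counts['closed_handles_count'], counts['failed_handles_count'])
+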
+    @distributed_trace_async
+    async def create_hardlink(
+        self, target: str,
+        *,
+        lease: Optional[Union[ShareLeaseClient, str]] = None,
+        timeout: Optional[int] = None,
+        **kwargs: Any
+    ) -> Dict[str, Any]:
+        """NFS only. Creates a hard link to the file specified by path.
+
+        :param str target:
+            Specifies the path of the target file to which the link will be created, up to 2 KiB in length.
+            It should be the full path of the target starting from the root. The target file must be in the
+            same share and the same storage account.
+        :keyword lease:
+            Required if the file has an active lease. Value can be a ShareLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str or None
+        :keyword Optional[int] timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :returns: File-updated property dict (ETag and last modified).
+        :rtype: dict[str, Any]
+        """
+        try:
+            return cast(Dict[str, Any], await self._client.file.create_hard_link(
+                target_file=target,
+                lease_access_conditions=lease,
+                timeout=timeout,
+                cls=return_response_headers,
+                **kwargs
+            ))
+        except HttpResponseError as error:
+            process_storage_error(error)
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/fileshare/aio/_lease_async.py b/.venv/lib/python3.12/site-packages/azure/storage/fileshare/aio/_lease_async.py
new file mode 100644
index 00000000..70d6a392
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/fileshare/aio/_lease_async.py
@@ -0,0 +1,249 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+# pylint: disable=docstring-keyword-should-match-keyword-only
+
+import uuid
+
+from typing import Union, Optional, Any, TYPE_CHECKING
+
+from azure.core.exceptions import HttpResponseError
+from azure.core.tracing.decorator_async import distributed_trace_async
+
+from .._shared.response_handlers import return_response_headers, process_storage_error
+from .._generated.aio.operations import FileOperations, ShareOperations
+
+if TYPE_CHECKING:
+    from azure.storage.fileshare.aio import ShareClient, ShareFileClient
+
+
+class ShareLeaseClient:  # pylint: disable=client-accepts-api-version-keyword
+    """Creates a new ShareLeaseClient.
+
+    This client provides lease operations on a ShareClient or ShareFileClient.
+
+    :ivar str id:
+        The ID of the lease currently being maintained. This will be `None` if no
+        lease has yet been acquired.
+    :ivar str etag:
+        The ETag of the lease currently being maintained. This will be `None` if no
+        lease has yet been acquired or modified.
+    :ivar ~datetime.datetime last_modified:
+        The last modified timestamp of the lease currently being maintained.
+        This will be `None` if no lease has yet been acquired or modified.
+
+    :param client:
+        The client of the file or share to lease.
+    :type client: ~azure.storage.fileshare.ShareFileClient or
+        ~azure.storage.fileshare.ShareClient
+    :param str lease_id:
+        A string representing the lease ID of an existing lease. This value does not
+        need to be specified in order to acquire a new lease, or break one.
+    """
+    def __init__(  # pylint: disable=missing-client-constructor-parameter-credential, missing-client-constructor-parameter-kwargs
+        self, client: Union["ShareFileClient", "ShareClient"],
+        lease_id: Optional[str] = None
+    ) -> None:
+        self.id = lease_id or str(uuid.uuid4())
+        self.last_modified = None
+        self.etag = None
+        if hasattr(client, 'file_name'):
+            self._client = client._client.file  # type: ignore
+            self._snapshot = None
+        elif hasattr(client, 'share_name'):
+            self._client = client._client.share
+            self._snapshot = client.snapshot
+        else:
+            raise TypeError("Lease must use ShareFileClient or ShareClient.")
+
+    async def __aenter__(self):
+        return self
+
+    async def __aexit__(self, *args: Any):
+        await self.release()
+
+    @distributed_trace_async
+    async def acquire(self, **kwargs: Any) -> None:
+        """Requests a new lease. This operation establishes and manages a lock on a
+        file or share for write and delete operations. If the file or share does not have an active lease,
+        the File or Share service creates a lease on the file or share. If the file has an active lease,
+        you can only request a new lease using the active lease ID.
+
+
+        If the file or share does not have an active lease, the File or Share service creates a
+        lease on the file and returns a new lease ID.
+
+        :keyword int lease_duration:
+            Specifies the duration of the lease, in seconds, or negative one
+            (-1) for a lease that never expires. File leases never expire. A non-infinite share lease can be
+            between 15 and 60 seconds. A share lease duration cannot be changed
+            using renew or change. Default is -1 (infinite share lease).
+
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :rtype: None
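+
+        .. admonition:: Example:
+
+            A minimal usage sketch (editor's illustration, not an SDK sample), assuming an
+            existing async ``ShareClient`` named ``share``::
+
+                lease = ShareLeaseClient(share)
+                await lease.acquire()
+                # ... perform operations under the lease ...
+                await lease.release()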
+        """
+        try:
+            lease_duration = kwargs.pop('lease_duration', -1)
+            if self._snapshot:
+                kwargs['sharesnapshot'] = self._snapshot
+            response = await self._client.acquire_lease(
+                timeout=kwargs.pop('timeout', None),
+                duration=lease_duration,
+                proposed_lease_id=self.id,
+                cls=return_response_headers,
+                **kwargs)
+        except HttpResponseError as error:
+            process_storage_error(error)
+        self.id = response.get('lease_id')
+        self.last_modified = response.get('last_modified')
+        self.etag = response.get('etag')
+
+    @distributed_trace_async
+    async def renew(self, **kwargs: Any) -> None:
+        """Renews the share lease.
+
+        The share lease can be renewed if the lease ID specified in the
+        lease client matches that associated with the share. Note that
+        the lease may be renewed even if it has expired as long as the share
+        has not been leased again since the expiration of that lease. When you
+        renew a lease, the lease duration clock resets.
+
+        .. versionadded:: 12.6.0
+
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :return: None
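+
+        .. admonition:: Example:
+
+            A minimal sketch (editor's illustration), assuming ``lease`` is a
+            ``ShareLeaseClient`` holding an active share lease (file leases cannot
+            be renewed)::
+
+                await lease.renew()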
+        """
+        if isinstance(self._client, FileOperations):
+            raise TypeError("Lease renewal operations are only valid for ShareClient.")
+        try:
+            response = await self._client.renew_lease(
+                lease_id=self.id,
+                timeout=kwargs.pop('timeout', None),
+                sharesnapshot=self._snapshot,
+                cls=return_response_headers,
+                **kwargs)
+        except HttpResponseError as error:
+            process_storage_error(error)
+        self.etag = response.get('etag')
+        self.id = response.get('lease_id')
+        self.last_modified = response.get('last_modified')
+
+    @distributed_trace_async
+    async def release(self, **kwargs: Any) -> None:
+        """Releases the lease. The lease may be released if the lease ID specified on the request matches
+        that associated with the share or file. Releasing the lease allows another client to immediately acquire
+        the lease for the share or file as soon as the release is complete.
+
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :return: None
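+
+        .. admonition:: Example:
+
+            A sketch (editor's illustration) of using the client as an async context
+            manager, which releases the lease on exit; assumes an existing async
+            ``ShareClient`` named ``share``::
+
+                async with ShareLeaseClient(share) as lease:
+                    await lease.acquire()
+                    # ... the lease is released automatically on exiting the block ...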
+        """
+        try:
+            if self._snapshot:
+                kwargs['sharesnapshot'] = self._snapshot
+            response = await self._client.release_lease(
+                lease_id=self.id,
+                timeout=kwargs.pop('timeout', None),
+                cls=return_response_headers,
+                **kwargs)
+        except HttpResponseError as error:
+            process_storage_error(error)
+        self.etag = response.get('etag')
+        self.id = response.get('lease_id')
+        self.last_modified = response.get('last_modified')
+
+    @distributed_trace_async
+    async def change(self, proposed_lease_id: str, **kwargs: Any) -> None:
+        """ Changes the lease ID of an active lease. A change must include the current lease ID in x-ms-lease-id and
+        a new lease ID in x-ms-proposed-lease-id.
+
+        :param str proposed_lease_id:
+            Proposed lease ID, in a GUID string format. The File or Share service returns
+            400 (Invalid request) if the proposed lease ID is not in the correct format.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :return: None
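+
+        .. admonition:: Example:
+
+            A minimal sketch (editor's illustration), assuming ``lease`` holds an
+            active lease::
+
+                import uuid
+
+                await lease.change(proposed_lease_id=str(uuid.uuid4()))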
+        """
+        try:
+            if self._snapshot:
+                kwargs['sharesnapshot'] = self._snapshot
+            response = await self._client.change_lease(
+                lease_id=self.id,
+                proposed_lease_id=proposed_lease_id,
+                timeout=kwargs.pop('timeout', None),
+                cls=return_response_headers,
+                **kwargs)
+        except HttpResponseError as error:
+            process_storage_error(error)
+        self.etag = response.get('etag')
+        self.id = response.get('lease_id')
+        self.last_modified = response.get('last_modified')
+
+    @distributed_trace_async
+    async def break_lease(self, **kwargs: Any) -> int:
+        """Force breaks the lease if the file or share has an active lease. Any authorized request can break the lease;
+        the request is not required to specify a matching lease ID. An infinite lease breaks immediately.
+
+        Once a lease is broken, it cannot be changed. Any authorized request can break the lease;
+        the request is not required to specify a matching lease ID.
+        When a lease is successfully broken, the response indicates the interval
+        in seconds until a new lease can be acquired.
+
+        :keyword int lease_break_period:
+            This is the proposed duration of seconds that the share lease
+            should continue before it is broken, between 0 and 60 seconds. This
+            break period is only used if it is shorter than the time remaining
+            on the share lease. If longer, the time remaining on the share lease is used.
+            A new share lease will not be available before the break period has
+            expired, but the share lease may be held for longer than the break
+            period. If this header does not appear with a break
+            operation, a fixed-duration share lease breaks after the remaining share lease
+            period elapses, and an infinite share lease breaks immediately.
+
+            .. versionadded:: 12.5.0
+
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :return: Approximate time remaining in the lease period, in seconds.
+        :rtype: int
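+
+        .. admonition:: Example:
+
+            A minimal sketch (editor's illustration), assuming ``lease`` is attached to
+            a share (``lease_break_period`` is not valid for file leases)::
+
+                seconds_until_available = await lease.break_lease(lease_break_period=10)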
+        """
+        try:
+            lease_break_period = kwargs.pop('lease_break_period', None)
+            if self._snapshot:
+                kwargs['sharesnapshot'] = self._snapshot
+            if isinstance(self._client, ShareOperations):
+                kwargs['break_period'] = lease_break_period
+            if isinstance(self._client, FileOperations) and lease_break_period:
+                raise TypeError("Setting a lease break period is only applicable to Share leases.")
+
+            response = await self._client.break_lease(
+                timeout=kwargs.pop('timeout', None),
+                cls=return_response_headers,
+                **kwargs)
+        except HttpResponseError as error:
+            process_storage_error(error)
+        return response.get('lease_time')  # type: ignore
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/fileshare/aio/_models.py b/.venv/lib/python3.12/site-packages/azure/storage/fileshare/aio/_models.py
new file mode 100644
index 00000000..dd7335bb
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/fileshare/aio/_models.py
@@ -0,0 +1,208 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+# pylint: disable=too-few-public-methods
+
+from typing import (
+    Any, Callable, Dict, List, Optional
+)
+
+from azure.core.async_paging import AsyncPageIterator
+from azure.core.exceptions import HttpResponseError
+
+from .._shared.response_handlers import return_context_and_deserialized, process_storage_error
+from .._generated.models import DirectoryItem
+from .._models import Handle, ShareProperties, DirectoryProperties, FileProperties
+
+
+def _wrap_item(item):
+    if isinstance(item, DirectoryItem):
+        return {'name': item.name, 'is_directory': True}
+    return {'name': item.name, 'size': item.properties.content_length, 'is_directory': False}
+
+
+class SharePropertiesPaged(AsyncPageIterator):
+    """An iterable of Share properties.
+
+    :param Callable command: Function to retrieve the next page of items.
+    :param Optional[str] prefix: Filters the results to return only shares whose names
+        begin with the specified prefix.
+    :param Optional[int] results_per_page: The maximum number of share names to retrieve per
+        call.
+    :param Optional[str] continuation_token: An opaque continuation token.
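+
+    .. admonition:: Example:
+
+        A sketch (editor's illustration) of how this pager is typically driven; ``command``
+        is assumed to be a partially-applied generated list-shares operation::
+
+            from azure.core.async_paging import AsyncItemPaged
+
+            shares = AsyncItemPaged(command, page_iterator_class=SharePropertiesPaged)
+            async for share in shares:
+                print(share.name)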
+    """
+
+    service_endpoint: Optional[str] = None
+    """The service URL."""
+    prefix: Optional[str] = None
+    """A filename prefix being used to filter the list."""
+    marker: Optional[str] = None
+    """The continuation token of the current page of results."""
+    results_per_page: Optional[int] = None
+    """The maximum number of results to retrieve per API call."""
+    location_mode: Optional[str] = None
+    """The location mode being used to list results. The available
+        options include "primary" and "secondary"."""
+    current_page: List[ShareProperties]
+    """The current page of listed results."""
+
+    def __init__(
+        self, command: Callable,
+        prefix: Optional[str] = None,
+        results_per_page: Optional[int] = None,
+        continuation_token: Optional[str] = None
+    ) -> None:
+        super(SharePropertiesPaged, self).__init__(
+            get_next=self._get_next_cb,
+            extract_data=self._extract_data_cb,
+            continuation_token=continuation_token or ""
+        )
+        self._command = command
+        self.service_endpoint = None
+        self.prefix = prefix
+        self.marker = None
+        self.results_per_page = results_per_page
+        self.location_mode = None
+        self.current_page = []
+
+    async def _get_next_cb(self, continuation_token):
+        try:
+            return await self._command(
+                marker=continuation_token or None,
+                maxresults=self.results_per_page,
+                prefix=self.prefix,
+                cls=return_context_and_deserialized,
+                use_location=self.location_mode)
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    async def _extract_data_cb(self, get_next_return):
+        self.location_mode, self._response = get_next_return
+        self.service_endpoint = self._response.service_endpoint
+        self.prefix = self._response.prefix
+        self.marker = self._response.marker
+        self.results_per_page = self._response.max_results
+        self.current_page = [ShareProperties._from_generated(i) for i in self._response.share_items]  # pylint: disable=protected-access
+        return self._response.next_marker or None, self.current_page
+
+
+class HandlesPaged(AsyncPageIterator):
+    """An iterable of Handles.
+
+    :param Callable command: Function to retrieve the next page of items.
+    :param Optional[int] results_per_page: The maximum number of handles to retrieve per call.
+    :param Optional[str] continuation_token: An opaque continuation token to retrieve the next page of results.
+    """
+
+    marker: Optional[str] = None
+    """The continuation token of the current page of results."""
+    results_per_page: Optional[int] = None
+    """The maximum number of results retrieved per API call."""
+    location_mode: Optional[str] = None
+    """The location mode being used to list results.
+        The available options include "primary" and "secondary"."""
+    current_page: List[Handle]
+    """The current page of listed results."""
+
+    def __init__(
+        self, command: Callable,
+        results_per_page: Optional[int] = None,
+        continuation_token: Optional[str] = None
+    ) -> None:
+        super(HandlesPaged, self).__init__(
+            get_next=self._get_next_cb,
+            extract_data=self._extract_data_cb,
+            continuation_token=continuation_token or ""
+        )
+        self._command = command
+        self.marker = None
+        self.results_per_page = results_per_page
+        self.location_mode = None
+        self.current_page = []
+
+    async def _get_next_cb(self, continuation_token):
+        try:
+            return await self._command(
+                marker=continuation_token or None,
+                maxresults=self.results_per_page,
+                cls=return_context_and_deserialized,
+                use_location=self.location_mode)
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    async def _extract_data_cb(self, get_next_return):
+        self.location_mode, self._response = get_next_return
+        self.current_page = [Handle._from_generated(h) for h in self._response.handle_list]  # pylint: disable=protected-access
+        return self._response.next_marker or None, self.current_page
+
+
+class DirectoryPropertiesPaged(AsyncPageIterator):
+    """An iterable for the contents of a directory.
+
+    This iterable will yield DirectoryProperties items for subdirectories and
+    FileProperties items for files contained in the directory.
+
+    :param Callable command: Function to retrieve the next page of items.
+    :param Optional[str] prefix: Filters the results to return only directories and files
+        whose names begin with the specified prefix.
+    :param Optional[int] results_per_page: The maximum number of items to retrieve per call.
+    :param Optional[str] continuation_token: An opaque continuation token.
+    """
+
+    service_endpoint: Optional[str] = None
+    """The service URL."""
+    prefix: Optional[str] = None
+    """A file name prefix being used to filter the list."""
+    marker: Optional[str] = None
+    """The continuation token of the current page of results."""
+    results_per_page: Optional[int] = None
+    """The maximum number of results retrieved per API call."""
+    continuation_token: Optional[str] = None
+    """The continuation token to retrieve the next page of results."""
+    location_mode: Optional[str] = None
+    """The location mode being used to list results. The available options include "primary" and "secondary"."""
+    current_page: List[Dict[str, Any]]
+    """The current page of listed results."""
+
+    def __init__(
+        self, command: Callable,
+        prefix: Optional[str] = None,
+        results_per_page: Optional[int] = None,
+        continuation_token: Optional[str] = None
+    ) -> None:
+        super(DirectoryPropertiesPaged, self).__init__(
+            get_next=self._get_next_cb,
+            extract_data=self._extract_data_cb,
+            continuation_token=continuation_token or ""
+        )
+        self._command = command
+        self.service_endpoint = None
+        self.prefix = prefix
+        self.marker = None
+        self.results_per_page = results_per_page
+        self.location_mode = None
+        self.current_page = []
+
+    async def _get_next_cb(self, continuation_token):
+        try:
+            return await self._command(
+                marker=continuation_token or None,
+                prefix=self.prefix,
+                maxresults=self.results_per_page,
+                cls=return_context_and_deserialized,
+                use_location=self.location_mode)
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    async def _extract_data_cb(self, get_next_return):
+        self.location_mode, self._response = get_next_return
+        self.service_endpoint = self._response.service_endpoint
+        self.prefix = self._response.prefix
+        self.marker = self._response.marker
+        self.results_per_page = self._response.max_results
+        self.current_page = [DirectoryProperties._from_generated(i) for i in self._response.segment.directory_items]  # pylint: disable=protected-access
+        self.current_page.extend([FileProperties._from_generated(i) for i in self._response.segment.file_items])  # pylint: disable=protected-access
+        return self._response.next_marker or None, self.current_page
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/fileshare/aio/_share_client_async.py b/.venv/lib/python3.12/site-packages/azure/storage/fileshare/aio/_share_client_async.py
new file mode 100644
index 00000000..7f66feb9
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/fileshare/aio/_share_client_async.py
@@ -0,0 +1,991 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+# pylint: disable=docstring-keyword-should-match-keyword-only
+
+import sys
+import warnings
+from typing import (
+    Any, cast, Dict, Literal, Optional, Union,
+    TYPE_CHECKING
+)
+from typing_extensions import Self
+
+from azure.core.async_paging import AsyncItemPaged
+from azure.core.exceptions import HttpResponseError
+from azure.core.pipeline import AsyncPipeline
+from azure.core.tracing.decorator import distributed_trace
+from azure.core.tracing.decorator_async import distributed_trace_async
+from .._deserialize import deserialize_permission, deserialize_share_properties
+from .._generated.aio import AzureFileStorage
+from .._generated.models import (
+    DeleteSnapshotsOptionType,
+    ShareStats,
+    SignedIdentifier
+)
+from .._models import ShareProtocols
+from .._parser import _parse_snapshot
+from .._share_client_helpers import (
+    _create_permission_for_share_options,
+    _format_url,
+    _from_share_url,
+    _parse_url
+)
+from .._shared.policies_async import ExponentialRetry
+from .._shared.base_client import parse_query, StorageAccountHostsMixin
+from .._shared.base_client_async import AsyncStorageAccountHostsMixin, AsyncTransportWrapper, parse_connection_str
+from .._shared.request_handlers import add_metadata_headers, serialize_iso
+from .._shared.response_handlers import (
+    process_storage_error,
+    return_headers_and_deserialized,
+    return_response_headers
+)
+from .._serialize import get_access_conditions, get_api_version
+from ..aio._lease_async import ShareLeaseClient
+from ._directory_client_async import ShareDirectoryClient
+from ._file_client_async import ShareFileClient
+
+if TYPE_CHECKING:
+    from azure.core.credentials import AzureNamedKeyCredential, AzureSasCredential
+    from azure.core.credentials_async import AsyncTokenCredential
+    from .._models import AccessPolicy, DirectoryProperties, FileProperties, ShareProperties
+
+
+class ShareClient(AsyncStorageAccountHostsMixin, StorageAccountHostsMixin):  # type: ignore [misc]
+    """A client to interact with a specific share, although that share may not yet exist.
+
+    For operations relating to a specific directory or file in this share, the clients for
+    those entities can also be retrieved using the :func:`get_directory_client` and :func:`get_file_client` functions.
+
+    :param str account_url:
+        The URI to the storage account. In order to create a client given the full URI to the share,
+        use the :func:`from_share_url` classmethod.
+    :param share_name:
+        The name of the share with which to interact.
+    :type share_name: str
+    :param str snapshot:
+        An optional share snapshot on which to operate. This can be the snapshot ID string
+        or the response returned from :func:`create_snapshot`.
+    :param credential:
+        The credentials with which to authenticate. This is optional if the
+        account URL already has a SAS token. The value can be a SAS token string,
+        an instance of an AzureSasCredential or AzureNamedKeyCredential from azure.core.credentials,
+        an account shared access key, or an instance of an AsyncTokenCredential class from azure.identity.
+        If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential
+        - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError.
+        If using an instance of AzureNamedKeyCredential, "name" should be the storage account name, and "key"
+        should be the storage account key.
+    :type credential:
+        ~azure.core.credentials.AzureNamedKeyCredential or
+        ~azure.core.credentials.AzureSasCredential or
+        ~azure.core.credentials_async.AsyncTokenCredential or
+        str or dict[str, str] or None
+    :keyword token_intent:
+        Required when using `AsyncTokenCredential` for authentication and ignored for other forms of authentication.
+        Specifies the intent for all requests when using `AsyncTokenCredential` authentication. Possible values are:
+
+        backup - Specifies requests are intended for backup/admin type operations, meaning that all file/directory
+                 ACLs are bypassed and full permissions are granted. User must also have required RBAC permission.
+
+    :paramtype token_intent: Literal['backup']
+    :keyword bool allow_trailing_dot: If true, the trailing dot will not be trimmed from the target URI.
+    :keyword bool allow_source_trailing_dot: If true, the trailing dot will not be trimmed from the source URI.
+    :keyword str api_version:
+        The Storage API version to use for requests. Default value is the most recent service version that is
+        compatible with the current SDK. Setting to an older version may result in reduced feature compatibility.
+
+        .. versionadded:: 12.1.0
+
+    :keyword str secondary_hostname:
+        The hostname of the secondary endpoint.
+    :keyword int max_range_size: The maximum range size used for a file upload. Defaults to 4*1024*1024.
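+
+    .. admonition:: Example:
+
+        A minimal construction sketch (editor's illustration; the account URL and
+        credential are placeholders)::
+
+            from azure.storage.fileshare.aio import ShareClient
+
+            share = ShareClient(
+                account_url="https://<my-account>.file.core.windows.net",
+                share_name="myshare",
+                credential="<account-key-or-sas-token>")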
+    """
+    def __init__(
+        self, account_url: str,
+        share_name: str,
+        snapshot: Optional[Union[str, Dict[str, Any]]] = None,
+        credential: Optional[Union[str, Dict[str, str], "AzureNamedKeyCredential", "AzureSasCredential", "AsyncTokenCredential"]] = None,  # pylint: disable=line-too-long
+        *,
+        token_intent: Optional[Literal['backup']] = None,
+        **kwargs: Any
+    ) -> None:
+        kwargs['retry_policy'] = kwargs.get('retry_policy') or ExponentialRetry(**kwargs)
+        loop = kwargs.pop('loop', None)
+        if loop and sys.version_info >= (3, 8):
+            warnings.warn("The 'loop' parameter was deprecated from asyncio's high-level"
+            "APIs in Python 3.8 and is no longer supported.", DeprecationWarning)
+        if hasattr(credential, 'get_token') and not token_intent:
+            raise ValueError("'token_intent' keyword is required when 'credential' is an AsyncTokenCredential.")
+        parsed_url = _parse_url(account_url, share_name)
+        path_snapshot, sas_token = parse_query(parsed_url.query)
+        if not sas_token and not credential:
+            raise ValueError(
+                'You need to provide either an account shared key or SAS token when creating a storage service.')
+        self.snapshot = _parse_snapshot(snapshot, path_snapshot)
+        self.share_name = share_name
+        self._query_str, credential = self._format_query_string(
+            sas_token=sas_token, credential=credential, share_snapshot=self.snapshot)
+        super(ShareClient, self).__init__(
+            parsed_url=parsed_url, service='file-share', credential=credential, **kwargs)
+        self.allow_trailing_dot = kwargs.pop('allow_trailing_dot', None)
+        self.allow_source_trailing_dot = kwargs.pop('allow_source_trailing_dot', None)
+        self.file_request_intent = token_intent
+        self._client = AzureFileStorage(url=self.url, base_url=self.url, pipeline=self._pipeline,
+                                        allow_trailing_dot=self.allow_trailing_dot,
+                                        allow_source_trailing_dot=self.allow_source_trailing_dot,
+                                        file_request_intent=self.file_request_intent)
+        self._client._config.version = get_api_version(kwargs)  # type: ignore [assignment]
+
+    @classmethod
+    def from_share_url(
+        cls, share_url: str,
+        snapshot: Optional[Union[str, Dict[str, Any]]] = None,
+        credential: Optional[Union[str, Dict[str, str], "AzureNamedKeyCredential", "AzureSasCredential", "AsyncTokenCredential"]] = None,  # pylint: disable=line-too-long
+        **kwargs: Any
+    ) -> Self:
+        """
+        :param str share_url: The full URI to the share.
+        :param snapshot:
+            An optional share snapshot on which to operate. This can be the snapshot ID string
+            or the response returned from :func:`create_snapshot`.
+        :type snapshot: Optional[Union[str, dict[str, Any]]]
+        :param credential:
+            The credentials with which to authenticate. This is optional if the
+            account URL already has a SAS token. The value can be a SAS token string,
+            an instance of an AzureSasCredential or AzureNamedKeyCredential from azure.core.credentials,
+            an account shared access key, or an instance of an AsyncTokenCredential class from azure.identity.
+            If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential
+            - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError.
+            If using an instance of AzureNamedKeyCredential, "name" should be the storage account name, and "key"
+            should be the storage account key.
+        :type credential:
+            ~azure.core.credentials.AzureNamedKeyCredential or
+            ~azure.core.credentials.AzureSasCredential or
+            ~azure.core.credentials_async.AsyncTokenCredential or
+            str or dict[str, str] or None
+        :returns: A share client.
+        :rtype: ~azure.storage.fileshare.aio.ShareClient
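+
+        .. admonition:: Example:
+
+            A minimal sketch (editor's illustration; the URL and credential are
+            placeholders)::
+
+                share = ShareClient.from_share_url(
+                    "https://<my-account>.file.core.windows.net/myshare",
+                    credential="<sas-token>")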
+        """
+        account_url, share_name, path_snapshot = _from_share_url(share_url, snapshot)
+        return cls(account_url, share_name, path_snapshot, credential, **kwargs)
+
+    def _format_url(self, hostname: str) -> str:
+        """Format the endpoint URL according to the current location mode hostname.
+
+        :param str hostname:
+            The hostname of the current location mode.
+        :returns: A formatted endpoint URL including current location mode hostname.
+        :rtype: str
+        """
+        return _format_url(self.scheme, hostname, self.share_name, self._query_str)
+
+    @classmethod
+    def from_connection_string(
+        cls, conn_str: str,
+        share_name: str,
+        snapshot: Optional[Union[str, Dict[str, Any]]] = None,
+        credential: Optional[Union[str, Dict[str, str], "AzureNamedKeyCredential", "AzureSasCredential", "AsyncTokenCredential"]] = None,  # pylint: disable=line-too-long
+        **kwargs: Any
+    ) -> Self:
+        """Create ShareClient from a Connection String.
+
+        :param str conn_str:
+            A connection string to an Azure Storage account.
+        :param str share_name: The name of the share.
+        :param snapshot:
+            The optional share snapshot on which to operate. This can be the snapshot ID string
+            or the response returned from :func:`create_snapshot`.
+        :type snapshot: Optional[Union[str, dict[str, Any]]]
+        :param credential:
+            The credentials with which to authenticate. This is optional if the
+            account URL already has a SAS token. The value can be a SAS token string,
+            an instance of an AzureSasCredential or AzureNamedKeyCredential from azure.core.credentials,
+            an account shared access key, or an instance of an AsyncTokenCredential class from azure.identity.
+            If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential
+            - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError.
+            If using an instance of AzureNamedKeyCredential, "name" should be the storage account name, and "key"
+            should be the storage account key.
+        :type credential:
+            ~azure.core.credentials.AzureNamedKeyCredential or
+            ~azure.core.credentials.AzureSasCredential or
+            ~azure.core.credentials_async.AsyncTokenCredential or
+            str or dict[str, str] or None
+        :returns: A share client.
+        :rtype: ~azure.storage.fileshare.aio.ShareClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/file_samples_share_async.py
+                :start-after: [START create_share_client_from_conn_string]
+                :end-before: [END create_share_client_from_conn_string]
+                :language: python
+                :dedent: 8
+                :caption: Gets the share client from connection string.
+        """
+        account_url, secondary, credential = parse_connection_str(conn_str, credential, 'file')
+        if 'secondary_hostname' not in kwargs:
+            kwargs['secondary_hostname'] = secondary
+        return cls(
+            account_url, share_name=share_name, snapshot=snapshot, credential=credential, **kwargs)
+
+    def get_directory_client(self, directory_path: Optional[str] = None) -> ShareDirectoryClient:
+        """Get a client to interact with the specified directory.
+        The directory need not already exist.
+
+        :param str directory_path:
+            Path to the specified directory.
+        :returns: A Directory Client.
+        :rtype: ~azure.storage.fileshare.aio.ShareDirectoryClient
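+
+        .. admonition:: Example:
+
+            A minimal sketch (editor's illustration; the directory path is a placeholder)::
+
+                directory = share.get_directory_client("my-directory")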
+        """
+        _pipeline = AsyncPipeline(
+            transport=AsyncTransportWrapper(self._pipeline._transport),  # pylint: disable=protected-access
+            policies=self._pipeline._impl_policies  # type: ignore [arg-type] # pylint: disable=protected-access
+        )
+
+        return ShareDirectoryClient(
+            self.url, share_name=self.share_name, directory_path=directory_path or "", snapshot=self.snapshot,
+            credential=self.credential, api_version=self.api_version, _hosts=self._hosts, _configuration=self._config,
+            _pipeline=_pipeline, _location_mode=self._location_mode, allow_trailing_dot=self.allow_trailing_dot,
+            allow_source_trailing_dot=self.allow_source_trailing_dot, token_intent=self.file_request_intent)
+
+    def get_file_client(self, file_path: str) -> ShareFileClient:
+        """Get a client to interact with the specified file.
+        The file need not already exist.
+
+        :param str file_path:
+            Path to the specified file.
+        :returns: A File Client.
+        :rtype: ~azure.storage.fileshare.aio.ShareFileClient
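+
+        .. admonition:: Example:
+
+            A minimal sketch (editor's illustration; the file path is a placeholder)::
+
+                file_client = share.get_file_client("my-directory/my-file.txt")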
+        """
+        _pipeline = AsyncPipeline(
+            transport=AsyncTransportWrapper(self._pipeline._transport),  # pylint: disable=protected-access
+            policies=self._pipeline._impl_policies  # type: ignore [arg-type] # pylint: disable=protected-access
+        )
+
+        return ShareFileClient(
+            self.url, share_name=self.share_name, file_path=file_path, snapshot=self.snapshot,
+            credential=self.credential, api_version=self.api_version, _hosts=self._hosts, _configuration=self._config,
+            _pipeline=_pipeline, _location_mode=self._location_mode, allow_trailing_dot=self.allow_trailing_dot,
+            allow_source_trailing_dot=self.allow_source_trailing_dot, token_intent=self.file_request_intent)
+
+    @distributed_trace_async
+    async def acquire_lease(self, **kwargs: Any) -> ShareLeaseClient:
+        """Requests a new lease.
+
+        If the share does not have an active lease, the Share
+        Service creates a lease on the share and returns a new lease.
+
+        .. versionadded:: 12.5.0
+
+        :keyword int lease_duration:
+            Specifies the duration of the lease, in seconds, or negative one
+            (-1) for a lease that never expires. A non-infinite lease can be
+            between 15 and 60 seconds. A lease duration cannot be changed
+            using renew or change. Default is -1 (infinite lease).
+        :keyword str lease_id:
+            Proposed lease ID, in a GUID string format. The Share Service
+            returns 400 (Invalid request) if the proposed lease ID is not
+            in the correct format.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :returns: A ShareLeaseClient object.
+        :rtype: ~azure.storage.fileshare.ShareLeaseClient
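+
+        .. admonition:: Example:
+
+            A minimal sketch (editor's illustration), assuming an existing async
+            ``ShareClient`` named ``share``::
+
+                lease = await share.acquire_lease(lease_duration=60)
+                try:
+                    ...  # operations against the leased share
+                finally:
+                    await lease.release()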
+        """
+        kwargs['lease_duration'] = kwargs.pop('lease_duration', -1)
+        lease_id = kwargs.pop('lease_id', None)
+        lease = ShareLeaseClient(self, lease_id=lease_id)
+        await lease.acquire(**kwargs)
+        return lease
+
+    @distributed_trace_async
+    async def create_share(self, **kwargs: Any) -> Dict[str, Any]:
+        """Creates a new Share under the account. If a share with the
+        same name already exists, the operation fails.
+
+        :keyword metadata:
+            Name-value pairs associated with the share as metadata.
+        :paramtype metadata: Optional[dict[str, str]]
+        :keyword int quota:
+            The quota to be allotted.
+        :keyword access_tier:
+            Specifies the access tier of the share.
+            Possible values: 'TransactionOptimized', 'Hot', 'Cool', 'Premium'
+        :paramtype access_tier: str or ~azure.storage.fileshare.models.ShareAccessTier
+
+            .. versionadded:: 12.4.0
+
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :keyword protocols:
+            Protocols to enable on the share. Only one protocol can be enabled on the share.
+        :paramtype protocols: str or ~azure.storage.fileshare.ShareProtocols
+        :keyword root_squash:
+            Root squash to set on the share.
+            Only valid for NFS shares. Possible values include: 'NoRootSquash', 'RootSquash', 'AllSquash'.
+        :paramtype root_squash: str or ~azure.storage.fileshare.ShareRootSquash
+        :keyword bool paid_bursting_enabled: This property enables paid bursting.
+        :keyword int paid_bursting_bandwidth_mibps: The maximum throughput the file share can support in MiB/s.
+        :keyword int paid_bursting_iops: The maximum IOPS the file share can support.
+        :keyword int provisioned_iops: The provisioned IOPS of the share, stored on the share object.
+        :keyword int provisioned_bandwidth_mibps: The provisioned throughput of the share, stored on the share object.
+        :returns: Share-updated property dict (Etag and last modified).
+        :rtype: dict[str, Any]
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/file_samples_share_async.py
+                :start-after: [START create_share]
+                :end-before: [END create_share]
+                :language: python
+                :dedent: 12
+                :caption: Creates a file share.
+        """
+        metadata = kwargs.pop('metadata', None)
+        quota = kwargs.pop('quota', None)
+        access_tier = kwargs.pop('access_tier', None)
+        timeout = kwargs.pop('timeout', None)
+        root_squash = kwargs.pop('root_squash', None)
+        protocols = kwargs.pop('protocols', None)
+        paid_bursting_bandwidth_mibps = kwargs.pop('paid_bursting_bandwidth_mibps', None)
+        paid_bursting_iops = kwargs.pop('paid_bursting_iops', None)
+        share_provisioned_iops = kwargs.pop('provisioned_iops', None)
+        share_provisioned_bandwidth_mibps = kwargs.pop('provisioned_bandwidth_mibps', None)
+        if protocols and protocols not in ['NFS', 'SMB', ShareProtocols.SMB, ShareProtocols.NFS]:
+            raise ValueError("The enabled protocol must be set to either SMB or NFS.")
+        if root_squash and protocols not in ['NFS', ShareProtocols.NFS]:
+            raise ValueError("The 'root_squash' keyword can only be used on NFS enabled shares.")
+        headers = kwargs.pop('headers', {})
+        headers.update(add_metadata_headers(metadata))
+
+        try:
+            return cast(Dict[str, Any], await self._client.share.create(
+                timeout=timeout,
+                metadata=metadata,
+                quota=quota,
+                access_tier=access_tier,
+                root_squash=root_squash,
+                enabled_protocols=protocols,
+                paid_bursting_max_bandwidth_mibps=paid_bursting_bandwidth_mibps,
+                paid_bursting_max_iops=paid_bursting_iops,
+                share_provisioned_iops=share_provisioned_iops,
+                share_provisioned_bandwidth_mibps=share_provisioned_bandwidth_mibps,
+                cls=return_response_headers,
+                headers=headers,
+                **kwargs))
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    @distributed_trace_async
+    async def create_snapshot(self, **kwargs: Any) -> Dict[str, Any]:
+        """Creates a snapshot of the share.
+
+        A snapshot is a read-only version of a share that's taken at a point in time.
+        It can be read, copied, or deleted, but not modified. Snapshots provide a way
+        to back up a share as it appears at a moment in time.
+
+        A snapshot of a share has the same name as the base share from which the snapshot
+        is taken, with a DateTime value appended to indicate the time at which the
+        snapshot was taken.
+
+        :keyword metadata:
+            Name-value pairs associated with the share as metadata.
+        :paramtype metadata: Optional[dict[str, str]]
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :returns: Share-updated property dict (Snapshot ID, Etag, and last modified).
+        :rtype: dict[str, Any]
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/file_samples_share_async.py
+                :start-after: [START create_share_snapshot]
+                :end-before: [END create_share_snapshot]
+                :language: python
+                :dedent: 16
+                :caption: Creates a snapshot of the file share.
+        """
+        metadata = kwargs.pop('metadata', None)
+        timeout = kwargs.pop('timeout', None)
+        headers = kwargs.pop('headers', {})
+        headers.update(add_metadata_headers(metadata))
+        try:
+            return cast(Dict[str, Any], await self._client.share.create_snapshot(
+                timeout=timeout,
+                cls=return_response_headers,
+                headers=headers,
+                **kwargs))
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    @distributed_trace_async
+    async def delete_share(
+        self, delete_snapshots: Optional[Union[bool, Literal['include', 'include-leased']]] = False,
+        **kwargs: Any
+    ) -> None:
+        """Marks the specified share for deletion. The share is
+        later deleted during garbage collection.
+
+        :param delete_snapshots:
+            Indicates if snapshots are to be deleted. If "True" or the enum value "include",
+            snapshots will be deleted (but not leased snapshots). To also delete leased
+            snapshots, specify the "include-leased" enum value.
+        :type delete_snapshots:
+            Optional[Union[bool, Literal['include', 'include-leased']]]
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :keyword lease:
+            Required if the share has an active lease. Value can be a ShareLeaseClient object
+            or the lease ID as a string.
+
+            .. versionadded:: 12.5.0
+
+            This keyword argument was introduced in API version '2020-08-04'.
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/file_samples_share_async.py
+                :start-after: [START delete_share]
+                :end-before: [END delete_share]
+                :language: python
+                :dedent: 16
+                :caption: Deletes the share and any snapshots.
+        """
+        access_conditions = get_access_conditions(kwargs.pop('lease', None))
+        timeout = kwargs.pop('timeout', None)
+        delete_include = None
+        if isinstance(delete_snapshots, bool) and delete_snapshots:
+            delete_include = DeleteSnapshotsOptionType.INCLUDE
+        else:
+            if delete_snapshots == 'include':
+                delete_include = DeleteSnapshotsOptionType.INCLUDE
+            elif delete_snapshots == 'include-leased':
+                delete_include = DeleteSnapshotsOptionType.INCLUDE_LEASED
+        try:
+            await self._client.share.delete(
+                timeout=timeout,
+                sharesnapshot=self.snapshot,
+                delete_snapshots=delete_include,
+                lease_access_conditions=access_conditions,
+                **kwargs)
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    @distributed_trace_async
+    async def get_share_properties(self, **kwargs: Any) -> "ShareProperties":
+        """Returns all user-defined metadata and system properties for the
+        specified share. The data returned does not include the share's
+        list of files or directories.
+
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :keyword lease:
+            Required if the share has an active lease. Value can be a ShareLeaseClient object
+            or the lease ID as a string.
+
+            .. versionadded:: 12.5.0
+
+            This keyword argument was introduced in API version '2020-08-04'.
+
+        :returns: The share properties.
+        :rtype: ~azure.storage.fileshare.ShareProperties
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/file_samples_hello_world_async.py
+                :start-after: [START get_share_properties]
+                :end-before: [END get_share_properties]
+                :language: python
+                :dedent: 16
+                :caption: Gets the share properties.
+        """
+        access_conditions = get_access_conditions(kwargs.pop('lease', None))
+        timeout = kwargs.pop('timeout', None)
+        try:
+            props = cast("ShareProperties", await self._client.share.get_properties(
+                timeout=timeout,
+                sharesnapshot=self.snapshot,
+                cls=deserialize_share_properties,
+                lease_access_conditions=access_conditions,
+                **kwargs))
+        except HttpResponseError as error:
+            process_storage_error(error)
+        props.name = self.share_name
+        props.snapshot = self.snapshot
+        return props
+
+    @distributed_trace_async
+    async def set_share_quota(self, quota: int, **kwargs: Any) -> Dict[str, Any]:
+        """Sets the quota for the share.
+
+        :param int quota:
+            Specifies the maximum size of the share, in gigabytes.
+            Must be greater than 0, and less than or equal to 5TB.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :keyword lease:
+            Required if the share has an active lease. Value can be a ShareLeaseClient object
+            or the lease ID as a string.
+
+            .. versionadded:: 12.5.0
+
+            This keyword argument was introduced in API version '2020-08-04'.
+
+        :returns: Share-updated property dict (Etag and last modified).
+        :rtype: dict[str, Any]
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/file_samples_share_async.py
+                :start-after: [START set_share_quota]
+                :end-before: [END set_share_quota]
+                :language: python
+                :dedent: 16
+                :caption: Sets the share quota.
+        """
+        access_conditions = get_access_conditions(kwargs.pop('lease', None))
+        timeout = kwargs.pop('timeout', None)
+        try:
+            return cast(Dict[str, Any], await self._client.share.set_properties(
+                timeout=timeout,
+                quota=quota,
+                access_tier=None,
+                cls=return_response_headers,
+                lease_access_conditions=access_conditions,
+                **kwargs))
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    @distributed_trace_async
+    async def set_share_properties(self, **kwargs: Any) -> Dict[str, Any]:
+        """Sets the share properties.
+
+        .. versionadded:: 12.3.0
+
+        :keyword access_tier:
+            Specifies the access tier of the share.
+            Possible values: 'TransactionOptimized', 'Hot', 'Cool', 'Premium'
+        :paramtype access_tier: str or ~azure.storage.fileshare.models.ShareAccessTier
+        :keyword int quota:
+            Specifies the maximum size of the share, in gigabytes.
+            Must be greater than 0, and less than or equal to 5TB.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :keyword root_squash:
+            Root squash to set on the share.
+            Only valid for NFS shares. Possible values include: 'NoRootSquash', 'RootSquash', 'AllSquash'
+        :paramtype root_squash: str or ~azure.storage.fileshare.ShareRootSquash
+        :keyword lease:
+            Required if the share has an active lease. Value can be a ShareLeaseClient object
+            or the lease ID as a string.
+        :keyword bool paid_bursting_enabled: This property enables paid bursting.
+        :keyword int paid_bursting_bandwidth_mibps: The maximum throughput the file share can support in MiB/s.
+        :keyword int paid_bursting_iops: The maximum IOPS the file share can support.
+        :keyword int provisioned_iops: The provisioned IOPS of the share, stored on the share object.
+        :keyword int provisioned_bandwidth_mibps: The provisioned throughput of the share, stored on the share object.
+        :returns: Share-updated property dict (Etag and last modified).
+        :rtype: dict[str, Any]
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/file_samples_share_async.py
+                :start-after: [START set_share_properties]
+                :end-before: [END set_share_properties]
+                :language: python
+                :dedent: 16
+                :caption: Sets the share properties.
+        """
+        access_conditions = get_access_conditions(kwargs.pop('lease', None))
+        timeout = kwargs.pop('timeout', None)
+        access_tier = kwargs.pop('access_tier', None)
+        quota = kwargs.pop('quota', None)
+        root_squash = kwargs.pop('root_squash', None)
+        paid_bursting_bandwidth_mibps = kwargs.pop('paid_bursting_bandwidth_mibps', None)
+        paid_bursting_iops = kwargs.pop('paid_bursting_iops', None)
+        share_provisioned_iops = kwargs.pop('provisioned_iops', None)
+        share_provisioned_bandwidth_mibps = kwargs.pop('provisioned_bandwidth_mibps', None)
+        if all(parameter is None for parameter in [access_tier, quota, root_squash]):
+            raise ValueError("set_share_properties should be called with at least one parameter.")
+        try:
+            return cast(Dict[str, Any], await self._client.share.set_properties(
+                timeout=timeout,
+                quota=quota,
+                access_tier=access_tier,
+                root_squash=root_squash,
+                lease_access_conditions=access_conditions,
+                paid_bursting_max_bandwidth_mibps=paid_bursting_bandwidth_mibps,
+                paid_bursting_max_iops=paid_bursting_iops,
+                share_provisioned_iops=share_provisioned_iops,
+                share_provisioned_bandwidth_mibps=share_provisioned_bandwidth_mibps,
+                cls=return_response_headers,
+                **kwargs))
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    @distributed_trace_async
+    async def set_share_metadata(self, metadata: Dict[str, str], **kwargs: Any) -> Dict[str, Any]:
+        """Sets the metadata for the share.
+
+        Each call to this operation replaces all existing metadata
+        attached to the share. To remove all metadata from the share,
+        call this operation with no metadata dict.
+
+        :param metadata:
+            Name-value pairs associated with the share as metadata.
+        :type metadata: dict[str, str]
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :keyword lease:
+            Required if the share has an active lease. Value can be a ShareLeaseClient object
+            or the lease ID as a string.
+
+            .. versionadded:: 12.5.0
+
+            This keyword argument was introduced in API version '2020-08-04'.
+
+        :returns: Share-updated property dict (Etag and last modified).
+        :rtype: dict[str, Any]
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/file_samples_share_async.py
+                :start-after: [START set_share_metadata]
+                :end-before: [END set_share_metadata]
+                :language: python
+                :dedent: 16
+                :caption: Sets the share metadata.
+        """
+        access_conditions = get_access_conditions(kwargs.pop('lease', None))
+        timeout = kwargs.pop('timeout', None)
+        headers = kwargs.pop('headers', {})
+        headers.update(add_metadata_headers(metadata))
+        try:
+            return cast(Dict[str, Any], await self._client.share.set_metadata(
+                timeout=timeout,
+                cls=return_response_headers,
+                headers=headers,
+                lease_access_conditions=access_conditions,
+                **kwargs))
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    @distributed_trace_async
+    async def get_share_access_policy(self, **kwargs: Any) -> Dict[str, Any]:
+        """Gets the permissions for the share. The permissions
+        indicate whether files in a share may be accessed publicly.
+
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :keyword lease:
+            Required if the share has an active lease. Value can be a ShareLeaseClient object
+            or the lease ID as a string.
+
+            .. versionadded:: 12.5.0
+
+            This keyword argument was introduced in API version '2020-08-04'.
+
+        :returns: Access policy information in a dict.
+        :rtype: dict[str, Any]
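+
+        .. admonition:: Example:
+
+            A minimal sketch, assuming an existing authenticated aio
+            ``ShareClient`` named ``share`` (the name is illustrative, not
+            part of this module).
+
+            .. code-block:: python
+
+                # `share` is an assumed azure.storage.fileshare.aio.ShareClient.
+                policy = await share.get_share_access_policy()
+                print(policy['public_access'])
+                # Identifiers are SignedIdentifier-like objects with an `id`.
+                for identifier in policy['signed_identifiers']:
+                    print(identifier.id)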
+        """
+        access_conditions = get_access_conditions(kwargs.pop('lease', None))
+        timeout = kwargs.pop('timeout', None)
+        try:
+            response, identifiers = await self._client.share.get_access_policy(
+                timeout=timeout,
+                cls=return_headers_and_deserialized,
+                lease_access_conditions=access_conditions,
+                **kwargs)
+        except HttpResponseError as error:
+            process_storage_error(error)
+        return {
+            'public_access': response.get('share_public_access'),
+            'signed_identifiers': identifiers or []
+        }
+
+    @distributed_trace_async
+    async def set_share_access_policy(
+        self, signed_identifiers: Dict[str, "AccessPolicy"],
+        **kwargs: Any
+    ) -> Dict[str, Any]:
+        """Sets the permissions for the share, or stored access
+        policies that may be used with Shared Access Signatures. The permissions
+        indicate whether files in a share may be accessed publicly.
+
+        :param signed_identifiers:
+            A dictionary of access policies to associate with the share. The
+            dictionary may contain up to 5 elements. An empty dictionary
+            will clear the access policies set on the share.
+        :type signed_identifiers: dict[str, ~azure.storage.fileshare.AccessPolicy]
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :keyword lease:
+            Required if the share has an active lease. Value can be a ShareLeaseClient object
+            or the lease ID as a string.
+
+            .. versionadded:: 12.5.0
+
+            This keyword argument was introduced in API version '2020-08-04'.
+
+        :returns: Share-updated property dict (Etag and last modified).
+        :rtype: dict[str, Any]
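+
+        .. admonition:: Example:
+
+            A minimal sketch, assuming an existing authenticated aio
+            ``ShareClient`` named ``share``; the policy name 'read-only'
+            is illustrative.
+
+            .. code-block:: python
+
+                from datetime import datetime, timedelta, timezone
+
+                from azure.storage.fileshare import AccessPolicy, ShareSasPermissions
+
+                # A stored access policy granting read access for one hour.
+                policy = AccessPolicy(
+                    permission=ShareSasPermissions(read=True),
+                    start=datetime.now(timezone.utc),
+                    expiry=datetime.now(timezone.utc) + timedelta(hours=1))
+                await share.set_share_access_policy({'read-only': policy})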
+        """
+        access_conditions = get_access_conditions(kwargs.pop('lease', None))
+        timeout = kwargs.pop('timeout', None)
+        if len(signed_identifiers) > 5:
+            raise ValueError(
+                'Too many access policies provided. The server does not support setting '
+                'more than 5 access policies on a single resource.')
+        identifiers = []
+        for key, value in signed_identifiers.items():
+            if value:
+                value.start = serialize_iso(value.start)
+                value.expiry = serialize_iso(value.expiry)
+            identifiers.append(SignedIdentifier(id=key, access_policy=value))
+        try:
+            return cast(Dict[str, Any], await self._client.share.set_access_policy(
+                share_acl=identifiers or None,
+                timeout=timeout,
+                cls=return_response_headers,
+                lease_access_conditions=access_conditions,
+                **kwargs))
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    @distributed_trace_async
+    async def get_share_stats(self, **kwargs: Any) -> int:
+        """Gets the approximate size of the data stored on the share in bytes.
+
+        Note that this value may not include all recently created
+        or recently re-sized files.
+
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :keyword lease:
+            Required if the share has an active lease. Value can be a ShareLeaseClient object
+            or the lease ID as a string.
+
+            .. versionadded:: 12.5.0
+
+            This keyword argument was introduced in API version '2020-08-04'.
+
+        :return: The approximate size of the data (in bytes) stored on the share.
+        :rtype: int
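+
+        .. admonition:: Example:
+
+            A minimal sketch, assuming an existing authenticated aio
+            ``ShareClient`` named ``share``.
+
+            .. code-block:: python
+
+                usage = await share.get_share_stats()
+                # Convert the approximate byte count to GiB for display.
+                print(f"Share uses ~{usage / (1024 ** 3):.2f} GiB")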
+        """
+        access_conditions = get_access_conditions(kwargs.pop('lease', None))
+        timeout = kwargs.pop('timeout', None)
+        try:
+            stats = cast(ShareStats, await self._client.share.get_statistics(
+                timeout=timeout,
+                lease_access_conditions=access_conditions,
+                **kwargs))
+            return stats.share_usage_bytes
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    @distributed_trace
+    def list_directories_and_files(
+        self, directory_name: Optional[str] = None,
+        name_starts_with: Optional[str] = None,
+        marker: Optional[str] = None,
+        **kwargs: Any
+    ) -> AsyncItemPaged[Union["DirectoryProperties", "FileProperties"]]:
+        """Lists the directories and files under the share.
+
+        :param str directory_name:
+            Name of a directory.
+        :param str name_starts_with:
+            Filters the results to return only directories and files whose
+            names begin with the specified prefix.
+        :param str marker:
+            An opaque continuation token. This value can be retrieved from the
+            next_marker field of a previous generator object. If specified,
+            this generator will begin returning results from this point.
+        :keyword List[str] include:
+            Include this parameter to specify one or more datasets to include in the response.
+            Possible str values are "timestamps", "Etag", "Attributes", "PermissionKey".
+
+            .. versionadded:: 12.6.0
+
+            This keyword argument was introduced in API version '2020-10-02'.
+
+        :keyword bool include_extended_info:
+            If this is set to True, the file ID will be included in listed results.
+
+            .. versionadded:: 12.6.0
+
+            This keyword argument was introduced in API version '2020-10-02'.
+
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :returns: An auto-paging iterable of dict-like DirectoryProperties and FileProperties.
+        :rtype: ~azure.core.async_paging.AsyncItemPaged[Union[DirectoryProperties, FileProperties]]
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/file_samples_share_async.py
+                :start-after: [START share_list_files_in_dir]
+                :end-before: [END share_list_files_in_dir]
+                :language: python
+                :dedent: 16
+                :caption: List directories and files in the share.
+        """
+        timeout = kwargs.pop('timeout', None)
+        directory = self.get_directory_client(directory_name)
+        return directory.list_directories_and_files(
+            name_starts_with=name_starts_with, marker=marker, timeout=timeout, **kwargs)
+
+    @distributed_trace_async
+    async def create_permission_for_share(self, file_permission: str, **kwargs: Any) -> Optional[str]:
+        """Create a permission (a security descriptor) at the share level.
+
+        This 'permission' can be used for the files/directories in the share.
+        If an identical 'permission' already exists, its key is returned;
+        otherwise, a new permission is created at the share level and its key returned.
+
+        :param str file_permission:
+            The file permission, a portable SDDL string.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :keyword file_permission_format:
+            Specifies the format in which the permission is returned. If not specified, SDDL will be the default.
+        :paramtype file_permission_format: Literal['sddl', 'binary']
+        :returns: A file permission key
+        :rtype: str or None
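+
+        .. admonition:: Example:
+
+            A minimal sketch, assuming an existing authenticated aio
+            ``ShareClient`` named ``share``; the SDDL string is illustrative only.
+
+            .. code-block:: python
+
+                # Grant full access to built-in administrators (illustrative SDDL).
+                sddl = "O:SYG:SYD:(A;;FA;;;BA)"
+                permission_key = await share.create_permission_for_share(sddl)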
+        """
+        timeout = kwargs.pop('timeout', None)
+        options = _create_permission_for_share_options(file_permission, timeout=timeout, **kwargs)
+        try:
+            return cast(Optional[str], await self._client.share.create_permission(**options))
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    @distributed_trace_async
+    async def get_permission_for_share(self, permission_key: str, **kwargs: Any) -> str:
+        """Get a permission (a security descriptor) for a given key.
+
+        This 'permission' can be used for the files/directories in the share.
+
+        :param str permission_key:
+            The key of the file permission to retrieve.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :keyword file_permission_format:
+            Specifies the format in which the permission is returned. If not specified, SDDL will be the default.
+        :paramtype file_permission_format: Literal['sddl', 'binary']
+        :returns: A file permission (a portable SDDL)
+        :rtype: str
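+
+        .. admonition:: Example:
+
+            A minimal sketch that round-trips a permission, assuming an
+            existing authenticated aio ``ShareClient`` named ``share`` and an
+            illustrative SDDL string.
+
+            .. code-block:: python
+
+                key = await share.create_permission_for_share("O:SYG:SYD:(A;;FA;;;BA)")
+                # Retrieve the stored security descriptor back by its key.
+                sddl = await share.get_permission_for_share(key)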
+        """
+        timeout = kwargs.pop('timeout', None)
+        try:
+            return cast(str, await self._client.share.get_permission(
+                file_permission_key=permission_key,
+                cls=deserialize_permission,
+                timeout=timeout,
+                **kwargs))
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    @distributed_trace_async
+    async def create_directory(self, directory_name: str, **kwargs: Any) -> ShareDirectoryClient:
+        """Creates a directory in the share and returns a client to interact
+        with the directory.
+
+        :param str directory_name:
+            The name of the directory.
+        :keyword metadata:
+            Name-value pairs associated with the directory as metadata.
+        :paramtype metadata: Optional[dict[str, str]]
+        :keyword str owner:
+            NFS only. The owner of the directory.
+        :keyword str group:
+            NFS only. The owning group of the directory.
+        :keyword str file_mode:
+            NFS only. The file mode of the directory.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :returns: ShareDirectoryClient
+        :rtype: ~azure.storage.fileshare.aio.ShareDirectoryClient
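+
+        .. admonition:: Example:
+
+            A minimal sketch, assuming an existing authenticated aio
+            ``ShareClient`` named ``share``; the directory name and metadata
+            are illustrative.
+
+            .. code-block:: python
+
+                directory = await share.create_directory(
+                    "reports", metadata={"created-by": "setup-script"})
+                print(directory.directory_path)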
+        """
+        directory = self.get_directory_client(directory_name)
+        kwargs.setdefault('merge_span', True)
+        await directory.create_directory(**kwargs)
+        return directory
+
+    @distributed_trace_async
+    async def delete_directory(self, directory_name: str, **kwargs: Any) -> None:
+        """Marks the directory for deletion. The directory is
+        later deleted during garbage collection.
+
+        :param str directory_name:
+            The name of the directory.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :rtype: None
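+
+        .. admonition:: Example:
+
+            A minimal sketch, assuming an existing authenticated aio
+            ``ShareClient`` named ``share``.
+
+            .. code-block:: python
+
+                from azure.core.exceptions import ResourceNotFoundError
+
+                try:
+                    await share.delete_directory("reports")
+                except ResourceNotFoundError:
+                    pass  # The directory was already deleted.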
+        """
+        directory = self.get_directory_client(directory_name)
+        await directory.delete_directory(**kwargs)
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/fileshare/aio/_share_service_client_async.py b/.venv/lib/python3.12/site-packages/azure/storage/fileshare/aio/_share_service_client_async.py
new file mode 100644
index 00000000..bf33ac78
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/fileshare/aio/_share_service_client_async.py
@@ -0,0 +1,490 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+# pylint: disable=docstring-keyword-should-match-keyword-only
+
+import functools
+import sys
+import warnings
+from typing import (
+    Union, Optional, Any, Dict, List,
+    TYPE_CHECKING
+)
+from typing_extensions import Self
+
+from azure.core.async_paging import AsyncItemPaged
+from azure.core.exceptions import HttpResponseError
+from azure.core.tracing.decorator import distributed_trace
+from azure.core.tracing.decorator_async import distributed_trace_async
+from azure.core.pipeline import AsyncPipeline
+from .._generated.aio import AzureFileStorage
+from .._generated.models import StorageServiceProperties
+from .._models import CorsRule, service_properties_deserialize, ShareProperties
+from .._serialize import get_api_version
+from .._share_service_client_helpers import _parse_url
+from .._shared.base_client import StorageAccountHostsMixin, parse_query
+from .._shared.base_client_async import AsyncStorageAccountHostsMixin, AsyncTransportWrapper, parse_connection_str
+from .._shared.policies_async import ExponentialRetry
+from .._shared.response_handlers import process_storage_error
+from ._models import SharePropertiesPaged
+from ._share_client_async import ShareClient
+
+if sys.version_info >= (3, 8):
+    from typing import Literal
+else:
+    from typing_extensions import Literal
+
+if TYPE_CHECKING:
+    from azure.core.credentials import AzureNamedKeyCredential, AzureSasCredential
+    from azure.core.credentials_async import AsyncTokenCredential
+    from .._models import Metrics, ShareProtocolSettings
+
+
+class ShareServiceClient(AsyncStorageAccountHostsMixin, StorageAccountHostsMixin):  # type: ignore [misc]
+    """A client to interact with the File Share Service at the account level.
+
+    This client provides operations to retrieve and configure the account properties
+    as well as list, create and delete shares within the account.
+    For operations relating to a specific share, a client for that entity
+    can also be retrieved using the :func:`get_share_client` function.
+
+    :param str account_url:
+        The URL to the file share storage account. Any other entities included
+        in the URL path (e.g. share or file) will be discarded. This URL can be optionally
+        authenticated with a SAS token.
+    :param credential:
+        The credentials with which to authenticate. This is optional if the
+        account URL already has a SAS token. The value can be a SAS token string,
+        an instance of an AzureSasCredential or AzureNamedKeyCredential from azure.core.credentials,
+        an account shared access key, or an instance of an AsyncTokenCredential class from azure.identity.
+        If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential
+        - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError.
+        If using an instance of AzureNamedKeyCredential, "name" should be the storage account name, and "key"
+        should be the storage account key.
+    :type credential:
+        ~azure.core.credentials.AzureNamedKeyCredential or
+        ~azure.core.credentials.AzureSasCredential or
+        ~azure.core.credentials_async.AsyncTokenCredential or
+        str or dict[str, str] or None
+    :keyword token_intent:
+        Required when using `AsyncTokenCredential` for authentication and ignored for other forms of authentication.
+        Specifies the intent for all requests when using `AsyncTokenCredential` authentication. Possible values are:
+
+        backup - Specifies requests are intended for backup/admin type operations, meaning that all file/directory
+                 ACLs are bypassed and full permissions are granted. User must also have required RBAC permission.
+
+    :paramtype token_intent: Literal['backup']
+    :keyword bool allow_trailing_dot: If true, the trailing dot will not be trimmed from the target URI.
+    :keyword bool allow_source_trailing_dot: If true, the trailing dot will not be trimmed from the source URI.
+    :keyword str api_version:
+        The Storage API version to use for requests. Default value is the most recent service version that is
+        compatible with the current SDK. Setting to an older version may result in reduced feature compatibility.
+
+        .. versionadded:: 12.1.0
+
+    :keyword str secondary_hostname:
+        The hostname of the secondary endpoint.
+    :keyword int max_range_size: The maximum range size used for a file upload. Defaults to 4*1024*1024.
+
+    .. admonition:: Example:
+
+        .. literalinclude:: ../samples/file_samples_authentication_async.py
+            :start-after: [START create_share_service_client]
+            :end-before: [END create_share_service_client]
+            :language: python
+            :dedent: 8
+            :caption: Create the share service client with url and credential.
+    """
+    def __init__(
+        self, account_url: str,
+        credential: Optional[Union[str, Dict[str, str], "AzureNamedKeyCredential", "AzureSasCredential", "AsyncTokenCredential"]] = None,  # pylint: disable=line-too-long
+        *,
+        token_intent: Optional[Literal['backup']] = None,
+        **kwargs: Any
+    ) -> None:
+        kwargs['retry_policy'] = kwargs.get('retry_policy') or ExponentialRetry(**kwargs)
+        loop = kwargs.pop('loop', None)
+        if loop and sys.version_info >= (3, 8):
+            warnings.warn("The 'loop' parameter was deprecated from asyncio's high-level"
+            "APIs in Python 3.8 and is no longer supported.", DeprecationWarning)
+
+        if hasattr(credential, 'get_token') and not token_intent:
+            raise ValueError("'token_intent' keyword is required when 'credential' is an AsyncTokenCredential.")
+        parsed_url = _parse_url(account_url=account_url)
+        _, sas_token = parse_query(parsed_url.query)
+        if not sas_token and not credential:
+            raise ValueError(
+                'You need to provide either an account shared key or SAS token when creating a storage service.')
+
+        self._query_str, credential = self._format_query_string(sas_token, credential)
+        super(ShareServiceClient, self).__init__(
+            parsed_url, service='file-share', credential=credential, **kwargs)
+        self.allow_trailing_dot = kwargs.pop('allow_trailing_dot', None)
+        self.allow_source_trailing_dot = kwargs.pop('allow_source_trailing_dot', None)
+        self.file_request_intent = token_intent
+        self._client = AzureFileStorage(url=self.url, base_url=self.url, pipeline=self._pipeline,
+                                        allow_trailing_dot=self.allow_trailing_dot,
+                                        allow_source_trailing_dot=self.allow_source_trailing_dot,
+                                        file_request_intent=self.file_request_intent)
+        self._client._config.version = get_api_version(kwargs)  # type: ignore [assignment]
+
+    def _format_url(self, hostname: str) -> str:
+        """Format the endpoint URL according to the current location mode hostname.
+
+        :param str hostname:
+            The hostname of the current location mode.
+        :returns: A formatted endpoint URL including current location mode hostname.
+        :rtype: str
+        """
+        return f"{self.scheme}://{hostname}/{self._query_str}"
+
+    @classmethod
+    def from_connection_string(
+        cls, conn_str: str,
+        credential: Optional[Union[str, Dict[str, str], "AzureNamedKeyCredential", "AzureSasCredential", "AsyncTokenCredential"]] = None,  # pylint: disable=line-too-long
+        **kwargs: Any
+    ) -> Self:
+        """Create ShareServiceClient from a Connection String.
+
+        :param str conn_str:
+            A connection string to an Azure Storage account.
+        :param credential:
+            The credentials with which to authenticate. This is optional if the
+            account URL already has a SAS token. The value can be a SAS token string,
+            an instance of an AzureSasCredential or AzureNamedKeyCredential from azure.core.credentials,
+            an account shared access key, or an instance of an AsyncTokenCredential class from azure.identity.
+            If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential
+            - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError.
+            If using an instance of AzureNamedKeyCredential, "name" should be the storage account name, and "key"
+            should be the storage account key.
+        :type credential:
+            ~azure.core.credentials.AzureNamedKeyCredential or
+            ~azure.core.credentials.AzureSasCredential or
+            ~azure.core.credentials_async.AsyncTokenCredential or
+            str or dict[str, str] or None
+        :returns: A File Share service client.
+        :rtype: ~azure.storage.fileshare.aio.ShareServiceClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/file_samples_authentication_async.py
+                :start-after: [START create_share_service_client_from_conn_string]
+                :end-before: [END create_share_service_client_from_conn_string]
+                :language: python
+                :dedent: 8
+                :caption: Create the share service client with connection string.
+        """
+        account_url, secondary, credential = parse_connection_str(conn_str, credential, 'file')
+        if 'secondary_hostname' not in kwargs:
+            kwargs['secondary_hostname'] = secondary
+        return cls(account_url, credential=credential, **kwargs)
+
+    @distributed_trace_async
+    async def get_service_properties(self, **kwargs: Any) -> Dict[str, Any]:
+        """Gets the properties of a storage account's File Share service, including
+        Azure Storage Analytics.
+
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :returns: A dictionary containing file service properties such as
+            analytics logging, hour/minute metrics, cors rules, etc.
+        :rtype: Dict[str, Any]
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/file_samples_service_async.py
+                :start-after: [START get_service_properties]
+                :end-before: [END get_service_properties]
+                :language: python
+                :dedent: 12
+                :caption: Get file share service properties.
+        """
+        timeout = kwargs.pop('timeout', None)
+        try:
+            service_props = await self._client.service.get_properties(timeout=timeout, **kwargs)
+            return service_properties_deserialize(service_props)
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    @distributed_trace_async
+    async def set_service_properties(
+        self, hour_metrics: Optional["Metrics"] = None,
+        minute_metrics: Optional["Metrics"] = None,
+        cors: Optional[List[CorsRule]] = None,
+        protocol: Optional["ShareProtocolSettings"] = None,
+        **kwargs: Any
+    ) -> None:
+        """Sets the properties of a storage account's File Share service, including
+        Azure Storage Analytics. If an element (e.g. hour_metrics) is left as None, the
+        existing settings on the service for that functionality are preserved.
+
+        :param hour_metrics:
+            The hour metrics settings provide a summary of request
+            statistics grouped by API in hourly aggregates for files.
+        :type hour_metrics: ~azure.storage.fileshare.Metrics
+        :param minute_metrics:
+            The minute metrics settings provide request statistics
+            for each minute for files.
+        :type minute_metrics: ~azure.storage.fileshare.Metrics
+        :param cors:
+            You can include up to five CorsRule elements in the
+            list. If an empty list is specified, all CORS rules will be deleted,
+            and CORS will be disabled for the service.
+        :type cors: list[~azure.storage.fileshare.CorsRule]
+        :param protocol:
+            Sets protocol settings
+        :type protocol: ~azure.storage.fileshare.ShareProtocolSettings
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :rtype: None
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/file_samples_service_async.py
+                :start-after: [START set_service_properties]
+                :end-before: [END set_service_properties]
+                :language: python
+                :dedent: 8
+                :caption: Sets file share service properties.
+        """
+        timeout = kwargs.pop('timeout', None)
+        props = StorageServiceProperties(
+            hour_metrics=hour_metrics,
+            minute_metrics=minute_metrics,
+            cors=CorsRule._to_generated(cors),  # pylint: disable=protected-access
+            protocol=protocol
+        )
+        try:
+            await self._client.service.set_properties(props, timeout=timeout, **kwargs)
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    @distributed_trace
+    def list_shares(
+        self, name_starts_with: Optional[str] = None,
+        include_metadata: Optional[bool] = False,
+        include_snapshots: Optional[bool] = False,
+        **kwargs: Any
+    ) -> AsyncItemPaged[ShareProperties]:
+        """Returns auto-paging iterable of dict-like ShareProperties under the specified account.
+        The generator will lazily follow the continuation tokens returned by
+        the service and stop when all shares have been returned.
+
+        :param str name_starts_with:
+            Filters the results to return only shares whose names
+            begin with the specified name_starts_with.
+        :param bool include_metadata:
+            Specifies that share metadata be returned in the response.
+        :param bool include_snapshots:
+            Specifies that share snapshots be returned in the response.
+        :keyword bool include_deleted:
+            Specifies that deleted shares be returned in the response.
+            This is only supported for accounts with share soft delete enabled.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :returns: An iterable (auto-paging) of ShareProperties.
+        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.storage.fileshare.ShareProperties]
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/file_samples_service_async.py
+                :start-after: [START fsc_list_shares]
+                :end-before: [END fsc_list_shares]
+                :language: python
+                :dedent: 16
+                :caption: List shares in the file share service.
+        """
+        timeout = kwargs.pop('timeout', None)
+        include = []
+        include_deleted = kwargs.pop('include_deleted', None)
+        if include_deleted:
+            include.append("deleted")
+        if include_metadata:
+            include.append('metadata')
+        if include_snapshots:
+            include.append('snapshots')
+
+        results_per_page = kwargs.pop('results_per_page', None)
+        command = functools.partial(
+            self._client.service.list_shares_segment,
+            include=include,
+            timeout=timeout,
+            **kwargs)
+        return AsyncItemPaged(
+            command, prefix=name_starts_with, results_per_page=results_per_page,
+            page_iterator_class=SharePropertiesPaged)
+
+    @distributed_trace_async
+    async def create_share(self, share_name: str, **kwargs: Any) -> ShareClient:
+        """Creates a new share under the specified account. If the share
+        with the same name already exists, the operation fails. Returns a client with
+        which to interact with the newly created share.
+
+        :param str share_name: The name of the share to create.
+        :keyword dict[str, str] metadata:
+            A dict with name-value pairs to associate with the
+            share as metadata. Example: {'Category': 'test'}
+        :keyword int quota:
+            Quota in gibibytes (GiB).
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :keyword int provisioned_iops: The provisioned IOPS of the share, stored on the share object.
+        :keyword int provisioned_bandwidth_mibps: The provisioned throughput of the share, stored on the share object.
+        :return: A ShareClient for the newly created Share.
+        :rtype: ~azure.storage.fileshare.aio.ShareClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/file_samples_service_async.py
+                :start-after: [START fsc_create_shares]
+                :end-before: [END fsc_create_shares]
+                :language: python
+                :dedent: 12
+                :caption: Create a share in the file share service.
+        """
+        metadata = kwargs.pop('metadata', None)
+        quota = kwargs.pop('quota', None)
+        timeout = kwargs.pop('timeout', None)
+        provisioned_iops = kwargs.pop('provisioned_iops', None)
+        provisioned_bandwidth_mibps = kwargs.pop('provisioned_bandwidth_mibps', None)
+        share = self.get_share_client(share_name)
+        kwargs.setdefault('merge_span', True)
+        await share.create_share(
+            metadata=metadata,
+            quota=quota,
+            timeout=timeout,
+            provisioned_iops=provisioned_iops,
+            provisioned_bandwidth_mibps=provisioned_bandwidth_mibps,
+            **kwargs
+        )
+        return share
+
+    @distributed_trace_async
+    async def delete_share(
+        self, share_name: Union[ShareProperties, str],
+        delete_snapshots: Optional[bool] = False,
+        **kwargs: Any
+    ) -> None:
+        """Marks the specified share for deletion. The share is
+        later deleted during garbage collection.
+
+        :param share_name:
+            The share to delete. This can either be the name of the share,
+            or an instance of ShareProperties.
+        :type share_name: str or ~azure.storage.fileshare.ShareProperties
+        :param bool delete_snapshots:
+            Indicates if snapshots are to be deleted.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :rtype: None
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/file_samples_service_async.py
+                :start-after: [START fsc_delete_shares]
+                :end-before: [END fsc_delete_shares]
+                :language: python
+                :dedent: 16
+                :caption: Delete a share in the file share service.
+        """
+        timeout = kwargs.pop('timeout', None)
+        share = self.get_share_client(share_name)
+        kwargs.setdefault('merge_span', True)
+        await share.delete_share(
+            delete_snapshots=delete_snapshots, timeout=timeout, **kwargs)
+
+    @distributed_trace_async
+    async def undelete_share(self, deleted_share_name: str, deleted_share_version: str, **kwargs: Any) -> ShareClient:
+        """Restores soft-deleted share.
+
+        Operation will only be successful if used within the specified number of days
+        set in the delete retention policy.
+
+        .. versionadded:: 12.2.0
+
+            This operation was introduced in API version '2019-12-12'.
+
+        :param str deleted_share_name:
+            Specifies the name of the deleted share to restore.
+        :param str deleted_share_version:
+            Specifies the version of the deleted share to restore.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-file-share
+            #other-client--per-operation-configuration>`__.
+        :return: A ShareClient for the undeleted Share.
+        :rtype: ~azure.storage.fileshare.aio.ShareClient
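+
+        .. admonition:: Example:
+
+            A minimal sketch, assuming this client is named ``service`` and the
+            account has share soft delete enabled; the share name is illustrative.
+
+            .. code-block:: python
+
+                # Find the deleted share's version, then restore it.
+                async for share in service.list_shares(include_deleted=True):
+                    if share.name == "myshare" and share.deleted:
+                        restored = await service.undelete_share(share.name, share.version)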
+        """
+        share = self.get_share_client(deleted_share_name)
+        try:
+            await share._client.share.restore(deleted_share_name=deleted_share_name,  # pylint: disable=protected-access
+                                              deleted_share_version=deleted_share_version,
+                                              timeout=kwargs.pop('timeout', None), **kwargs)
+            return share
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    def get_share_client(
+        self, share: Union[ShareProperties, str],
+        snapshot: Optional[Union[Dict[str, Any], str]] = None
+    ) -> ShareClient:
+        """Get a client to interact with the specified share.
+        The share need not already exist.
+
+        :param share:
+            The share. This can either be the name of the share,
+            or an instance of ShareProperties.
+        :type share: str or ~azure.storage.fileshare.ShareProperties
+        :param str snapshot:
+            An optional share snapshot on which to operate. This can be the snapshot ID string
+            or the response returned from :func:`create_snapshot`.
+        :returns: A ShareClient.
+        :rtype: ~azure.storage.fileshare.aio.ShareClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/file_samples_service_async.py
+                :start-after: [START get_share_client]
+                :end-before: [END get_share_client]
+                :language: python
+                :dedent: 8
+                :caption: Gets the share client.
+        """
+        if isinstance(share, ShareProperties):
+            share_name = share.name
+        else:
+            share_name = share
+
+        _pipeline = AsyncPipeline(
+            transport=AsyncTransportWrapper(self._pipeline._transport),  # pylint: disable = protected-access
+            policies=self._pipeline._impl_policies  # type: ignore [arg-type] # pylint: disable = protected-access
+        )
+        return ShareClient(
+            self.url, share_name=share_name, snapshot=snapshot, credential=self.credential,
+            api_version=self.api_version, _hosts=self._hosts, _configuration=self._config,
+            _pipeline=_pipeline, _location_mode=self._location_mode, allow_trailing_dot=self.allow_trailing_dot,
+            allow_source_trailing_dot=self.allow_source_trailing_dot, token_intent=self.file_request_intent)
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/fileshare/py.typed b/.venv/lib/python3.12/site-packages/azure/storage/fileshare/py.typed
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/fileshare/py.typed