Diffstat (limited to '.venv/lib/python3.12/site-packages/anyio/_core')
 .venv/lib/python3.12/site-packages/anyio/_core/__init__.py                 |   0
 .venv/lib/python3.12/site-packages/anyio/_core/_asyncio_selector_thread.py | 167
 .venv/lib/python3.12/site-packages/anyio/_core/_eventloop.py               | 166
 .venv/lib/python3.12/site-packages/anyio/_core/_exceptions.py              | 126
 .venv/lib/python3.12/site-packages/anyio/_core/_fileio.py                  | 742
 .venv/lib/python3.12/site-packages/anyio/_core/_resources.py               |  18
 .venv/lib/python3.12/site-packages/anyio/_core/_signals.py                 |  27
 .venv/lib/python3.12/site-packages/anyio/_core/_sockets.py                 | 792
 .venv/lib/python3.12/site-packages/anyio/_core/_streams.py                 |  52
 .venv/lib/python3.12/site-packages/anyio/_core/_subprocesses.py            | 202
 .venv/lib/python3.12/site-packages/anyio/_core/_synchronization.py         | 732
 .venv/lib/python3.12/site-packages/anyio/_core/_tasks.py                   | 158
 .venv/lib/python3.12/site-packages/anyio/_core/_tempfile.py                | 616
 .venv/lib/python3.12/site-packages/anyio/_core/_testing.py                 |  78
 .venv/lib/python3.12/site-packages/anyio/_core/_typedattr.py               |  81
 15 files changed, 3957 insertions(+), 0 deletions(-)
diff --git a/.venv/lib/python3.12/site-packages/anyio/_core/__init__.py b/.venv/lib/python3.12/site-packages/anyio/_core/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/anyio/_core/__init__.py
diff --git a/.venv/lib/python3.12/site-packages/anyio/_core/_asyncio_selector_thread.py b/.venv/lib/python3.12/site-packages/anyio/_core/_asyncio_selector_thread.py
new file mode 100644
index 00000000..9f35bae5
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/anyio/_core/_asyncio_selector_thread.py
@@ -0,0 +1,167 @@
+from __future__ import annotations
+
+import asyncio
+import socket
+import threading
+from collections.abc import Callable
+from selectors import EVENT_READ, EVENT_WRITE, DefaultSelector
+from typing import TYPE_CHECKING, Any
+
+if TYPE_CHECKING:
+    from _typeshed import FileDescriptorLike
+
+_selector_lock = threading.Lock()
+_selector: Selector | None = None
+
+
+class Selector:
+    def __init__(self) -> None:
+        self._thread = threading.Thread(target=self.run, name="AnyIO socket selector")
+        self._selector = DefaultSelector()
+        self._send, self._receive = socket.socketpair()
+        self._send.setblocking(False)
+        self._receive.setblocking(False)
+        # This somewhat reduces the amount of memory wasted queueing up data
+        # for wakeups. With these settings, maximum number of 1-byte sends
+        # before getting BlockingIOError:
+        #   Linux 4.8: 6
+        #   macOS (darwin 15.5): 1
+        #   Windows 10: 525347
+        # Windows you're weird. (And on Windows setting SNDBUF to 0 makes send
+        # blocking, even on non-blocking sockets, so don't do that.)
+        self._receive.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, 1)
+        self._send.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, 1)
+        # On Windows this is a TCP socket so this might matter. On other
+        # platforms this fails b/c AF_UNIX sockets aren't actually TCP.
+        try:
+            self._send.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
+        except OSError:
+            pass
+
+        self._selector.register(self._receive, EVENT_READ)
+        self._closed = False
+
+    def start(self) -> None:
+        self._thread.start()
+        threading._register_atexit(self._stop)  # type: ignore[attr-defined]
+
+    def _stop(self) -> None:
+        global _selector
+        self._closed = True
+        self._notify_self()
+        self._send.close()
+        self._thread.join()
+        self._selector.unregister(self._receive)
+        self._receive.close()
+        self._selector.close()
+        _selector = None
+        assert not self._selector.get_map(), (
+            "selector still has registered file descriptors after shutdown"
+        )
+
+    def _notify_self(self) -> None:
+        try:
+            self._send.send(b"\x00")
+        except BlockingIOError:
+            pass
+
+    def add_reader(self, fd: FileDescriptorLike, callback: Callable[[], Any]) -> None:
+        loop = asyncio.get_running_loop()
+        try:
+            key = self._selector.get_key(fd)
+        except KeyError:
+            self._selector.register(fd, EVENT_READ, {EVENT_READ: (loop, callback)})
+        else:
+            if EVENT_READ in key.data:
+                raise ValueError(
+                    "this file descriptor is already registered for reading"
+                )
+
+            key.data[EVENT_READ] = loop, callback
+            self._selector.modify(fd, key.events | EVENT_READ, key.data)
+
+        self._notify_self()
+
+    def add_writer(self, fd: FileDescriptorLike, callback: Callable[[], Any]) -> None:
+        loop = asyncio.get_running_loop()
+        try:
+            key = self._selector.get_key(fd)
+        except KeyError:
+            self._selector.register(fd, EVENT_WRITE, {EVENT_WRITE: (loop, callback)})
+        else:
+            if EVENT_WRITE in key.data:
+                raise ValueError(
+                    "this file descriptor is already registered for writing"
+                )
+
+            key.data[EVENT_WRITE] = loop, callback
+            self._selector.modify(fd, key.events | EVENT_WRITE, key.data)
+
+        self._notify_self()
+
+    def remove_reader(self, fd: FileDescriptorLike) -> bool:
+        try:
+            key = self._selector.get_key(fd)
+        except KeyError:
+            return False
+
+        if new_events := key.events ^ EVENT_READ:
+            del key.data[EVENT_READ]
+            self._selector.modify(fd, new_events, key.data)
+        else:
+            self._selector.unregister(fd)
+
+        return True
+
+    def remove_writer(self, fd: FileDescriptorLike) -> bool:
+        try:
+            key = self._selector.get_key(fd)
+        except KeyError:
+            return False
+
+        if new_events := key.events ^ EVENT_WRITE:
+            del key.data[EVENT_WRITE]
+            self._selector.modify(fd, new_events, key.data)
+        else:
+            self._selector.unregister(fd)
+
+        return True
+
+    def run(self) -> None:
+        while not self._closed:
+            for key, events in self._selector.select():
+                if key.fileobj is self._receive:
+                    try:
+                        while self._receive.recv(4096):
+                            pass
+                    except BlockingIOError:
+                        pass
+
+                    continue
+
+                if events & EVENT_READ:
+                    loop, callback = key.data[EVENT_READ]
+                    self.remove_reader(key.fd)
+                    try:
+                        loop.call_soon_threadsafe(callback)
+                    except RuntimeError:
+                        pass  # the loop was already closed
+
+                if events & EVENT_WRITE:
+                    loop, callback = key.data[EVENT_WRITE]
+                    self.remove_writer(key.fd)
+                    try:
+                        loop.call_soon_threadsafe(callback)
+                    except RuntimeError:
+                        pass  # the loop was already closed
+
+
+def get_selector() -> Selector:
+    global _selector
+
+    with _selector_lock:
+        if _selector is None:
+            _selector = Selector()
+            _selector.start()
+
+        return _selector
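
For reference, a minimal sketch of how this private selector thread is driven: get_selector() lazily starts the shared thread, and add_reader()/add_writer() arm one-shot callbacks that are dispatched back to the registering event loop via call_soon_threadsafe(). This example is illustrative only (the module is internal to AnyIO's asyncio backend), assuming a plain asyncio program::

    import asyncio
    import socket

    from anyio._core._asyncio_selector_thread import get_selector


    async def main() -> None:
        # A connected socket pair; wait until "a" becomes readable.
        a, b = socket.socketpair()
        a.setblocking(False)
        readable = asyncio.Event()

        # First use starts the shared selector thread; the registration is
        # one-shot, since run() removes the reader before firing the callback.
        get_selector().add_reader(a.fileno(), readable.set)

        b.send(b"x")  # makes "a" readable, waking the selector thread
        await readable.wait()
        print("readable:", a.recv(1))


    asyncio.run(main())
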
diff --git a/.venv/lib/python3.12/site-packages/anyio/_core/_eventloop.py b/.venv/lib/python3.12/site-packages/anyio/_core/_eventloop.py
new file mode 100644
index 00000000..6dcb4589
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/anyio/_core/_eventloop.py
@@ -0,0 +1,166 @@
+from __future__ import annotations
+
+import math
+import sys
+import threading
+from collections.abc import Awaitable, Callable, Generator
+from contextlib import contextmanager
+from importlib import import_module
+from typing import TYPE_CHECKING, Any, TypeVar
+
+import sniffio
+
+if sys.version_info >= (3, 11):
+    from typing import TypeVarTuple, Unpack
+else:
+    from typing_extensions import TypeVarTuple, Unpack
+
+if TYPE_CHECKING:
+    from ..abc import AsyncBackend
+
+# This must be updated when new backends are introduced
+BACKENDS = "asyncio", "trio"
+
+T_Retval = TypeVar("T_Retval")
+PosArgsT = TypeVarTuple("PosArgsT")
+
+threadlocals = threading.local()
+loaded_backends: dict[str, type[AsyncBackend]] = {}
+
+
+def run(
+    func: Callable[[Unpack[PosArgsT]], Awaitable[T_Retval]],
+    *args: Unpack[PosArgsT],
+    backend: str = "asyncio",
+    backend_options: dict[str, Any] | None = None,
+) -> T_Retval:
+    """
+    Run the given coroutine function in an asynchronous event loop.
+
+    The current thread must not be already running an event loop.
+
+    :param func: a coroutine function
+    :param args: positional arguments to ``func``
+    :param backend: name of the asynchronous event loop implementation – currently
+        either ``asyncio`` or ``trio``
+    :param backend_options: keyword arguments to call the backend ``run()``
+        implementation with (documented :ref:`here <backend options>`)
+    :return: the return value of the coroutine function
+    :raises RuntimeError: if an asynchronous event loop is already running in this
+        thread
+    :raises LookupError: if the named backend is not found
+
+    """
+    try:
+        asynclib_name = sniffio.current_async_library()
+    except sniffio.AsyncLibraryNotFoundError:
+        pass
+    else:
+        raise RuntimeError(f"Already running {asynclib_name} in this thread")
+
+    try:
+        async_backend = get_async_backend(backend)
+    except ImportError as exc:
+        raise LookupError(f"No such backend: {backend}") from exc
+
+    token = None
+    if sniffio.current_async_library_cvar.get(None) is None:
+        # Since we're in control of the event loop, we can cache the name of the async
+        # library
+        token = sniffio.current_async_library_cvar.set(backend)
+
+    try:
+        backend_options = backend_options or {}
+        return async_backend.run(func, args, {}, backend_options)
+    finally:
+        if token:
+            sniffio.current_async_library_cvar.reset(token)
+
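
As a usage sketch of run() (the second call assumes the trio package is installed)::

    import anyio


    async def main(name: str) -> str:
        await anyio.sleep(0.1)
        return f"hello, {name}"


    # Positional arguments are passed through; the backend defaults to asyncio.
    print(anyio.run(main, "world"))

    # The same coroutine function can be run on another backend.
    print(anyio.run(main, "world", backend="trio"))
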
+
+async def sleep(delay: float) -> None:
+    """
+    Pause the current task for the specified duration.
+
+    :param delay: the duration, in seconds
+
+    """
+    return await get_async_backend().sleep(delay)
+
+
+async def sleep_forever() -> None:
+    """
+    Pause the current task until it's cancelled.
+
+    This is a shortcut for ``sleep(math.inf)``.
+
+    .. versionadded:: 3.1
+
+    """
+    await sleep(math.inf)
+
+
+async def sleep_until(deadline: float) -> None:
+    """
+    Pause the current task until the given time.
+
+    :param deadline: the absolute time to wake up at (according to the internal
+        monotonic clock of the event loop)
+
+    .. versionadded:: 3.1
+
+    """
+    now = current_time()
+    await sleep(max(deadline - now, 0))
+
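
As a brief sketch, sleep_until() pairs naturally with current_time() for drift-free periodic work (the 1-second period here is arbitrary)::

    import anyio


    async def ticker() -> None:
        # Absolute deadlines from the loop's monotonic clock, so the period
        # does not drift with however long the work between ticks takes.
        deadline = anyio.current_time()
        for tick in range(3):
            deadline += 1.0
            await anyio.sleep_until(deadline)
            print("tick", tick)


    anyio.run(ticker)
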
+
+def current_time() -> float:
+    """
+    Return the current value of the event loop's internal clock.
+
+    :return: the clock value (seconds)
+
+    """
+    return get_async_backend().current_time()
+
+
+def get_all_backends() -> tuple[str, ...]:
+    """Return a tuple of the names of all built-in backends."""
+    return BACKENDS
+
+
+def get_cancelled_exc_class() -> type[BaseException]:
+    """Return the current async library's cancellation exception class."""
+    return get_async_backend().cancelled_exception_class()
+
+
+#
+# Private API
+#
+
+
+@contextmanager
+def claim_worker_thread(
+    backend_class: type[AsyncBackend], token: object
+) -> Generator[Any, None, None]:
+    threadlocals.current_async_backend = backend_class
+    threadlocals.current_token = token
+    try:
+        yield
+    finally:
+        del threadlocals.current_async_backend
+        del threadlocals.current_token
+
+
+def get_async_backend(asynclib_name: str | None = None) -> type[AsyncBackend]:
+    if asynclib_name is None:
+        asynclib_name = sniffio.current_async_library()
+
+    # We use our own dict instead of sys.modules to get the already imported back-end
+    # class because the appropriate modules in sys.modules could potentially be only
+    # partially initialized
+    try:
+        return loaded_backends[asynclib_name]
+    except KeyError:
+        module = import_module(f"anyio._backends._{asynclib_name}")
+        loaded_backends[asynclib_name] = module.backend_class
+        return module.backend_class
diff --git a/.venv/lib/python3.12/site-packages/anyio/_core/_exceptions.py b/.venv/lib/python3.12/site-packages/anyio/_core/_exceptions.py
new file mode 100644
index 00000000..16b94482
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/anyio/_core/_exceptions.py
@@ -0,0 +1,126 @@
+from __future__ import annotations
+
+import sys
+from collections.abc import Generator
+from textwrap import dedent
+from typing import Any
+
+if sys.version_info < (3, 11):
+    from exceptiongroup import BaseExceptionGroup
+
+
+class BrokenResourceError(Exception):
+    """
+    Raised when trying to use a resource that has been rendered unusable due to external
+    causes (e.g. a send stream whose peer has disconnected).
+    """
+
+
+class BrokenWorkerProcess(Exception):
+    """
+    Raised by :meth:`~anyio.to_process.run_sync` if the worker process terminates abruptly or
+    otherwise misbehaves.
+    """
+
+
+class BrokenWorkerIntepreter(Exception):
+    """
+    Raised by :meth:`~anyio.to_interpreter.run_sync` if an unexpected exception is
+    raised in the subinterpreter.
+    """
+
+    def __init__(self, excinfo: Any):
+        # This was adapted from concurrent.futures.interpreter.ExecutionFailed
+        msg = excinfo.formatted
+        if not msg:
+            if excinfo.type and excinfo.msg:
+                msg = f"{excinfo.type.__name__}: {excinfo.msg}"
+            else:
+                msg = excinfo.type.__name__ or excinfo.msg
+
+        super().__init__(msg)
+        self.excinfo = excinfo
+
+    def __str__(self) -> str:
+        try:
+            formatted = self.excinfo.errdisplay
+        except Exception:
+            return super().__str__()
+        else:
+            return dedent(
+                f"""
+                {super().__str__()}
+
+                Uncaught in the interpreter:
+
+                {formatted}
+                """.strip()
+            )
+
+
+class BusyResourceError(Exception):
+    """
+    Raised when two tasks are trying to read from or write to the same resource
+    concurrently.
+    """
+
+    def __init__(self, action: str):
+        super().__init__(f"Another task is already {action} this resource")
+
+
+class ClosedResourceError(Exception):
+    """Raised when trying to use a resource that has been closed."""
+
+
+class DelimiterNotFound(Exception):
+    """
+    Raised during
+    :meth:`~anyio.streams.buffered.BufferedByteReceiveStream.receive_until` if the
+    maximum number of bytes has been read without the delimiter being found.
+    """
+
+    def __init__(self, max_bytes: int) -> None:
+        super().__init__(
+            f"The delimiter was not found among the first {max_bytes} bytes"
+        )
+
+
+class EndOfStream(Exception):
+    """
+    Raised when trying to read from a stream that has been closed from the other end.
+    """
+
+
+class IncompleteRead(Exception):
+    """
+    Raised during
+    :meth:`~anyio.streams.buffered.BufferedByteReceiveStream.receive_exactly` or
+    :meth:`~anyio.streams.buffered.BufferedByteReceiveStream.receive_until` if the
+    connection is closed before the requested amount of bytes has been read.
+    """
+
+    def __init__(self) -> None:
+        super().__init__(
+            "The stream was closed before the read operation could be completed"
+        )
+
+
+class TypedAttributeLookupError(LookupError):
+    """
+    Raised by :meth:`~anyio.TypedAttributeProvider.extra` when the given typed attribute
+    is not found and no default value has been given.
+    """
+
+
+class WouldBlock(Exception):
+    """Raised by ``X_nowait`` functions if ``X()`` would block."""
+
+
+def iterate_exceptions(
+    exception: BaseException,
+) -> Generator[BaseException, None, None]:
+    if isinstance(exception, BaseExceptionGroup):
+        for exc in exception.exceptions:
+            yield from iterate_exceptions(exc)
+    else:
+        yield exception
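
iterate_exceptions() flattens arbitrarily nested exception groups into their leaf exceptions. A quick sketch of the behavior (using the private import path, since the helper is not re-exported; Python 3.11+ for the built-in BaseExceptionGroup)::

    from anyio._core._exceptions import iterate_exceptions

    group = BaseExceptionGroup(
        "outer",
        [ValueError("a"), BaseExceptionGroup("inner", [KeyError("b")])],
    )
    print([type(exc).__name__ for exc in iterate_exceptions(group)])
    # ['ValueError', 'KeyError']
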
diff --git a/.venv/lib/python3.12/site-packages/anyio/_core/_fileio.py b/.venv/lib/python3.12/site-packages/anyio/_core/_fileio.py
new file mode 100644
index 00000000..a0d61984
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/anyio/_core/_fileio.py
@@ -0,0 +1,742 @@
+from __future__ import annotations
+
+import os
+import pathlib
+import sys
+from collections.abc import (
+    AsyncIterator,
+    Callable,
+    Iterable,
+    Iterator,
+    Sequence,
+)
+from dataclasses import dataclass
+from functools import partial
+from os import PathLike
+from typing import (
+    IO,
+    TYPE_CHECKING,
+    Any,
+    AnyStr,
+    ClassVar,
+    Final,
+    Generic,
+    overload,
+)
+
+from .. import to_thread
+from ..abc import AsyncResource
+
+if TYPE_CHECKING:
+    from types import ModuleType
+
+    from _typeshed import OpenBinaryMode, OpenTextMode, ReadableBuffer, WriteableBuffer
+else:
+    ReadableBuffer = OpenBinaryMode = OpenTextMode = WriteableBuffer = object
+
+
+class AsyncFile(AsyncResource, Generic[AnyStr]):
+    """
+    An asynchronous file object.
+
+    This class wraps a standard file object and provides async friendly versions of the
+    following blocking methods (where available on the original file object):
+
+    * read
+    * read1
+    * readline
+    * readlines
+    * readinto
+    * readinto1
+    * write
+    * writelines
+    * truncate
+    * seek
+    * tell
+    * flush
+
+    All other methods are directly passed through.
+
+    This class supports the asynchronous context manager protocol which closes the
+    underlying file at the end of the context block.
+
+    This class also supports asynchronous iteration::
+
+        async with await open_file(...) as f:
+            async for line in f:
+                print(line)
+    """
+
+    def __init__(self, fp: IO[AnyStr]) -> None:
+        self._fp: Any = fp
+
+    def __getattr__(self, name: str) -> object:
+        return getattr(self._fp, name)
+
+    @property
+    def wrapped(self) -> IO[AnyStr]:
+        """The wrapped file object."""
+        return self._fp
+
+    async def __aiter__(self) -> AsyncIterator[AnyStr]:
+        while True:
+            line = await self.readline()
+            if line:
+                yield line
+            else:
+                break
+
+    async def aclose(self) -> None:
+        return await to_thread.run_sync(self._fp.close)
+
+    async def read(self, size: int = -1) -> AnyStr:
+        return await to_thread.run_sync(self._fp.read, size)
+
+    async def read1(self: AsyncFile[bytes], size: int = -1) -> bytes:
+        return await to_thread.run_sync(self._fp.read1, size)
+
+    async def readline(self) -> AnyStr:
+        return await to_thread.run_sync(self._fp.readline)
+
+    async def readlines(self) -> list[AnyStr]:
+        return await to_thread.run_sync(self._fp.readlines)
+
+    async def readinto(self: AsyncFile[bytes], b: WriteableBuffer) -> int:
+        return await to_thread.run_sync(self._fp.readinto, b)
+
+    async def readinto1(self: AsyncFile[bytes], b: WriteableBuffer) -> int:
+        return await to_thread.run_sync(self._fp.readinto1, b)
+
+    @overload
+    async def write(self: AsyncFile[bytes], b: ReadableBuffer) -> int: ...
+
+    @overload
+    async def write(self: AsyncFile[str], b: str) -> int: ...
+
+    async def write(self, b: ReadableBuffer | str) -> int:
+        return await to_thread.run_sync(self._fp.write, b)
+
+    @overload
+    async def writelines(
+        self: AsyncFile[bytes], lines: Iterable[ReadableBuffer]
+    ) -> None: ...
+
+    @overload
+    async def writelines(self: AsyncFile[str], lines: Iterable[str]) -> None: ...
+
+    async def writelines(self, lines: Iterable[ReadableBuffer] | Iterable[str]) -> None:
+        return await to_thread.run_sync(self._fp.writelines, lines)
+
+    async def truncate(self, size: int | None = None) -> int:
+        return await to_thread.run_sync(self._fp.truncate, size)
+
+    async def seek(self, offset: int, whence: int | None = os.SEEK_SET) -> int:
+        return await to_thread.run_sync(self._fp.seek, offset, whence)
+
+    async def tell(self) -> int:
+        return await to_thread.run_sync(self._fp.tell)
+
+    async def flush(self) -> None:
+        return await to_thread.run_sync(self._fp.flush)
+
+
+@overload
+async def open_file(
+    file: str | PathLike[str] | int,
+    mode: OpenBinaryMode,
+    buffering: int = ...,
+    encoding: str | None = ...,
+    errors: str | None = ...,
+    newline: str | None = ...,
+    closefd: bool = ...,
+    opener: Callable[[str, int], int] | None = ...,
+) -> AsyncFile[bytes]: ...
+
+
+@overload
+async def open_file(
+    file: str | PathLike[str] | int,
+    mode: OpenTextMode = ...,
+    buffering: int = ...,
+    encoding: str | None = ...,
+    errors: str | None = ...,
+    newline: str | None = ...,
+    closefd: bool = ...,
+    opener: Callable[[str, int], int] | None = ...,
+) -> AsyncFile[str]: ...
+
+
+async def open_file(
+    file: str | PathLike[str] | int,
+    mode: str = "r",
+    buffering: int = -1,
+    encoding: str | None = None,
+    errors: str | None = None,
+    newline: str | None = None,
+    closefd: bool = True,
+    opener: Callable[[str, int], int] | None = None,
+) -> AsyncFile[Any]:
+    """
+    Open a file asynchronously.
+
+    The arguments are exactly the same as for the builtin :func:`open`.
+
+    :return: an asynchronous file object
+
+    """
+    fp = await to_thread.run_sync(
+        open, file, mode, buffering, encoding, errors, newline, closefd, opener
+    )
+    return AsyncFile(fp)
+
+
+def wrap_file(file: IO[AnyStr]) -> AsyncFile[AnyStr]:
+    """
+    Wrap an existing file as an asynchronous file.
+
+    :param file: an existing file-like object
+    :return: an asynchronous file object
+
+    """
+    return AsyncFile(file)
+
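
wrap_file() suits objects that were not opened via open_file(), such as in-memory buffers; a sketch (each wrapped call still runs in a worker thread, which is overkill for BytesIO but shows the API)::

    import io

    import anyio
    from anyio import wrap_file


    async def main() -> None:
        async_file = wrap_file(io.BytesIO(b"line one\nline two\n"))
        async for line in async_file:
            print(line)


    anyio.run(main)
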
+
+@dataclass(eq=False)
+class _PathIterator(AsyncIterator["Path"]):
+    iterator: Iterator[PathLike[str]]
+
+    async def __anext__(self) -> Path:
+        nextval = await to_thread.run_sync(
+            next, self.iterator, None, abandon_on_cancel=True
+        )
+        if nextval is None:
+            raise StopAsyncIteration from None
+
+        return Path(nextval)
+
+
+class Path:
+    """
+    An asynchronous version of :class:`pathlib.Path`.
+
+    This class cannot be substituted for :class:`pathlib.Path` or
+    :class:`pathlib.PurePath`, but it is compatible with the :class:`os.PathLike`
+    interface.
+
+    It implements the Python 3.10 version of :class:`pathlib.Path` interface, except for
+    the deprecated :meth:`~pathlib.Path.link_to` method.
+
+    Some methods may be unavailable or have limited functionality, based on the Python
+    version:
+
+    * :meth:`~pathlib.Path.copy` (available on Python 3.14 or later)
+    * :meth:`~pathlib.Path.copy_into` (available on Python 3.14 or later)
+    * :meth:`~pathlib.Path.from_uri` (available on Python 3.13 or later)
+    * :meth:`~pathlib.PurePath.full_match` (available on Python 3.13 or later)
+    * :attr:`~pathlib.Path.info` (available on Python 3.14 or later)
+    * :meth:`~pathlib.Path.is_junction` (available on Python 3.12 or later)
+    * :meth:`~pathlib.PurePath.match` (the ``case_sensitive`` parameter is only
+      available on Python 3.13 or later)
+    * :meth:`~pathlib.Path.move` (available on Python 3.14 or later)
+    * :meth:`~pathlib.Path.move_into` (available on Python 3.14 or later)
+    * :meth:`~pathlib.PurePath.relative_to` (the ``walk_up`` parameter is only available
+      on Python 3.12 or later)
+    * :meth:`~pathlib.Path.walk` (available on Python 3.12 or later)
+
+    Any methods that do disk I/O need to be awaited on. These methods are:
+
+    * :meth:`~pathlib.Path.absolute`
+    * :meth:`~pathlib.Path.chmod`
+    * :meth:`~pathlib.Path.cwd`
+    * :meth:`~pathlib.Path.exists`
+    * :meth:`~pathlib.Path.expanduser`
+    * :meth:`~pathlib.Path.group`
+    * :meth:`~pathlib.Path.hardlink_to`
+    * :meth:`~pathlib.Path.home`
+    * :meth:`~pathlib.Path.is_block_device`
+    * :meth:`~pathlib.Path.is_char_device`
+    * :meth:`~pathlib.Path.is_dir`
+    * :meth:`~pathlib.Path.is_fifo`
+    * :meth:`~pathlib.Path.is_file`
+    * :meth:`~pathlib.Path.is_junction`
+    * :meth:`~pathlib.Path.is_mount`
+    * :meth:`~pathlib.Path.is_socket`
+    * :meth:`~pathlib.Path.is_symlink`
+    * :meth:`~pathlib.Path.lchmod`
+    * :meth:`~pathlib.Path.lstat`
+    * :meth:`~pathlib.Path.mkdir`
+    * :meth:`~pathlib.Path.open`
+    * :meth:`~pathlib.Path.owner`
+    * :meth:`~pathlib.Path.read_bytes`
+    * :meth:`~pathlib.Path.read_text`
+    * :meth:`~pathlib.Path.readlink`
+    * :meth:`~pathlib.Path.rename`
+    * :meth:`~pathlib.Path.replace`
+    * :meth:`~pathlib.Path.resolve`
+    * :meth:`~pathlib.Path.rmdir`
+    * :meth:`~pathlib.Path.samefile`
+    * :meth:`~pathlib.Path.stat`
+    * :meth:`~pathlib.Path.symlink_to`
+    * :meth:`~pathlib.Path.touch`
+    * :meth:`~pathlib.Path.unlink`
+    * :meth:`~pathlib.Path.walk`
+    * :meth:`~pathlib.Path.write_bytes`
+    * :meth:`~pathlib.Path.write_text`
+
+    Additionally, the following methods return an async iterator yielding
+    :class:`~.Path` objects:
+
+    * :meth:`~pathlib.Path.glob`
+    * :meth:`~pathlib.Path.iterdir`
+    * :meth:`~pathlib.Path.rglob`
+    """
+
+    __slots__ = "_path", "__weakref__"
+
+    __weakref__: Any
+
+    def __init__(self, *args: str | PathLike[str]) -> None:
+        self._path: Final[pathlib.Path] = pathlib.Path(*args)
+
+    def __fspath__(self) -> str:
+        return self._path.__fspath__()
+
+    def __str__(self) -> str:
+        return self._path.__str__()
+
+    def __repr__(self) -> str:
+        return f"{self.__class__.__name__}({self.as_posix()!r})"
+
+    def __bytes__(self) -> bytes:
+        return self._path.__bytes__()
+
+    def __hash__(self) -> int:
+        return self._path.__hash__()
+
+    def __eq__(self, other: object) -> bool:
+        target = other._path if isinstance(other, Path) else other
+        return self._path.__eq__(target)
+
+    def __lt__(self, other: pathlib.PurePath | Path) -> bool:
+        target = other._path if isinstance(other, Path) else other
+        return self._path.__lt__(target)
+
+    def __le__(self, other: pathlib.PurePath | Path) -> bool:
+        target = other._path if isinstance(other, Path) else other
+        return self._path.__le__(target)
+
+    def __gt__(self, other: pathlib.PurePath | Path) -> bool:
+        target = other._path if isinstance(other, Path) else other
+        return self._path.__gt__(target)
+
+    def __ge__(self, other: pathlib.PurePath | Path) -> bool:
+        target = other._path if isinstance(other, Path) else other
+        return self._path.__ge__(target)
+
+    def __truediv__(self, other: str | PathLike[str]) -> Path:
+        return Path(self._path / other)
+
+    def __rtruediv__(self, other: str | PathLike[str]) -> Path:
+        return Path(other) / self
+
+    @property
+    def parts(self) -> tuple[str, ...]:
+        return self._path.parts
+
+    @property
+    def drive(self) -> str:
+        return self._path.drive
+
+    @property
+    def root(self) -> str:
+        return self._path.root
+
+    @property
+    def anchor(self) -> str:
+        return self._path.anchor
+
+    @property
+    def parents(self) -> Sequence[Path]:
+        return tuple(Path(p) for p in self._path.parents)
+
+    @property
+    def parent(self) -> Path:
+        return Path(self._path.parent)
+
+    @property
+    def name(self) -> str:
+        return self._path.name
+
+    @property
+    def suffix(self) -> str:
+        return self._path.suffix
+
+    @property
+    def suffixes(self) -> list[str]:
+        return self._path.suffixes
+
+    @property
+    def stem(self) -> str:
+        return self._path.stem
+
+    async def absolute(self) -> Path:
+        path = await to_thread.run_sync(self._path.absolute)
+        return Path(path)
+
+    def as_posix(self) -> str:
+        return self._path.as_posix()
+
+    def as_uri(self) -> str:
+        return self._path.as_uri()
+
+    if sys.version_info >= (3, 13):
+        parser: ClassVar[ModuleType] = pathlib.Path.parser
+
+        @classmethod
+        def from_uri(cls, uri: str) -> Path:
+            return Path(pathlib.Path.from_uri(uri))
+
+        def full_match(
+            self, path_pattern: str, *, case_sensitive: bool | None = None
+        ) -> bool:
+            return self._path.full_match(path_pattern, case_sensitive=case_sensitive)
+
+        def match(
+            self, path_pattern: str, *, case_sensitive: bool | None = None
+        ) -> bool:
+            return self._path.match(path_pattern, case_sensitive=case_sensitive)
+    else:
+
+        def match(self, path_pattern: str) -> bool:
+            return self._path.match(path_pattern)
+
+    if sys.version_info >= (3, 14):
+
+        @property
+        def info(self) -> Any:  # TODO: add return type annotation when Typeshed gets it
+            return self._path.info
+
+        async def copy(
+            self,
+            target: str | os.PathLike[str],
+            *,
+            follow_symlinks: bool = True,
+            dirs_exist_ok: bool = False,
+            preserve_metadata: bool = False,
+        ) -> Path:
+            func = partial(
+                self._path.copy,
+                follow_symlinks=follow_symlinks,
+                dirs_exist_ok=dirs_exist_ok,
+                preserve_metadata=preserve_metadata,
+            )
+            return Path(await to_thread.run_sync(func, target))
+
+        async def copy_into(
+            self,
+            target_dir: str | os.PathLike[str],
+            *,
+            follow_symlinks: bool = True,
+            dirs_exist_ok: bool = False,
+            preserve_metadata: bool = False,
+        ) -> Path:
+            func = partial(
+                self._path.copy_into,
+                follow_symlinks=follow_symlinks,
+                dirs_exist_ok=dirs_exist_ok,
+                preserve_metadata=preserve_metadata,
+            )
+            return Path(await to_thread.run_sync(func, target_dir))
+
+        async def move(self, target: str | os.PathLike[str]) -> Path:
+            # Upstream does not handle anyio.Path properly as a PathLike
+            target = pathlib.Path(target)
+            return Path(await to_thread.run_sync(self._path.move, target))
+
+        async def move_into(
+            self,
+            target_dir: str | os.PathLike[str],
+        ) -> Path:
+            return Path(await to_thread.run_sync(self._path.move_into, target_dir))
+
+    def is_relative_to(self, other: str | PathLike[str]) -> bool:
+        try:
+            self.relative_to(other)
+            return True
+        except ValueError:
+            return False
+
+    async def chmod(self, mode: int, *, follow_symlinks: bool = True) -> None:
+        func = partial(os.chmod, follow_symlinks=follow_symlinks)
+        return await to_thread.run_sync(func, self._path, mode)
+
+    @classmethod
+    async def cwd(cls) -> Path:
+        path = await to_thread.run_sync(pathlib.Path.cwd)
+        return cls(path)
+
+    async def exists(self) -> bool:
+        return await to_thread.run_sync(self._path.exists, abandon_on_cancel=True)
+
+    async def expanduser(self) -> Path:
+        return Path(
+            await to_thread.run_sync(self._path.expanduser, abandon_on_cancel=True)
+        )
+
+    def glob(self, pattern: str) -> AsyncIterator[Path]:
+        gen = self._path.glob(pattern)
+        return _PathIterator(gen)
+
+    async def group(self) -> str:
+        return await to_thread.run_sync(self._path.group, abandon_on_cancel=True)
+
+    async def hardlink_to(
+        self, target: str | bytes | PathLike[str] | PathLike[bytes]
+    ) -> None:
+        if isinstance(target, Path):
+            target = target._path
+
+        await to_thread.run_sync(os.link, target, self)
+
+    @classmethod
+    async def home(cls) -> Path:
+        home_path = await to_thread.run_sync(pathlib.Path.home)
+        return cls(home_path)
+
+    def is_absolute(self) -> bool:
+        return self._path.is_absolute()
+
+    async def is_block_device(self) -> bool:
+        return await to_thread.run_sync(
+            self._path.is_block_device, abandon_on_cancel=True
+        )
+
+    async def is_char_device(self) -> bool:
+        return await to_thread.run_sync(
+            self._path.is_char_device, abandon_on_cancel=True
+        )
+
+    async def is_dir(self) -> bool:
+        return await to_thread.run_sync(self._path.is_dir, abandon_on_cancel=True)
+
+    async def is_fifo(self) -> bool:
+        return await to_thread.run_sync(self._path.is_fifo, abandon_on_cancel=True)
+
+    async def is_file(self) -> bool:
+        return await to_thread.run_sync(self._path.is_file, abandon_on_cancel=True)
+
+    if sys.version_info >= (3, 12):
+
+        async def is_junction(self) -> bool:
+            return await to_thread.run_sync(self._path.is_junction)
+
+    async def is_mount(self) -> bool:
+        return await to_thread.run_sync(
+            os.path.ismount, self._path, abandon_on_cancel=True
+        )
+
+    def is_reserved(self) -> bool:
+        return self._path.is_reserved()
+
+    async def is_socket(self) -> bool:
+        return await to_thread.run_sync(self._path.is_socket, abandon_on_cancel=True)
+
+    async def is_symlink(self) -> bool:
+        return await to_thread.run_sync(self._path.is_symlink, abandon_on_cancel=True)
+
+    async def iterdir(self) -> AsyncIterator[Path]:
+        gen = (
+            self._path.iterdir()
+            if sys.version_info < (3, 13)
+            else await to_thread.run_sync(self._path.iterdir, abandon_on_cancel=True)
+        )
+        async for path in _PathIterator(gen):
+            yield path
+
+    def joinpath(self, *args: str | PathLike[str]) -> Path:
+        return Path(self._path.joinpath(*args))
+
+    async def lchmod(self, mode: int) -> None:
+        await to_thread.run_sync(self._path.lchmod, mode)
+
+    async def lstat(self) -> os.stat_result:
+        return await to_thread.run_sync(self._path.lstat, abandon_on_cancel=True)
+
+    async def mkdir(
+        self, mode: int = 0o777, parents: bool = False, exist_ok: bool = False
+    ) -> None:
+        await to_thread.run_sync(self._path.mkdir, mode, parents, exist_ok)
+
+    @overload
+    async def open(
+        self,
+        mode: OpenBinaryMode,
+        buffering: int = ...,
+        encoding: str | None = ...,
+        errors: str | None = ...,
+        newline: str | None = ...,
+    ) -> AsyncFile[bytes]: ...
+
+    @overload
+    async def open(
+        self,
+        mode: OpenTextMode = ...,
+        buffering: int = ...,
+        encoding: str | None = ...,
+        errors: str | None = ...,
+        newline: str | None = ...,
+    ) -> AsyncFile[str]: ...
+
+    async def open(
+        self,
+        mode: str = "r",
+        buffering: int = -1,
+        encoding: str | None = None,
+        errors: str | None = None,
+        newline: str | None = None,
+    ) -> AsyncFile[Any]:
+        fp = await to_thread.run_sync(
+            self._path.open, mode, buffering, encoding, errors, newline
+        )
+        return AsyncFile(fp)
+
+    async def owner(self) -> str:
+        return await to_thread.run_sync(self._path.owner, abandon_on_cancel=True)
+
+    async def read_bytes(self) -> bytes:
+        return await to_thread.run_sync(self._path.read_bytes)
+
+    async def read_text(
+        self, encoding: str | None = None, errors: str | None = None
+    ) -> str:
+        return await to_thread.run_sync(self._path.read_text, encoding, errors)
+
+    if sys.version_info >= (3, 12):
+
+        def relative_to(
+            self, *other: str | PathLike[str], walk_up: bool = False
+        ) -> Path:
+            return Path(self._path.relative_to(*other, walk_up=walk_up))
+
+    else:
+
+        def relative_to(self, *other: str | PathLike[str]) -> Path:
+            return Path(self._path.relative_to(*other))
+
+    async def readlink(self) -> Path:
+        target = await to_thread.run_sync(os.readlink, self._path)
+        return Path(target)
+
+    async def rename(self, target: str | pathlib.PurePath | Path) -> Path:
+        if isinstance(target, Path):
+            target = target._path
+
+        await to_thread.run_sync(self._path.rename, target)
+        return Path(target)
+
+    async def replace(self, target: str | pathlib.PurePath | Path) -> Path:
+        if isinstance(target, Path):
+            target = target._path
+
+        await to_thread.run_sync(self._path.replace, target)
+        return Path(target)
+
+    async def resolve(self, strict: bool = False) -> Path:
+        func = partial(self._path.resolve, strict=strict)
+        return Path(await to_thread.run_sync(func, abandon_on_cancel=True))
+
+    def rglob(self, pattern: str) -> AsyncIterator[Path]:
+        gen = self._path.rglob(pattern)
+        return _PathIterator(gen)
+
+    async def rmdir(self) -> None:
+        await to_thread.run_sync(self._path.rmdir)
+
+    async def samefile(self, other_path: str | PathLike[str]) -> bool:
+        if isinstance(other_path, Path):
+            other_path = other_path._path
+
+        return await to_thread.run_sync(
+            self._path.samefile, other_path, abandon_on_cancel=True
+        )
+
+    async def stat(self, *, follow_symlinks: bool = True) -> os.stat_result:
+        func = partial(os.stat, follow_symlinks=follow_symlinks)
+        return await to_thread.run_sync(func, self._path, abandon_on_cancel=True)
+
+    async def symlink_to(
+        self,
+        target: str | bytes | PathLike[str] | PathLike[bytes],
+        target_is_directory: bool = False,
+    ) -> None:
+        if isinstance(target, Path):
+            target = target._path
+
+        await to_thread.run_sync(self._path.symlink_to, target, target_is_directory)
+
+    async def touch(self, mode: int = 0o666, exist_ok: bool = True) -> None:
+        await to_thread.run_sync(self._path.touch, mode, exist_ok)
+
+    async def unlink(self, missing_ok: bool = False) -> None:
+        try:
+            await to_thread.run_sync(self._path.unlink)
+        except FileNotFoundError:
+            if not missing_ok:
+                raise
+
+    if sys.version_info >= (3, 12):
+
+        async def walk(
+            self,
+            top_down: bool = True,
+            on_error: Callable[[OSError], object] | None = None,
+            follow_symlinks: bool = False,
+        ) -> AsyncIterator[tuple[Path, list[str], list[str]]]:
+            def get_next_value() -> tuple[pathlib.Path, list[str], list[str]] | None:
+                try:
+                    return next(gen)
+                except StopIteration:
+                    return None
+
+            gen = self._path.walk(top_down, on_error, follow_symlinks)
+            while True:
+                value = await to_thread.run_sync(get_next_value)
+                if value is None:
+                    return
+
+                root, dirs, paths = value
+                yield Path(root), dirs, paths
+
+    def with_name(self, name: str) -> Path:
+        return Path(self._path.with_name(name))
+
+    def with_stem(self, stem: str) -> Path:
+        return Path(self._path.with_name(stem + self._path.suffix))
+
+    def with_suffix(self, suffix: str) -> Path:
+        return Path(self._path.with_suffix(suffix))
+
+    def with_segments(self, *pathsegments: str | PathLike[str]) -> Path:
+        return Path(*pathsegments)
+
+    async def write_bytes(self, data: bytes) -> int:
+        return await to_thread.run_sync(self._path.write_bytes, data)
+
+    async def write_text(
+        self,
+        data: str,
+        encoding: str | None = None,
+        errors: str | None = None,
+        newline: str | None = None,
+    ) -> int:
+        # Path.write_text() does not support the "newline" parameter before Python 3.10
+        def sync_write_text() -> int:
+            with self._path.open(
+                "w", encoding=encoding, errors=errors, newline=newline
+            ) as fp:
+                return fp.write(data)
+
+        return await to_thread.run_sync(sync_write_text)
+
+
+PathLike.register(Path)
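
To illustrate the split described in the class docstring, a short sketch mixing pure-path operations (synchronous) with disk I/O (awaited); the file name is a placeholder::

    import anyio
    from anyio import Path


    async def main() -> None:
        path = Path("example.txt")  # placeholder name
        await path.write_text("hello\n")

        # Pure-path properties stay synchronous; I/O is awaited.
        print(path.suffix, (await path.stat()).st_size)

        async for entry in Path(".").iterdir():
            print(entry.name)


    anyio.run(main)
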
diff --git a/.venv/lib/python3.12/site-packages/anyio/_core/_resources.py b/.venv/lib/python3.12/site-packages/anyio/_core/_resources.py
new file mode 100644
index 00000000..b9a5344a
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/anyio/_core/_resources.py
@@ -0,0 +1,18 @@
+from __future__ import annotations
+
+from ..abc import AsyncResource
+from ._tasks import CancelScope
+
+
+async def aclose_forcefully(resource: AsyncResource) -> None:
+    """
+    Close an asynchronous resource in a cancelled scope.
+
+    Doing this closes the resource without waiting on anything.
+
+    :param resource: the resource to close
+
+    """
+    with CancelScope() as scope:
+        scope.cancel()
+        await resource.aclose()
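
A sketch of the intended use: abandoning a resource whose graceful close could block, such as a stream to an unresponsive peer (the target address is a placeholder)::

    import anyio
    from anyio import aclose_forcefully, connect_tcp


    async def main() -> None:
        stream = await connect_tcp("example.org", 443)  # placeholder target
        try:
            ...  # some operation fails partway through
        finally:
            # Close without waiting on any shutdown handshake.
            await aclose_forcefully(stream)


    anyio.run(main)
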
diff --git a/.venv/lib/python3.12/site-packages/anyio/_core/_signals.py b/.venv/lib/python3.12/site-packages/anyio/_core/_signals.py
new file mode 100644
index 00000000..f3451d30
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/anyio/_core/_signals.py
@@ -0,0 +1,27 @@
+from __future__ import annotations
+
+from collections.abc import AsyncIterator
+from contextlib import AbstractContextManager
+from signal import Signals
+
+from ._eventloop import get_async_backend
+
+
+def open_signal_receiver(
+    *signals: Signals,
+) -> AbstractContextManager[AsyncIterator[Signals]]:
+    """
+    Start receiving operating system signals.
+
+    :param signals: signals to receive (e.g. ``signal.SIGINT``)
+    :return: an asynchronous context manager for an asynchronous iterator which yields
+        signal numbers
+
+    .. warning:: Windows does not support signals natively so it is best to avoid
+        relying on this in cross-platform applications.
+
+    .. warning:: On asyncio, this permanently replaces any previous signal handler for
+        the given signals, as set via :meth:`~asyncio.loop.add_signal_handler`.
+
+    """
+    return get_async_backend().open_signal_receiver(*signals)
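
Typical usage per the docstring: a plain (non-async) context manager wrapping an async iterator of signal numbers (POSIX only, as the warning above notes)::

    import signal

    import anyio


    async def main() -> None:
        with anyio.open_signal_receiver(signal.SIGINT, signal.SIGTERM) as signals:
            async for signum in signals:
                print("received", signum.name)
                return


    anyio.run(main)
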
diff --git a/.venv/lib/python3.12/site-packages/anyio/_core/_sockets.py b/.venv/lib/python3.12/site-packages/anyio/_core/_sockets.py
new file mode 100644
index 00000000..054bcdda
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/anyio/_core/_sockets.py
@@ -0,0 +1,792 @@
+from __future__ import annotations
+
+import errno
+import os
+import socket
+import ssl
+import stat
+import sys
+from collections.abc import Awaitable
+from ipaddress import IPv6Address, ip_address
+from os import PathLike, chmod
+from socket import AddressFamily, SocketKind
+from typing import TYPE_CHECKING, Any, Literal, cast, overload
+
+from .. import to_thread
+from ..abc import (
+    ConnectedUDPSocket,
+    ConnectedUNIXDatagramSocket,
+    IPAddressType,
+    IPSockAddrType,
+    SocketListener,
+    SocketStream,
+    UDPSocket,
+    UNIXDatagramSocket,
+    UNIXSocketStream,
+)
+from ..streams.stapled import MultiListener
+from ..streams.tls import TLSStream
+from ._eventloop import get_async_backend
+from ._resources import aclose_forcefully
+from ._synchronization import Event
+from ._tasks import create_task_group, move_on_after
+
+if TYPE_CHECKING:
+    from _typeshed import FileDescriptorLike
+else:
+    FileDescriptorLike = object
+
+if sys.version_info < (3, 11):
+    from exceptiongroup import ExceptionGroup
+
+if sys.version_info < (3, 13):
+    from typing_extensions import deprecated
+else:
+    from warnings import deprecated
+
+IPPROTO_IPV6 = getattr(socket, "IPPROTO_IPV6", 41)  # https://bugs.python.org/issue29515
+
+AnyIPAddressFamily = Literal[
+    AddressFamily.AF_UNSPEC, AddressFamily.AF_INET, AddressFamily.AF_INET6
+]
+IPAddressFamily = Literal[AddressFamily.AF_INET, AddressFamily.AF_INET6]
+
+
+# tls_hostname given
+@overload
+async def connect_tcp(
+    remote_host: IPAddressType,
+    remote_port: int,
+    *,
+    local_host: IPAddressType | None = ...,
+    ssl_context: ssl.SSLContext | None = ...,
+    tls_standard_compatible: bool = ...,
+    tls_hostname: str,
+    happy_eyeballs_delay: float = ...,
+) -> TLSStream: ...
+
+
+# ssl_context given
+@overload
+async def connect_tcp(
+    remote_host: IPAddressType,
+    remote_port: int,
+    *,
+    local_host: IPAddressType | None = ...,
+    ssl_context: ssl.SSLContext,
+    tls_standard_compatible: bool = ...,
+    tls_hostname: str | None = ...,
+    happy_eyeballs_delay: float = ...,
+) -> TLSStream: ...
+
+
+# tls=True
+@overload
+async def connect_tcp(
+    remote_host: IPAddressType,
+    remote_port: int,
+    *,
+    local_host: IPAddressType | None = ...,
+    tls: Literal[True],
+    ssl_context: ssl.SSLContext | None = ...,
+    tls_standard_compatible: bool = ...,
+    tls_hostname: str | None = ...,
+    happy_eyeballs_delay: float = ...,
+) -> TLSStream: ...
+
+
+# tls=False
+@overload
+async def connect_tcp(
+    remote_host: IPAddressType,
+    remote_port: int,
+    *,
+    local_host: IPAddressType | None = ...,
+    tls: Literal[False],
+    ssl_context: ssl.SSLContext | None = ...,
+    tls_standard_compatible: bool = ...,
+    tls_hostname: str | None = ...,
+    happy_eyeballs_delay: float = ...,
+) -> SocketStream: ...
+
+
+# No TLS arguments
+@overload
+async def connect_tcp(
+    remote_host: IPAddressType,
+    remote_port: int,
+    *,
+    local_host: IPAddressType | None = ...,
+    happy_eyeballs_delay: float = ...,
+) -> SocketStream: ...
+
+
+async def connect_tcp(
+    remote_host: IPAddressType,
+    remote_port: int,
+    *,
+    local_host: IPAddressType | None = None,
+    tls: bool = False,
+    ssl_context: ssl.SSLContext | None = None,
+    tls_standard_compatible: bool = True,
+    tls_hostname: str | None = None,
+    happy_eyeballs_delay: float = 0.25,
+) -> SocketStream | TLSStream:
+    """
+    Connect to a host using the TCP protocol.
+
+    This function implements the stateless version of the Happy Eyeballs algorithm (RFC
+    6555). If ``remote_host`` is a host name that resolves to multiple IP addresses,
+    each one is tried until one connection attempt succeeds. If the first attempt does
+    not connect within 250 milliseconds, a second attempt is started using the next
+    address in the list, and so on. On IPv6 enabled systems, an IPv6 address (if
+    available) is tried first.
+
+    When the connection has been established, a TLS handshake will be done if either
+    ``ssl_context`` or ``tls_hostname`` is not ``None``, or if ``tls`` is ``True``.
+
+    :param remote_host: the IP address or host name to connect to
+    :param remote_port: port on the target host to connect to
+    :param local_host: the interface address or name to bind the socket to before
+        connecting
+    :param tls: ``True`` to do a TLS handshake with the connected stream and return a
+        :class:`~anyio.streams.tls.TLSStream` instead
+    :param ssl_context: the SSL context object to use (if omitted, a default context is
+        created)
+    :param tls_standard_compatible: If ``True``, performs the TLS shutdown handshake
+        before closing the stream and requires that the server does this as well.
+        Otherwise, :exc:`~ssl.SSLEOFError` may be raised during reads from the stream.
+        Some protocols, such as HTTP, require this option to be ``False``.
+        See :meth:`~ssl.SSLContext.wrap_socket` for details.
+    :param tls_hostname: host name to check the server certificate against (defaults to
+        the value of ``remote_host``)
+    :param happy_eyeballs_delay: delay (in seconds) before starting the next connection
+        attempt
+    :return: a socket stream object if no TLS handshake was done, otherwise a TLS stream
+    :raises OSError: if the connection attempt fails
+
+    """
+    # Placed here due to https://github.com/python/mypy/issues/7057
+    connected_stream: SocketStream | None = None
+
+    async def try_connect(remote_host: str, event: Event) -> None:
+        nonlocal connected_stream
+        try:
+            stream = await asynclib.connect_tcp(remote_host, remote_port, local_address)
+        except OSError as exc:
+            oserrors.append(exc)
+            return
+        else:
+            if connected_stream is None:
+                connected_stream = stream
+                tg.cancel_scope.cancel()
+            else:
+                await stream.aclose()
+        finally:
+            event.set()
+
+    asynclib = get_async_backend()
+    local_address: IPSockAddrType | None = None
+    family = socket.AF_UNSPEC
+    if local_host:
+        gai_res = await getaddrinfo(str(local_host), None)
+        family, *_, local_address = gai_res[0]
+
+    target_host = str(remote_host)
+    try:
+        addr_obj = ip_address(remote_host)
+    except ValueError:
+        addr_obj = None
+
+    if addr_obj is not None:
+        if isinstance(addr_obj, IPv6Address):
+            target_addrs = [(socket.AF_INET6, addr_obj.compressed)]
+        else:
+            target_addrs = [(socket.AF_INET, addr_obj.compressed)]
+    else:
+        # getaddrinfo() will raise an exception if name resolution fails
+        gai_res = await getaddrinfo(
+            target_host, remote_port, family=family, type=socket.SOCK_STREAM
+        )
+
+        # Organize the list so that the first address is an IPv6 address (if available)
+        # and the second one is an IPv4 address. The rest can be in whatever order.
+        v6_found = v4_found = False
+        target_addrs = []
+        for af, *rest, sa in gai_res:
+            if af == socket.AF_INET6 and not v6_found:
+                v6_found = True
+                target_addrs.insert(0, (af, sa[0]))
+            elif af == socket.AF_INET and not v4_found and v6_found:
+                v4_found = True
+                target_addrs.insert(1, (af, sa[0]))
+            else:
+                target_addrs.append((af, sa[0]))
+
+    oserrors: list[OSError] = []
+    try:
+        async with create_task_group() as tg:
+            for i, (af, addr) in enumerate(target_addrs):
+                event = Event()
+                tg.start_soon(try_connect, addr, event)
+                with move_on_after(happy_eyeballs_delay):
+                    await event.wait()
+
+        if connected_stream is None:
+            cause = (
+                oserrors[0]
+                if len(oserrors) == 1
+                else ExceptionGroup("multiple connection attempts failed", oserrors)
+            )
+            raise OSError("All connection attempts failed") from cause
+    finally:
+        oserrors.clear()
+
+    if tls or tls_hostname or ssl_context:
+        try:
+            return await TLSStream.wrap(
+                connected_stream,
+                server_side=False,
+                hostname=tls_hostname or str(remote_host),
+                ssl_context=ssl_context,
+                standard_compatible=tls_standard_compatible,
+            )
+        except BaseException:
+            await aclose_forcefully(connected_stream)
+            raise
+
+    return connected_stream
+
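
Connection sketches under stated assumptions (the host is a placeholder; any TLS argument yields a TLSStream, and tls_standard_compatible=False matches the docstring's advice for HTTP)::

    import anyio
    from anyio import connect_tcp


    async def main() -> None:
        # Plain TCP; Happy Eyeballs races the resolved addresses.
        async with await connect_tcp("example.org", 80) as stream:
            await stream.send(b"HEAD / HTTP/1.0\r\nHost: example.org\r\n\r\n")
            print(await stream.receive())

        # tls=True upgrades to a TLSStream right after connecting.
        async with await connect_tcp(
            "example.org", 443, tls=True, tls_standard_compatible=False
        ) as tls_stream:
            await tls_stream.send(b"HEAD / HTTP/1.0\r\nHost: example.org\r\n\r\n")
            print(await tls_stream.receive())


    anyio.run(main)
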
+
+async def connect_unix(path: str | bytes | PathLike[Any]) -> UNIXSocketStream:
+    """
+    Connect to the given UNIX socket.
+
+    Not available on Windows.
+
+    :param path: path to the socket
+    :return: a socket stream object
+
+    """
+    path = os.fspath(path)
+    return await get_async_backend().connect_unix(path)
+
+
+async def create_tcp_listener(
+    *,
+    local_host: IPAddressType | None = None,
+    local_port: int = 0,
+    family: AnyIPAddressFamily = socket.AddressFamily.AF_UNSPEC,
+    backlog: int = 65536,
+    reuse_port: bool = False,
+) -> MultiListener[SocketStream]:
+    """
+    Create a TCP socket listener.
+
+    :param local_port: port number to listen on
+    :param local_host: IP address of the interface to listen on. If omitted, listen on
+        all IPv4 and IPv6 interfaces. To listen on all interfaces on a specific address
+        family, use ``0.0.0.0`` for IPv4 or ``::`` for IPv6.
+    :param family: address family (used if ``local_host`` was omitted)
+    :param backlog: maximum number of queued incoming connections (up to a maximum of
+        2**16, or 65536)
+    :param reuse_port: ``True`` to allow multiple sockets to bind to the same
+        address/port (not supported on Windows)
+    :return: a list of listener objects
+
+    """
+    asynclib = get_async_backend()
+    backlog = min(backlog, 65536)
+    local_host = str(local_host) if local_host is not None else None
+    gai_res = await getaddrinfo(
+        local_host,
+        local_port,
+        family=family,
+        type=socket.SocketKind.SOCK_STREAM if sys.platform == "win32" else 0,
+        flags=socket.AI_PASSIVE | socket.AI_ADDRCONFIG,
+    )
+    listeners: list[SocketListener] = []
+    try:
+        # The set() is here to work around a glibc bug:
+        # https://sourceware.org/bugzilla/show_bug.cgi?id=14969
+        sockaddr: tuple[str, int] | tuple[str, int, int, int]
+        for fam, kind, *_, sockaddr in sorted(set(gai_res)):
+            # Workaround for an uvloop bug where we don't get the correct scope ID for
+            # IPv6 link-local addresses when passing type=socket.SOCK_STREAM to
+            # getaddrinfo(): https://github.com/MagicStack/uvloop/issues/539
+            if sys.platform != "win32" and kind is not SocketKind.SOCK_STREAM:
+                continue
+
+            raw_socket = socket.socket(fam)
+            raw_socket.setblocking(False)
+
+            # For Windows, enable exclusive address use. For others, enable address
+            # reuse.
+            if sys.platform == "win32":
+                raw_socket.setsockopt(socket.SOL_SOCKET, socket.SO_EXCLUSIVEADDRUSE, 1)
+            else:
+                raw_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+
+            if reuse_port:
+                raw_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
+
+            # If only IPv6 was requested, disable dual stack operation
+            if fam == socket.AF_INET6:
+                raw_socket.setsockopt(IPPROTO_IPV6, socket.IPV6_V6ONLY, 1)
+
+                # Workaround for #554
+                if "%" in sockaddr[0]:
+                    addr, scope_id = sockaddr[0].split("%", 1)
+                    sockaddr = (addr, sockaddr[1], 0, int(scope_id))
+
+            raw_socket.bind(sockaddr)
+            raw_socket.listen(backlog)
+            listener = asynclib.create_tcp_listener(raw_socket)
+            listeners.append(listener)
+    except BaseException:
+        for listener in listeners:
+            await listener.aclose()
+
+        raise
+
+    return MultiListener(listeners)
+
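
A sketch of serving connections with the returned MultiListener (port and handler are illustrative)::

    import anyio
    from anyio.abc import SocketStream


    async def handle(stream: SocketStream) -> None:
        async with stream:
            await stream.send(b"hello\n")


    async def main() -> None:
        listener = await anyio.create_tcp_listener(local_port=8000)
        await listener.serve(handle)


    anyio.run(main)
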
+
+async def create_unix_listener(
+    path: str | bytes | PathLike[Any],
+    *,
+    mode: int | None = None,
+    backlog: int = 65536,
+) -> SocketListener:
+    """
+    Create a UNIX socket listener.
+
+    Not available on Windows.
+
+    :param path: path of the socket
+    :param mode: permissions to set on the socket
+    :param backlog: maximum number of queued incoming connections (up to a maximum of
+        2**16, or 65536)
+    :return: a listener object
+
+    .. versionchanged:: 3.0
+        If a socket already exists on the file system in the given path, it will be
+        removed first.
+
+    """
+    backlog = min(backlog, 65536)
+    raw_socket = await setup_unix_local_socket(path, mode, socket.SOCK_STREAM)
+    try:
+        raw_socket.listen(backlog)
+        return get_async_backend().create_unix_listener(raw_socket)
+    except BaseException:
+        raw_socket.close()
+        raise
+
+
+async def create_udp_socket(
+    family: AnyIPAddressFamily = AddressFamily.AF_UNSPEC,
+    *,
+    local_host: IPAddressType | None = None,
+    local_port: int = 0,
+    reuse_port: bool = False,
+) -> UDPSocket:
+    """
+    Create a UDP socket.
+
+    If ``local_port`` has been given, the socket will be bound to this port on the
+    local machine, making this socket suitable for providing UDP based services.
+
+    :param family: address family (``AF_INET`` or ``AF_INET6``) – automatically
+        determined from ``local_host`` if omitted
+    :param local_host: IP address or host name of the local interface to bind to
+    :param local_port: local port to bind to
+    :param reuse_port: ``True`` to allow multiple sockets to bind to the same
+        address/port (not supported on Windows)
+    :return: a UDP socket
+
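+    Usage sketch (illustrative)::
+
+        async with await create_udp_socket(
+            local_host="127.0.0.1", local_port=9999
+        ) as udp:
+            packet, (host, port) = await udp.receive()
+            await udp.sendto(packet, host, port)
+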
+    """
+    if family is AddressFamily.AF_UNSPEC and not local_host:
+        raise ValueError('Either "family" or "local_host" must be given')
+
+    if local_host:
+        gai_res = await getaddrinfo(
+            str(local_host),
+            local_port,
+            family=family,
+            type=socket.SOCK_DGRAM,
+            flags=socket.AI_PASSIVE | socket.AI_ADDRCONFIG,
+        )
+        family = cast(AnyIPAddressFamily, gai_res[0][0])
+        local_address = gai_res[0][-1]
+    elif family is AddressFamily.AF_INET6:
+        local_address = ("::", 0)
+    else:
+        local_address = ("0.0.0.0", 0)
+
+    sock = await get_async_backend().create_udp_socket(
+        family, local_address, None, reuse_port
+    )
+    return cast(UDPSocket, sock)
+
+
+async def create_connected_udp_socket(
+    remote_host: IPAddressType,
+    remote_port: int,
+    *,
+    family: AnyIPAddressFamily = AddressFamily.AF_UNSPEC,
+    local_host: IPAddressType | None = None,
+    local_port: int = 0,
+    reuse_port: bool = False,
+) -> ConnectedUDPSocket:
+    """
+    Create a connected UDP socket.
+
+    Connected UDP sockets can only communicate with the specified remote host/port,
+    and any packets sent from other sources are dropped.
+
+    :param remote_host: remote host to set as the default target
+    :param remote_port: port on the remote host to set as the default target
+    :param family: address family (``AF_INET`` or ``AF_INET6``) – automatically
+        determined from ``local_host`` or ``remote_host`` if omitted
+    :param local_host: IP address or host name of the local interface to bind to
+    :param local_port: local port to bind to
+    :param reuse_port: ``True`` to allow multiple sockets to bind to the same
+        address/port (not supported on Windows)
+    :return: a connected UDP socket
+
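+    Usage sketch (illustrative)::
+
+        async with await create_connected_udp_socket("example.org", 9999) as udp:
+            await udp.send(b"ping")
+            response = await udp.receive()
+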
+    """
+    local_address = None
+    if local_host:
+        gai_res = await getaddrinfo(
+            str(local_host),
+            local_port,
+            family=family,
+            type=socket.SOCK_DGRAM,
+            flags=socket.AI_PASSIVE | socket.AI_ADDRCONFIG,
+        )
+        family = cast(AnyIPAddressFamily, gai_res[0][0])
+        local_address = gai_res[0][-1]
+
+    gai_res = await getaddrinfo(
+        str(remote_host), remote_port, family=family, type=socket.SOCK_DGRAM
+    )
+    family = cast(AnyIPAddressFamily, gai_res[0][0])
+    remote_address = gai_res[0][-1]
+
+    sock = await get_async_backend().create_udp_socket(
+        family, local_address, remote_address, reuse_port
+    )
+    return cast(ConnectedUDPSocket, sock)
+
+
+async def create_unix_datagram_socket(
+    *,
+    local_path: None | str | bytes | PathLike[Any] = None,
+    local_mode: int | None = None,
+) -> UNIXDatagramSocket:
+    """
+    Create a UNIX datagram socket.
+
+    Not available on Windows.
+
+    If ``local_path`` has been given, the socket will be bound to this path, making this
+    socket suitable for receiving datagrams from other processes. Other processes can
+    send datagrams to this socket only if ``local_path`` is set.
+
+    If a socket already exists on the file system in the ``local_path``, it will be
+    removed first.
+
+    :param local_path: the path to bind to
+    :param local_mode: permissions to set on the local socket
+    :return: a UNIX datagram socket
+
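+    Usage sketch (illustrative)::
+
+        async with await create_unix_datagram_socket(
+            local_path="/tmp/incoming.sock"
+        ) as sock:
+            datagram, sender_path = await sock.receive()
+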
+    """
+    raw_socket = await setup_unix_local_socket(
+        local_path, local_mode, socket.SOCK_DGRAM
+    )
+    return await get_async_backend().create_unix_datagram_socket(raw_socket, None)
+
+
+async def create_connected_unix_datagram_socket(
+    remote_path: str | bytes | PathLike[Any],
+    *,
+    local_path: None | str | bytes | PathLike[Any] = None,
+    local_mode: int | None = None,
+) -> ConnectedUNIXDatagramSocket:
+    """
+    Create a connected UNIX datagram socket.
+
+    Connected datagram sockets can only communicate with the specified remote path.
+
+    If ``local_path`` has been given, the socket will be bound to this path, making
+    this socket suitable for receiving datagrams from other processes. Other processes
+    can send datagrams to this socket only if ``local_path`` is set.
+
+    If a socket already exists on the file system in the ``local_path``, it will be
+    removed first.
+
+    :param remote_path: the path to set as the default target
+    :param local_path: the path to bind to
+    :param local_mode: permissions to set on the local socket
+    :return: a connected UNIX datagram socket
+
+    """
+    remote_path = os.fspath(remote_path)
+    raw_socket = await setup_unix_local_socket(
+        local_path, local_mode, socket.SOCK_DGRAM
+    )
+    return await get_async_backend().create_unix_datagram_socket(
+        raw_socket, remote_path
+    )
+
+
+async def getaddrinfo(
+    host: bytes | str | None,
+    port: str | int | None,
+    *,
+    family: int | AddressFamily = 0,
+    type: int | SocketKind = 0,
+    proto: int = 0,
+    flags: int = 0,
+) -> list[tuple[AddressFamily, SocketKind, int, str, tuple[str, int]]]:
+    """
+    Look up a numeric IP address given a host name.
+
+    Internationalized domain names are translated according to the (non-transitional)
+    IDNA 2008 standard.
+
+    .. note:: 4-tuple IPv6 socket addresses are automatically converted to 2-tuples of
+        (host, port), unlike what :func:`socket.getaddrinfo` does.
+
+    :param host: host name
+    :param port: port number
+    :param family: socket family (``AF_INET``, ...)
+    :param type: socket type (``SOCK_STREAM``, ...)
+    :param proto: protocol number
+    :param flags: flags to pass to upstream ``getaddrinfo()``
+    :return: list of tuples containing (family, type, proto, canonname, sockaddr)
+
+    .. seealso:: :func:`socket.getaddrinfo`
+
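+    Usage sketch (illustrative)::
+
+        results = await getaddrinfo("example.org", 80, type=socket.SOCK_STREAM)
+        family, type_, proto, canonname, sockaddr = results[0]
+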
+    """
+    # Handle unicode hostnames
+    if isinstance(host, str):
+        try:
+            encoded_host: bytes | None = host.encode("ascii")
+        except UnicodeEncodeError:
+            import idna
+
+            encoded_host = idna.encode(host, uts46=True)
+    else:
+        encoded_host = host
+
+    gai_res = await get_async_backend().getaddrinfo(
+        encoded_host, port, family=family, type=type, proto=proto, flags=flags
+    )
+    return [
+        (family, type, proto, canonname, convert_ipv6_sockaddr(sockaddr))
+        for family, type, proto, canonname, sockaddr in gai_res
+        # filter out IPv6 results when IPv6 is disabled
+        if not isinstance(sockaddr[0], int)
+    ]
+
+
+def getnameinfo(sockaddr: IPSockAddrType, flags: int = 0) -> Awaitable[tuple[str, str]]:
+    """
+    Look up the host name of an IP address.
+
+    :param sockaddr: socket address (e.g. (ipaddress, port) for IPv4)
+    :param flags: flags to pass to upstream ``getnameinfo()``
+    :return: a tuple of (host name, service name)
+
+    .. seealso:: :func:`socket.getnameinfo`
+
+    """
+    return get_async_backend().getnameinfo(sockaddr, flags)
+
+
+@deprecated("This function is deprecated; use `wait_readable` instead")
+def wait_socket_readable(sock: socket.socket) -> Awaitable[None]:
+    """
+    .. deprecated:: 4.7.0
+       Use :func:`wait_readable` instead.
+
+    Wait until the given socket has data to be read.
+
+    .. warning:: Only use this on raw sockets that have not been wrapped by any higher
+        level constructs like socket streams!
+
+    :param sock: a socket object
+    :raises ~anyio.ClosedResourceError: if the socket was closed while waiting for the
+        socket to become readable
+    :raises ~anyio.BusyResourceError: if another task is already waiting for the socket
+        to become readable
+
+    """
+    return get_async_backend().wait_readable(sock.fileno())
+
+
+@deprecated("This function is deprecated; use `wait_writable` instead")
+def wait_socket_writable(sock: socket.socket) -> Awaitable[None]:
+    """
+    .. deprecated:: 4.7.0
+       Use :func:`wait_writable` instead.
+
+    Wait until the given socket can be written to.
+
+    This does **NOT** work on Windows when using the asyncio backend with a proactor
+    event loop (the default on Python 3.8+).
+
+    .. warning:: Only use this on raw sockets that have not been wrapped by any higher
+        level constructs like socket streams!
+
+    :param sock: a socket object
+    :raises ~anyio.ClosedResourceError: if the socket was closed while waiting for the
+        socket to become writable
+    :raises ~anyio.BusyResourceError: if another task is already waiting for the socket
+        to become writable
+
+    """
+    return get_async_backend().wait_writable(sock.fileno())
+
+
+def wait_readable(obj: FileDescriptorLike) -> Awaitable[None]:
+    """
+    Wait until the given object has data to be read.
+
+    On Unix systems, ``obj`` must either be an integer file descriptor, or else an
+    object with a ``.fileno()`` method which returns an integer file descriptor. Any
+    kind of file descriptor can be passed, though the exact semantics will depend on
+    your kernel. For example, this probably won't do anything useful for on-disk files.
+
+    On Windows systems, ``obj`` must either be an integer ``SOCKET`` handle, or else an
+    object with a ``.fileno()`` method which returns an integer ``SOCKET`` handle. File
+    descriptors aren't supported, and neither are handles that refer to anything besides
+    a ``SOCKET``.
+
+    On backends where this functionality is not natively provided (asyncio
+    ``ProactorEventLoop`` on Windows), it is provided using a separate selector thread
+    which is set to shut down when the interpreter shuts down.
+
+    .. warning:: Don't use this on raw sockets that have been wrapped by any higher
+        level constructs like socket streams!
+
+    :param obj: an object with a ``.fileno()`` method or an integer handle
+    :raises ~anyio.ClosedResourceError: if the object was closed while waiting for the
+        object to become readable
+    :raises ~anyio.BusyResourceError: if another task is already waiting for the object
+        to become readable
+
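+    Usage sketch (illustrative; ``sock`` is an unwrapped, non-blocking socket
+    created elsewhere)::
+
+        await wait_readable(sock)
+        data = sock.recv(4096)
+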
+    """
+    return get_async_backend().wait_readable(obj)
+
+
+def wait_writable(obj: FileDescriptorLike) -> Awaitable[None]:
+    """
+    Wait until the given object can be written to.
+
+    :param obj: an object with a ``.fileno()`` method or an integer handle
+    :raises ~anyio.ClosedResourceError: if the object was closed while waiting for the
+        object to become writable
+    :raises ~anyio.BusyResourceError: if another task is already waiting for the object
+        to become writable
+
+    .. seealso:: See the documentation of :func:`wait_readable` for the definition of
+       ``obj`` and notes on backend compatibility.
+
+    .. warning:: Don't use this on raw sockets that have been wrapped by any higher
+        level constructs like socket streams!
+
+    """
+    return get_async_backend().wait_writable(obj)
+
+
+#
+# Private API
+#
+
+
+def convert_ipv6_sockaddr(
+    sockaddr: tuple[str, int, int, int] | tuple[str, int],
+) -> tuple[str, int]:
+    """
+    Convert a 4-tuple IPv6 socket address to a 2-tuple (address, port) format.
+
+    If the scope ID is nonzero, it is added to the address, separated with ``%``.
+    Otherwise, the flow ID and scope ID are simply cut off from the tuple.
+    Any other kinds of socket addresses are returned as-is.
+
+    :param sockaddr: the result of :meth:`~socket.socket.getsockname`
+    :return: the converted socket address
+
+    """
+    # This is more complicated than it should be because of MyPy
+    if isinstance(sockaddr, tuple) and len(sockaddr) == 4:
+        host, port, flowinfo, scope_id = sockaddr
+        if scope_id:
+            # PyPy (as of v7.3.11) leaves the interface name in the result, so
+            # we discard it and only get the scope ID from the end
+            # (https://foss.heptapod.net/pypy/pypy/-/issues/3938)
+            host = host.split("%")[0]
+
+            # Add scope_id to the address
+            return f"{host}%{scope_id}", port
+        else:
+            return host, port
+    else:
+        return sockaddr
+
+
+async def setup_unix_local_socket(
+    path: None | str | bytes | PathLike[Any],
+    mode: int | None,
+    socktype: int,
+) -> socket.socket:
+    """
+    Create a UNIX local socket object, deleting the socket at the given path if it
+    exists.
+
+    Not available on Windows.
+
+    :param path: path of the socket
+    :param mode: permissions to set on the socket
+    :param socktype: socket.SOCK_STREAM or socket.SOCK_DGRAM
+
+    """
+    path_str: str | None
+    if path is not None:
+        path_str = os.fsdecode(path)
+
+        # Linux abstract namespace sockets aren't backed by a concrete file, so
+        # skip the stat() call
+        if not path_str.startswith("\0"):
+            # Copied from pathlib...
+            try:
+                stat_result = os.stat(path)
+            except OSError as e:
+                if e.errno not in (
+                    errno.ENOENT,
+                    errno.ENOTDIR,
+                    errno.EBADF,
+                    errno.ELOOP,
+                ):
+                    raise
+            else:
+                if stat.S_ISSOCK(stat_result.st_mode):
+                    os.unlink(path)
+    else:
+        path_str = None
+
+    raw_socket = socket.socket(socket.AF_UNIX, socktype)
+    raw_socket.setblocking(False)
+
+    if path_str is not None:
+        try:
+            await to_thread.run_sync(raw_socket.bind, path_str, abandon_on_cancel=True)
+            if mode is not None:
+                await to_thread.run_sync(chmod, path_str, mode, abandon_on_cancel=True)
+        except BaseException:
+            raw_socket.close()
+            raise
+
+    return raw_socket
diff --git a/.venv/lib/python3.12/site-packages/anyio/_core/_streams.py b/.venv/lib/python3.12/site-packages/anyio/_core/_streams.py
new file mode 100644
index 00000000..6a9814e5
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/anyio/_core/_streams.py
@@ -0,0 +1,52 @@
+from __future__ import annotations
+
+import math
+from typing import TypeVar
+from warnings import warn
+
+from ..streams.memory import (
+    MemoryObjectReceiveStream,
+    MemoryObjectSendStream,
+    MemoryObjectStreamState,
+)
+
+T_Item = TypeVar("T_Item")
+
+
+class create_memory_object_stream(
+    tuple[MemoryObjectSendStream[T_Item], MemoryObjectReceiveStream[T_Item]],
+):
+    """
+    Create a memory object stream.
+
+    The stream's item type can be annotated like
+    :func:`create_memory_object_stream[T_Item]`.
+
+    :param max_buffer_size: number of items held in the buffer until ``send()`` starts
+        blocking
+    :param item_type: old way of marking the streams with the right generic type for
+        static typing (does nothing on AnyIO 4)
+
+        .. deprecated:: 4.0
+          Use ``create_memory_object_stream[YourItemType](...)`` instead.
+    :return: a tuple of (send stream, receive stream)
+
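+    Usage sketch (illustrative)::
+
+        send_stream, receive_stream = create_memory_object_stream[str](10)
+        await send_stream.send("hello")
+        assert await receive_stream.receive() == "hello"
+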
+    """
+
+    def __new__(  # type: ignore[misc]
+        cls, max_buffer_size: float = 0, item_type: object = None
+    ) -> tuple[MemoryObjectSendStream[T_Item], MemoryObjectReceiveStream[T_Item]]:
+        if max_buffer_size != math.inf and not isinstance(max_buffer_size, int):
+            raise ValueError("max_buffer_size must be either an integer or math.inf")
+        if max_buffer_size < 0:
+            raise ValueError("max_buffer_size cannot be negative")
+        if item_type is not None:
+            warn(
+                "The item_type argument has been deprecated in AnyIO 4.0. "
+                "Use create_memory_object_stream[YourItemType](...) instead.",
+                DeprecationWarning,
+                stacklevel=2,
+            )
+
+        state = MemoryObjectStreamState[T_Item](max_buffer_size)
+        return (MemoryObjectSendStream(state), MemoryObjectReceiveStream(state))
diff --git a/.venv/lib/python3.12/site-packages/anyio/_core/_subprocesses.py b/.venv/lib/python3.12/site-packages/anyio/_core/_subprocesses.py
new file mode 100644
index 00000000..36d9b306
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/anyio/_core/_subprocesses.py
@@ -0,0 +1,202 @@
+from __future__ import annotations
+
+import sys
+from collections.abc import AsyncIterable, Iterable, Mapping, Sequence
+from io import BytesIO
+from os import PathLike
+from subprocess import PIPE, CalledProcessError, CompletedProcess
+from typing import IO, Any, Union, cast
+
+from ..abc import Process
+from ._eventloop import get_async_backend
+from ._tasks import create_task_group
+
+if sys.version_info >= (3, 10):
+    from typing import TypeAlias
+else:
+    from typing_extensions import TypeAlias
+
+StrOrBytesPath: TypeAlias = Union[str, bytes, "PathLike[str]", "PathLike[bytes]"]
+
+
+async def run_process(
+    command: StrOrBytesPath | Sequence[StrOrBytesPath],
+    *,
+    input: bytes | None = None,
+    stdin: int | IO[Any] | None = None,
+    stdout: int | IO[Any] | None = PIPE,
+    stderr: int | IO[Any] | None = PIPE,
+    check: bool = True,
+    cwd: StrOrBytesPath | None = None,
+    env: Mapping[str, str] | None = None,
+    startupinfo: Any = None,
+    creationflags: int = 0,
+    start_new_session: bool = False,
+    pass_fds: Sequence[int] = (),
+    user: str | int | None = None,
+    group: str | int | None = None,
+    extra_groups: Iterable[str | int] | None = None,
+    umask: int = -1,
+) -> CompletedProcess[bytes]:
+    """
+    Run an external command in a subprocess and wait until it completes.
+
+    .. seealso:: :func:`subprocess.run`
+
+    :param command: either a string to pass to the shell, or an iterable of strings
+        containing the executable name or path and its arguments
+    :param input: bytes passed to the standard input of the subprocess
+    :param stdin: one of :data:`subprocess.PIPE`, :data:`subprocess.DEVNULL`,
+        a file-like object, or ``None``; ``input`` overrides this
+    :param stdout: one of :data:`subprocess.PIPE`, :data:`subprocess.DEVNULL`,
+        a file-like object, or ``None``
+    :param stderr: one of :data:`subprocess.PIPE`, :data:`subprocess.DEVNULL`,
+        :data:`subprocess.STDOUT`, a file-like object, or ``None``
+    :param check: if ``True``, raise :exc:`~subprocess.CalledProcessError` if the
+        process terminates with a return code other than 0
+    :param cwd: If not ``None``, change the working directory to this before running the
+        command
+    :param env: if not ``None``, this mapping replaces the inherited environment
+        variables from the parent process
+    :param startupinfo: an instance of :class:`subprocess.STARTUPINFO` that can be used
+        to specify process startup parameters (Windows only)
+    :param creationflags: flags that can be used to control the creation of the
+        subprocess (see :class:`subprocess.Popen` for the specifics)
+    :param start_new_session: if ``True``, the setsid() system call will be made in
+        the child process prior to the execution of the subprocess (POSIX only)
+    :param pass_fds: sequence of file descriptors to keep open between the parent and
+        child processes. (POSIX only)
+    :param user: effective user to run the process as (Python >= 3.9, POSIX only)
+    :param group: effective group to run the process as (Python >= 3.9, POSIX only)
+    :param extra_groups: supplementary groups to set in the subprocess (Python >= 3.9,
+        POSIX only)
+    :param umask: if not negative, this umask is applied in the child process before
+        running the given command (Python >= 3.9, POSIX only)
+    :return: an object representing the completed process
+    :raises ~subprocess.CalledProcessError: if ``check`` is ``True`` and the process
+        exits with a nonzero return code
+
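+    Usage sketch (illustrative)::
+
+        result = await run_process(["echo", "hello"])
+        print(result.stdout.decode())
+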
+    """
+
+    async def drain_stream(stream: AsyncIterable[bytes], index: int) -> None:
+        buffer = BytesIO()
+        async for chunk in stream:
+            buffer.write(chunk)
+
+        stream_contents[index] = buffer.getvalue()
+
+    if stdin is not None and input is not None:
+        raise ValueError("only one of stdin and input is allowed")
+
+    async with await open_process(
+        command,
+        stdin=PIPE if input else stdin,
+        stdout=stdout,
+        stderr=stderr,
+        cwd=cwd,
+        env=env,
+        startupinfo=startupinfo,
+        creationflags=creationflags,
+        start_new_session=start_new_session,
+        pass_fds=pass_fds,
+        user=user,
+        group=group,
+        extra_groups=extra_groups,
+        umask=umask,
+    ) as process:
+        stream_contents: list[bytes | None] = [None, None]
+        async with create_task_group() as tg:
+            if process.stdout:
+                tg.start_soon(drain_stream, process.stdout, 0)
+
+            if process.stderr:
+                tg.start_soon(drain_stream, process.stderr, 1)
+
+            if process.stdin and input:
+                await process.stdin.send(input)
+                await process.stdin.aclose()
+
+            await process.wait()
+
+    output, errors = stream_contents
+    if check and process.returncode != 0:
+        raise CalledProcessError(cast(int, process.returncode), command, output, errors)
+
+    return CompletedProcess(command, cast(int, process.returncode), output, errors)
+
+
+async def open_process(
+    command: StrOrBytesPath | Sequence[StrOrBytesPath],
+    *,
+    stdin: int | IO[Any] | None = PIPE,
+    stdout: int | IO[Any] | None = PIPE,
+    stderr: int | IO[Any] | None = PIPE,
+    cwd: StrOrBytesPath | None = None,
+    env: Mapping[str, str] | None = None,
+    startupinfo: Any = None,
+    creationflags: int = 0,
+    start_new_session: bool = False,
+    pass_fds: Sequence[int] = (),
+    user: str | int | None = None,
+    group: str | int | None = None,
+    extra_groups: Iterable[str | int] | None = None,
+    umask: int = -1,
+) -> Process:
+    """
+    Start an external command in a subprocess.
+
+    .. seealso:: :class:`subprocess.Popen`
+
+    :param command: either a string to pass to the shell, or an iterable of strings
+        containing the executable name or path and its arguments
+    :param stdin: one of :data:`subprocess.PIPE`, :data:`subprocess.DEVNULL`, a
+        file-like object, or ``None``
+    :param stdout: one of :data:`subprocess.PIPE`, :data:`subprocess.DEVNULL`,
+        a file-like object, or ``None``
+    :param stderr: one of :data:`subprocess.PIPE`, :data:`subprocess.DEVNULL`,
+        :data:`subprocess.STDOUT`, a file-like object, or ``None``
+    :param cwd: If not ``None``, the working directory is changed before executing
+    :param env: If env is not ``None``, it must be a mapping that defines the
+        environment variables for the new process
+    :param creationflags: flags that can be used to control the creation of the
+        subprocess (see :class:`subprocess.Popen` for the specifics)
+    :param startupinfo: an instance of :class:`subprocess.STARTUPINFO` that can be used
+        to specify process startup parameters (Windows only)
+    :param start_new_session: if ``True``, the setsid() system call will be made in
+        the child process prior to the execution of the subprocess (POSIX only)
+    :param pass_fds: sequence of file descriptors to keep open between the parent and
+        child processes. (POSIX only)
+    :param user: effective user to run the process as (POSIX only)
+    :param group: effective group to run the process as (POSIX only)
+    :param extra_groups: supplementary groups to set in the subprocess (POSIX only)
+    :param umask: if not negative, this umask is applied in the child process before
+        running the given command (POSIX only)
+    :return: an asynchronous process object
+
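+    Usage sketch (illustrative)::
+
+        async with await open_process(["cat"]) as process:
+            await process.stdin.send(b"hello")
+            await process.stdin.aclose()
+            output = await process.stdout.receive()
+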
+    """
+    kwargs: dict[str, Any] = {}
+    if user is not None:
+        kwargs["user"] = user
+
+    if group is not None:
+        kwargs["group"] = group
+
+    if extra_groups is not None:
+        kwargs["extra_groups"] = group
+
+    if umask >= 0:
+        kwargs["umask"] = umask
+
+    return await get_async_backend().open_process(
+        command,
+        stdin=stdin,
+        stdout=stdout,
+        stderr=stderr,
+        cwd=cwd,
+        env=env,
+        startupinfo=startupinfo,
+        creationflags=creationflags,
+        start_new_session=start_new_session,
+        pass_fds=pass_fds,
+        **kwargs,
+    )
diff --git a/.venv/lib/python3.12/site-packages/anyio/_core/_synchronization.py b/.venv/lib/python3.12/site-packages/anyio/_core/_synchronization.py
new file mode 100644
index 00000000..a6331328
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/anyio/_core/_synchronization.py
@@ -0,0 +1,732 @@
+from __future__ import annotations
+
+import math
+from collections import deque
+from dataclasses import dataclass
+from types import TracebackType
+
+from sniffio import AsyncLibraryNotFoundError
+
+from ..lowlevel import checkpoint
+from ._eventloop import get_async_backend
+from ._exceptions import BusyResourceError
+from ._tasks import CancelScope
+from ._testing import TaskInfo, get_current_task
+
+
+@dataclass(frozen=True)
+class EventStatistics:
+    """
+    :ivar int tasks_waiting: number of tasks waiting on :meth:`~.Event.wait`
+    """
+
+    tasks_waiting: int
+
+
+@dataclass(frozen=True)
+class CapacityLimiterStatistics:
+    """
+    :ivar int borrowed_tokens: number of tokens currently borrowed by tasks
+    :ivar float total_tokens: total number of available tokens
+    :ivar tuple borrowers: tasks or other objects currently holding tokens borrowed from
+        this limiter
+    :ivar int tasks_waiting: number of tasks waiting on
+        :meth:`~.CapacityLimiter.acquire` or
+        :meth:`~.CapacityLimiter.acquire_on_behalf_of`
+    """
+
+    borrowed_tokens: int
+    total_tokens: float
+    borrowers: tuple[object, ...]
+    tasks_waiting: int
+
+
+@dataclass(frozen=True)
+class LockStatistics:
+    """
+    :ivar bool locked: flag indicating if this lock is locked or not
+    :ivar ~anyio.TaskInfo owner: task currently holding the lock (or ``None`` if the
+        lock is not held by any task)
+    :ivar int tasks_waiting: number of tasks waiting on :meth:`~.Lock.acquire`
+    """
+
+    locked: bool
+    owner: TaskInfo | None
+    tasks_waiting: int
+
+
+@dataclass(frozen=True)
+class ConditionStatistics:
+    """
+    :ivar int tasks_waiting: number of tasks blocked on :meth:`~.Condition.wait`
+    :ivar ~anyio.LockStatistics lock_statistics: statistics of the underlying
+        :class:`~.Lock`
+    """
+
+    tasks_waiting: int
+    lock_statistics: LockStatistics
+
+
+@dataclass(frozen=True)
+class SemaphoreStatistics:
+    """
+    :ivar int tasks_waiting: number of tasks waiting on :meth:`~.Semaphore.acquire`
+
+    """
+
+    tasks_waiting: int
+
+
+class Event:
+    def __new__(cls) -> Event:
+        try:
+            return get_async_backend().create_event()
+        except AsyncLibraryNotFoundError:
+            return EventAdapter()
+
+    def set(self) -> None:
+        """Set the flag, notifying all listeners."""
+        raise NotImplementedError
+
+    def is_set(self) -> bool:
+        """Return ``True`` if the flag is set, ``False`` if not."""
+        raise NotImplementedError
+
+    async def wait(self) -> None:
+        """
+        Wait until the flag has been set.
+
+        If the flag has already been set when this method is called, it returns
+        immediately.
+
+        """
+        raise NotImplementedError
+
+    def statistics(self) -> EventStatistics:
+        """Return statistics about the current state of this event."""
+        raise NotImplementedError
+
+
+class EventAdapter(Event):
+    _internal_event: Event | None = None
+    _is_set: bool = False
+
+    def __new__(cls) -> EventAdapter:
+        return object.__new__(cls)
+
+    @property
+    def _event(self) -> Event:
+        if self._internal_event is None:
+            self._internal_event = get_async_backend().create_event()
+            if self._is_set:
+                self._internal_event.set()
+
+        return self._internal_event
+
+    def set(self) -> None:
+        if self._internal_event is None:
+            self._is_set = True
+        else:
+            self._event.set()
+
+    def is_set(self) -> bool:
+        if self._internal_event is None:
+            return self._is_set
+
+        return self._internal_event.is_set()
+
+    async def wait(self) -> None:
+        await self._event.wait()
+
+    def statistics(self) -> EventStatistics:
+        if self._internal_event is None:
+            return EventStatistics(tasks_waiting=0)
+
+        return self._internal_event.statistics()
+
+
+class Lock:
+    def __new__(cls, *, fast_acquire: bool = False) -> Lock:
+        try:
+            return get_async_backend().create_lock(fast_acquire=fast_acquire)
+        except AsyncLibraryNotFoundError:
+            return LockAdapter(fast_acquire=fast_acquire)
+
+    async def __aenter__(self) -> None:
+        await self.acquire()
+
+    async def __aexit__(
+        self,
+        exc_type: type[BaseException] | None,
+        exc_val: BaseException | None,
+        exc_tb: TracebackType | None,
+    ) -> None:
+        self.release()
+
+    async def acquire(self) -> None:
+        """Acquire the lock."""
+        raise NotImplementedError
+
+    def acquire_nowait(self) -> None:
+        """
+        Acquire the lock, without blocking.
+
+        :raises ~anyio.WouldBlock: if the operation would block
+
+        """
+        raise NotImplementedError
+
+    def release(self) -> None:
+        """Release the lock."""
+        raise NotImplementedError
+
+    def locked(self) -> bool:
+        """Return True if the lock is currently held."""
+        raise NotImplementedError
+
+    def statistics(self) -> LockStatistics:
+        """
+        Return statistics about the current state of this lock.
+
+        .. versionadded:: 3.0
+        """
+        raise NotImplementedError
+
+
+class LockAdapter(Lock):
+    _internal_lock: Lock | None = None
+
+    def __new__(cls, *, fast_acquire: bool = False) -> LockAdapter:
+        return object.__new__(cls)
+
+    def __init__(self, *, fast_acquire: bool = False):
+        self._fast_acquire = fast_acquire
+
+    @property
+    def _lock(self) -> Lock:
+        if self._internal_lock is None:
+            self._internal_lock = get_async_backend().create_lock(
+                fast_acquire=self._fast_acquire
+            )
+
+        return self._internal_lock
+
+    async def __aenter__(self) -> None:
+        await self._lock.acquire()
+
+    async def __aexit__(
+        self,
+        exc_type: type[BaseException] | None,
+        exc_val: BaseException | None,
+        exc_tb: TracebackType | None,
+    ) -> None:
+        if self._internal_lock is not None:
+            self._internal_lock.release()
+
+    async def acquire(self) -> None:
+        """Acquire the lock."""
+        await self._lock.acquire()
+
+    def acquire_nowait(self) -> None:
+        """
+        Acquire the lock, without blocking.
+
+        :raises ~anyio.WouldBlock: if the operation would block
+
+        """
+        self._lock.acquire_nowait()
+
+    def release(self) -> None:
+        """Release the lock."""
+        self._lock.release()
+
+    def locked(self) -> bool:
+        """Return True if the lock is currently held."""
+        return self._lock.locked()
+
+    def statistics(self) -> LockStatistics:
+        """
+        Return statistics about the current state of this lock.
+
+        .. versionadded:: 3.0
+
+        """
+        if self._internal_lock is None:
+            return LockStatistics(False, None, 0)
+
+        return self._internal_lock.statistics()
+
+
+class Condition:
+    _owner_task: TaskInfo | None = None
+
+    def __init__(self, lock: Lock | None = None):
+        self._lock = lock or Lock()
+        self._waiters: deque[Event] = deque()
+
+    async def __aenter__(self) -> None:
+        await self.acquire()
+
+    async def __aexit__(
+        self,
+        exc_type: type[BaseException] | None,
+        exc_val: BaseException | None,
+        exc_tb: TracebackType | None,
+    ) -> None:
+        self.release()
+
+    def _check_acquired(self) -> None:
+        if self._owner_task != get_current_task():
+            raise RuntimeError("The current task is not holding the underlying lock")
+
+    async def acquire(self) -> None:
+        """Acquire the underlying lock."""
+        await self._lock.acquire()
+        self._owner_task = get_current_task()
+
+    def acquire_nowait(self) -> None:
+        """
+        Acquire the underlying lock, without blocking.
+
+        :raises ~anyio.WouldBlock: if the operation would block
+
+        """
+        self._lock.acquire_nowait()
+        self._owner_task = get_current_task()
+
+    def release(self) -> None:
+        """Release the underlying lock."""
+        self._lock.release()
+
+    def locked(self) -> bool:
+        """Return True if the lock is set."""
+        return self._lock.locked()
+
+    def notify(self, n: int = 1) -> None:
+        """Notify exactly n listeners."""
+        self._check_acquired()
+        for _ in range(n):
+            try:
+                event = self._waiters.popleft()
+            except IndexError:
+                break
+
+            event.set()
+
+    def notify_all(self) -> None:
+        """Notify all the listeners."""
+        self._check_acquired()
+        for event in self._waiters:
+            event.set()
+
+        self._waiters.clear()
+
+    async def wait(self) -> None:
+        """Wait for a notification."""
+        await checkpoint()
+        event = Event()
+        self._waiters.append(event)
+        self.release()
+        try:
+            await event.wait()
+        except BaseException:
+            if not event.is_set():
+                self._waiters.remove(event)
+
+            raise
+        finally:
+            with CancelScope(shield=True):
+                await self.acquire()
+
+    def statistics(self) -> ConditionStatistics:
+        """
+        Return statistics about the current state of this condition.
+
+        .. versionadded:: 3.0
+        """
+        return ConditionStatistics(len(self._waiters), self._lock.statistics())
+
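+# Illustrative sketch (not part of anyio): one task waits on a Condition while
+# another task, holding the same condition, notifies it.
+#
+#     condition = Condition()
+#
+#     async with condition:       # waiting task
+#         await condition.wait()  # releases the lock while waiting
+#
+#     async with condition:       # notifying task
+#         condition.notify()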
+
+class Semaphore:
+    def __new__(
+        cls,
+        initial_value: int,
+        *,
+        max_value: int | None = None,
+        fast_acquire: bool = False,
+    ) -> Semaphore:
+        try:
+            return get_async_backend().create_semaphore(
+                initial_value, max_value=max_value, fast_acquire=fast_acquire
+            )
+        except AsyncLibraryNotFoundError:
+            return SemaphoreAdapter(initial_value, max_value=max_value)
+
+    def __init__(
+        self,
+        initial_value: int,
+        *,
+        max_value: int | None = None,
+        fast_acquire: bool = False,
+    ):
+        if not isinstance(initial_value, int):
+            raise TypeError("initial_value must be an integer")
+        if initial_value < 0:
+            raise ValueError("initial_value must be >= 0")
+        if max_value is not None:
+            if not isinstance(max_value, int):
+                raise TypeError("max_value must be an integer or None")
+            if max_value < initial_value:
+                raise ValueError(
+                    "max_value must be equal to or higher than initial_value"
+                )
+
+        self._fast_acquire = fast_acquire
+
+    async def __aenter__(self) -> Semaphore:
+        await self.acquire()
+        return self
+
+    async def __aexit__(
+        self,
+        exc_type: type[BaseException] | None,
+        exc_val: BaseException | None,
+        exc_tb: TracebackType | None,
+    ) -> None:
+        self.release()
+
+    async def acquire(self) -> None:
+        """Decrement the semaphore value, blocking if necessary."""
+        raise NotImplementedError
+
+    def acquire_nowait(self) -> None:
+        """
+        Acquire the underlying lock, without blocking.
+
+        :raises ~anyio.WouldBlock: if the operation would block
+
+        """
+        raise NotImplementedError
+
+    def release(self) -> None:
+        """Increment the semaphore value."""
+        raise NotImplementedError
+
+    @property
+    def value(self) -> int:
+        """The current value of the semaphore."""
+        raise NotImplementedError
+
+    @property
+    def max_value(self) -> int | None:
+        """The maximum value of the semaphore."""
+        raise NotImplementedError
+
+    def statistics(self) -> SemaphoreStatistics:
+        """
+        Return statistics about the current state of this semaphore.
+
+        .. versionadded:: 3.0
+        """
+        raise NotImplementedError
+
+
+class SemaphoreAdapter(Semaphore):
+    _internal_semaphore: Semaphore | None = None
+
+    def __new__(
+        cls,
+        initial_value: int,
+        *,
+        max_value: int | None = None,
+        fast_acquire: bool = False,
+    ) -> SemaphoreAdapter:
+        return object.__new__(cls)
+
+    def __init__(
+        self,
+        initial_value: int,
+        *,
+        max_value: int | None = None,
+        fast_acquire: bool = False,
+    ) -> None:
+        super().__init__(initial_value, max_value=max_value, fast_acquire=fast_acquire)
+        self._initial_value = initial_value
+        self._max_value = max_value
+
+    @property
+    def _semaphore(self) -> Semaphore:
+        if self._internal_semaphore is None:
+            self._internal_semaphore = get_async_backend().create_semaphore(
+                self._initial_value, max_value=self._max_value
+            )
+
+        return self._internal_semaphore
+
+    async def acquire(self) -> None:
+        await self._semaphore.acquire()
+
+    def acquire_nowait(self) -> None:
+        self._semaphore.acquire_nowait()
+
+    def release(self) -> None:
+        self._semaphore.release()
+
+    @property
+    def value(self) -> int:
+        if self._internal_semaphore is None:
+            return self._initial_value
+
+        return self._semaphore.value
+
+    @property
+    def max_value(self) -> int | None:
+        return self._max_value
+
+    def statistics(self) -> SemaphoreStatistics:
+        if self._internal_semaphore is None:
+            return SemaphoreStatistics(tasks_waiting=0)
+
+        return self._semaphore.statistics()
+
+
+class CapacityLimiter:
+    def __new__(cls, total_tokens: float) -> CapacityLimiter:
+        try:
+            return get_async_backend().create_capacity_limiter(total_tokens)
+        except AsyncLibraryNotFoundError:
+            return CapacityLimiterAdapter(total_tokens)
+
+    async def __aenter__(self) -> None:
+        raise NotImplementedError
+
+    async def __aexit__(
+        self,
+        exc_type: type[BaseException] | None,
+        exc_val: BaseException | None,
+        exc_tb: TracebackType | None,
+    ) -> bool | None:
+        raise NotImplementedError
+
+    @property
+    def total_tokens(self) -> float:
+        """
+        The total number of tokens available for borrowing.
+
+        This is a read-write property. If the total number of tokens is increased, the
+        proportionate number of tasks waiting on this limiter will be granted their
+        tokens.
+
+        .. versionchanged:: 3.0
+            The property is now writable.
+
+        """
+        raise NotImplementedError
+
+    @total_tokens.setter
+    def total_tokens(self, value: float) -> None:
+        raise NotImplementedError
+
+    @property
+    def borrowed_tokens(self) -> int:
+        """The number of tokens that have currently been borrowed."""
+        raise NotImplementedError
+
+    @property
+    def available_tokens(self) -> float:
+        """The number of tokens currently available to be borrowed"""
+        raise NotImplementedError
+
+    def acquire_nowait(self) -> None:
+        """
+        Acquire a token for the current task without waiting for one to become
+        available.
+
+        :raises ~anyio.WouldBlock: if there are no tokens available for borrowing
+
+        """
+        raise NotImplementedError
+
+    def acquire_on_behalf_of_nowait(self, borrower: object) -> None:
+        """
+        Acquire a token without waiting for one to become available.
+
+        :param borrower: the entity borrowing a token
+        :raises ~anyio.WouldBlock: if there are no tokens available for borrowing
+
+        """
+        raise NotImplementedError
+
+    async def acquire(self) -> None:
+        """
+        Acquire a token for the current task, waiting if necessary for one to become
+        available.
+
+        """
+        raise NotImplementedError
+
+    async def acquire_on_behalf_of(self, borrower: object) -> None:
+        """
+        Acquire a token, waiting if necessary for one to become available.
+
+        :param borrower: the entity borrowing a token
+
+        """
+        raise NotImplementedError
+
+    def release(self) -> None:
+        """
+        Release the token held by the current task.
+
+        :raises RuntimeError: if the current task has not borrowed a token from this
+            limiter.
+
+        """
+        raise NotImplementedError
+
+    def release_on_behalf_of(self, borrower: object) -> None:
+        """
+        Release the token held by the given borrower.
+
+        :raises RuntimeError: if the borrower has not borrowed a token from this
+            limiter.
+
+        """
+        raise NotImplementedError
+
+    def statistics(self) -> CapacityLimiterStatistics:
+        """
+        Return statistics about the current state of this limiter.
+
+        .. versionadded:: 3.0
+
+        """
+        raise NotImplementedError
+
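+# Illustrative sketch (not part of anyio): limit concurrency to 4 tasks. The
+# download() coroutine is hypothetical.
+#
+#     limiter = CapacityLimiter(4)
+#
+#     async def download(url: str) -> None:
+#         async with limiter:
+#             ...  # at most 4 tasks run this block concurrently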
+
+class CapacityLimiterAdapter(CapacityLimiter):
+    _internal_limiter: CapacityLimiter | None = None
+
+    def __new__(cls, total_tokens: float) -> CapacityLimiterAdapter:
+        return object.__new__(cls)
+
+    def __init__(self, total_tokens: float) -> None:
+        self.total_tokens = total_tokens
+
+    @property
+    def _limiter(self) -> CapacityLimiter:
+        if self._internal_limiter is None:
+            self._internal_limiter = get_async_backend().create_capacity_limiter(
+                self._total_tokens
+            )
+
+        return self._internal_limiter
+
+    async def __aenter__(self) -> None:
+        await self._limiter.__aenter__()
+
+    async def __aexit__(
+        self,
+        exc_type: type[BaseException] | None,
+        exc_val: BaseException | None,
+        exc_tb: TracebackType | None,
+    ) -> bool | None:
+        return await self._limiter.__aexit__(exc_type, exc_val, exc_tb)
+
+    @property
+    def total_tokens(self) -> float:
+        if self._internal_limiter is None:
+            return self._total_tokens
+
+        return self._internal_limiter.total_tokens
+
+    @total_tokens.setter
+    def total_tokens(self, value: float) -> None:
+        if not isinstance(value, int) and value is not math.inf:
+            raise TypeError("total_tokens must be an int or math.inf")
+        elif value < 1:
+            raise ValueError("total_tokens must be >= 1")
+
+        if self._internal_limiter is None:
+            self._total_tokens = value
+            return
+
+        self._limiter.total_tokens = value
+
+    @property
+    def borrowed_tokens(self) -> int:
+        if self._internal_limiter is None:
+            return 0
+
+        return self._internal_limiter.borrowed_tokens
+
+    @property
+    def available_tokens(self) -> float:
+        if self._internal_limiter is None:
+            return self._total_tokens
+
+        return self._internal_limiter.available_tokens
+
+    def acquire_nowait(self) -> None:
+        self._limiter.acquire_nowait()
+
+    def acquire_on_behalf_of_nowait(self, borrower: object) -> None:
+        self._limiter.acquire_on_behalf_of_nowait(borrower)
+
+    async def acquire(self) -> None:
+        await self._limiter.acquire()
+
+    async def acquire_on_behalf_of(self, borrower: object) -> None:
+        await self._limiter.acquire_on_behalf_of(borrower)
+
+    def release(self) -> None:
+        self._limiter.release()
+
+    def release_on_behalf_of(self, borrower: object) -> None:
+        self._limiter.release_on_behalf_of(borrower)
+
+    def statistics(self) -> CapacityLimiterStatistics:
+        if self._internal_limiter is None:
+            return CapacityLimiterStatistics(
+                borrowed_tokens=0,
+                total_tokens=self.total_tokens,
+                borrowers=(),
+                tasks_waiting=0,
+            )
+
+        return self._internal_limiter.statistics()
+
+
+class ResourceGuard:
+    """
+    A context manager for ensuring that a resource is only used by a single task at a
+    time.
+
+    Entering this context manager while a previous entry has not yet been exited
+    will trigger :exc:`BusyResourceError`.
+
+    :param action: the action to guard against (visible in the :exc:`BusyResourceError`
+        when triggered, e.g. "Another task is already {action} this resource")
+
+    .. versionadded:: 4.1
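+
+    Usage sketch (illustrative)::
+
+        guard = ResourceGuard("reading from")
+        with guard:
+            ...  # concurrent entry by another task raises BusyResourceError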
+    """
+
+    __slots__ = "action", "_guarded"
+
+    def __init__(self, action: str = "using"):
+        self.action: str = action
+        self._guarded = False
+
+    def __enter__(self) -> None:
+        if self._guarded:
+            raise BusyResourceError(self.action)
+
+        self._guarded = True
+
+    def __exit__(
+        self,
+        exc_type: type[BaseException] | None,
+        exc_val: BaseException | None,
+        exc_tb: TracebackType | None,
+    ) -> None:
+        self._guarded = False
diff --git a/.venv/lib/python3.12/site-packages/anyio/_core/_tasks.py b/.venv/lib/python3.12/site-packages/anyio/_core/_tasks.py
new file mode 100644
index 00000000..fe490151
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/anyio/_core/_tasks.py
@@ -0,0 +1,158 @@
+from __future__ import annotations
+
+import math
+from collections.abc import Generator
+from contextlib import contextmanager
+from types import TracebackType
+
+from ..abc._tasks import TaskGroup, TaskStatus
+from ._eventloop import get_async_backend
+
+
+class _IgnoredTaskStatus(TaskStatus[object]):
+    def started(self, value: object = None) -> None:
+        pass
+
+
+TASK_STATUS_IGNORED = _IgnoredTaskStatus()
+
+
+class CancelScope:
+    """
+    Wraps a unit of work that can be made separately cancellable.
+
+    :param deadline: The time (clock value) when this scope is cancelled automatically
+    :param shield: ``True`` to shield the cancel scope from external cancellation
+    """
+
+    def __new__(
+        cls, *, deadline: float = math.inf, shield: bool = False
+    ) -> CancelScope:
+        return get_async_backend().create_cancel_scope(shield=shield, deadline=deadline)
+
+    def cancel(self) -> None:
+        """Cancel this scope immediately."""
+        raise NotImplementedError
+
+    @property
+    def deadline(self) -> float:
+        """
+        The time (clock value) when this scope is cancelled automatically.
+
+        Will be ``float('inf')`` if no timeout has been set.
+
+        """
+        raise NotImplementedError
+
+    @deadline.setter
+    def deadline(self, value: float) -> None:
+        raise NotImplementedError
+
+    @property
+    def cancel_called(self) -> bool:
+        """``True`` if :meth:`cancel` has been called."""
+        raise NotImplementedError
+
+    @property
+    def cancelled_caught(self) -> bool:
+        """
+        ``True`` if this scope suppressed a cancellation exception that it itself
+        raised.
+
+        This is typically used to check if any work was interrupted, or to see if the
+        scope was cancelled due to its deadline being reached. The value will, however,
+        only be ``True`` if the cancellation was triggered by the scope itself (and not
+        an outer scope).
+
+        """
+        raise NotImplementedError
+
+    @property
+    def shield(self) -> bool:
+        """
+        ``True`` if this scope is shielded from external cancellation.
+
+        While a scope is shielded, it will not receive cancellations from outside.
+
+        """
+        raise NotImplementedError
+
+    @shield.setter
+    def shield(self, value: bool) -> None:
+        raise NotImplementedError
+
+    def __enter__(self) -> CancelScope:
+        raise NotImplementedError
+
+    def __exit__(
+        self,
+        exc_type: type[BaseException] | None,
+        exc_val: BaseException | None,
+        exc_tb: TracebackType | None,
+    ) -> bool:
+        raise NotImplementedError
+
+
+@contextmanager
+def fail_after(
+    delay: float | None, shield: bool = False
+) -> Generator[CancelScope, None, None]:
+    """
+    Create a context manager which raises a :class:`TimeoutError` if it does not
+    finish in time.
+
+    :param delay: maximum allowed time (in seconds) before raising the exception, or
+        ``None`` to disable the timeout
+    :param shield: ``True`` to shield the cancel scope from external cancellation
+    :return: a context manager that yields a cancel scope
+    :rtype: :class:`~typing.ContextManager`\\[:class:`~anyio.CancelScope`\\]
+
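+    Usage sketch (``do_work()`` is a hypothetical coroutine)::
+
+        with fail_after(10):
+            await do_work()  # TimeoutError is raised if this takes over 10 s
+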
+    """
+    current_time = get_async_backend().current_time
+    deadline = (current_time() + delay) if delay is not None else math.inf
+    with get_async_backend().create_cancel_scope(
+        deadline=deadline, shield=shield
+    ) as cancel_scope:
+        yield cancel_scope
+
+    if cancel_scope.cancelled_caught and current_time() >= cancel_scope.deadline:
+        raise TimeoutError
+
+
+def move_on_after(delay: float | None, shield: bool = False) -> CancelScope:
+    """
+    Create a cancel scope with a deadline that expires after the given delay.
+
+    :param delay: maximum allowed time (in seconds) before exiting the context block, or
+        ``None`` to disable the timeout
+    :param shield: ``True`` to shield the cancel scope from external cancellation
+    :return: a cancel scope
+
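+    Usage sketch (``do_work()`` is a hypothetical coroutine)::
+
+        with move_on_after(5) as scope:
+            await do_work()
+
+        if scope.cancelled_caught:
+            ...  # the operation timed out
+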
+    """
+    deadline = (
+        (get_async_backend().current_time() + delay) if delay is not None else math.inf
+    )
+    return get_async_backend().create_cancel_scope(deadline=deadline, shield=shield)
+
+
+def current_effective_deadline() -> float:
+    """
+    Return the nearest deadline among all the cancel scopes effective for the current
+    task.
+
+    :return: a clock value from the event loop's internal clock (or ``float('inf')`` if
+        there is no deadline in effect, or ``float('-inf')`` if the current scope has
+        been cancelled)
+    :rtype: float
+
+    """
+    return get_async_backend().current_effective_deadline()
+
+
+def create_task_group() -> TaskGroup:
+    """
+    Create a task group.
+
+    :return: a task group
+
+    """
+    return get_async_backend().create_task_group()
diff --git a/.venv/lib/python3.12/site-packages/anyio/_core/_tempfile.py b/.venv/lib/python3.12/site-packages/anyio/_core/_tempfile.py
new file mode 100644
index 00000000..26d70eca
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/anyio/_core/_tempfile.py
@@ -0,0 +1,616 @@
+from __future__ import annotations
+
+import os
+import sys
+import tempfile
+from collections.abc import Iterable
+from io import BytesIO, TextIOWrapper
+from types import TracebackType
+from typing import (
+    TYPE_CHECKING,
+    Any,
+    AnyStr,
+    Generic,
+    overload,
+)
+
+from .. import to_thread
+from .._core._fileio import AsyncFile
+from ..lowlevel import checkpoint_if_cancelled
+
+if TYPE_CHECKING:
+    from _typeshed import OpenBinaryMode, OpenTextMode, ReadableBuffer, WriteableBuffer
+
+
+class TemporaryFile(Generic[AnyStr]):
+    """
+    An asynchronous temporary file that is automatically created and cleaned up.
+
+    This class provides an asynchronous context manager interface to a temporary file.
+    The file is created using Python's standard :func:`tempfile.TemporaryFile`
+    function in a background thread, and is wrapped as an asynchronous file using
+    :class:`AsyncFile`.
+
+    :param mode: The mode in which the file is opened. Defaults to "w+b".
+    :param buffering: The buffering policy (-1 means the default buffering).
+    :param encoding: The encoding used to decode or encode the file. Only applicable in
+        text mode.
+    :param newline: Controls how universal newlines mode works (only applicable in text
+        mode).
+    :param suffix: The suffix for the temporary file name.
+    :param prefix: The prefix for the temporary file name.
+    :param dir: The directory in which the temporary file is created.
+    :param errors: The error handling scheme used for encoding/decoding errors.
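+
+    Usage sketch (illustrative)::
+
+        async with TemporaryFile() as f:
+            await f.write(b"data")
+            await f.seek(0)
+            assert await f.read() == b"data"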
+    """
+
+    _async_file: AsyncFile[AnyStr]
+
+    @overload
+    def __init__(
+        self: TemporaryFile[bytes],
+        mode: OpenBinaryMode = ...,
+        buffering: int = ...,
+        encoding: str | None = ...,
+        newline: str | None = ...,
+        suffix: str | None = ...,
+        prefix: str | None = ...,
+        dir: str | None = ...,
+        *,
+        errors: str | None = ...,
+    ): ...
+    @overload
+    def __init__(
+        self: TemporaryFile[str],
+        mode: OpenTextMode,
+        buffering: int = ...,
+        encoding: str | None = ...,
+        newline: str | None = ...,
+        suffix: str | None = ...,
+        prefix: str | None = ...,
+        dir: str | None = ...,
+        *,
+        errors: str | None = ...,
+    ): ...
+
+    def __init__(
+        self,
+        mode: OpenTextMode | OpenBinaryMode = "w+b",
+        buffering: int = -1,
+        encoding: str | None = None,
+        newline: str | None = None,
+        suffix: str | None = None,
+        prefix: str | None = None,
+        dir: str | None = None,
+        *,
+        errors: str | None = None,
+    ) -> None:
+        self.mode = mode
+        self.buffering = buffering
+        self.encoding = encoding
+        self.newline = newline
+        self.suffix: str | None = suffix
+        self.prefix: str | None = prefix
+        self.dir: str | None = dir
+        self.errors = errors
+
+    async def __aenter__(self) -> AsyncFile[AnyStr]:
+        fp = await to_thread.run_sync(
+            lambda: tempfile.TemporaryFile(
+                self.mode,
+                self.buffering,
+                self.encoding,
+                self.newline,
+                self.suffix,
+                self.prefix,
+                self.dir,
+                errors=self.errors,
+            )
+        )
+        self._async_file = AsyncFile(fp)
+        return self._async_file
+
+    async def __aexit__(
+        self,
+        exc_type: type[BaseException] | None,
+        exc_value: BaseException | None,
+        traceback: TracebackType | None,
+    ) -> None:
+        await self._async_file.aclose()
+
+
+class NamedTemporaryFile(Generic[AnyStr]):
+    """
+    An asynchronous named temporary file that is automatically created and cleaned up.
+
+    This class provides an asynchronous context manager for a temporary file with a
+    visible name in the file system. It uses Python's standard
+    :func:`~tempfile.NamedTemporaryFile` function and wraps the file object with
+    :class:`AsyncFile` for asynchronous operations.
+
+    :param mode: The mode in which the file is opened. Defaults to "w+b".
+    :param buffering: The buffering policy (-1 means the default buffering).
+    :param encoding: The encoding used to decode or encode the file. Only applicable in
+        text mode.
+    :param newline: Controls how universal newlines mode works (only applicable in text
+        mode).
+    :param suffix: The suffix for the temporary file name.
+    :param prefix: The prefix for the temporary file name.
+    :param dir: The directory in which the temporary file is created.
+    :param delete: Whether to delete the file when it is closed.
+    :param errors: The error handling scheme used for encoding/decoding errors.
+    :param delete_on_close: (Python 3.12+) Whether to delete the file on close.
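+
+    Usage sketch (illustrative; the on-disk path is available via the wrapped
+    file object)::
+
+        async with NamedTemporaryFile(suffix=".log") as f:
+            await f.write(b"log entry")
+            path = f.wrapped.name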
+    """
+
+    _async_file: AsyncFile[AnyStr]
+
+    @overload
+    def __init__(
+        self: NamedTemporaryFile[bytes],
+        mode: OpenBinaryMode = ...,
+        buffering: int = ...,
+        encoding: str | None = ...,
+        newline: str | None = ...,
+        suffix: str | None = ...,
+        prefix: str | None = ...,
+        dir: str | None = ...,
+        delete: bool = ...,
+        *,
+        errors: str | None = ...,
+        delete_on_close: bool = ...,
+    ): ...
+    @overload
+    def __init__(
+        self: NamedTemporaryFile[str],
+        mode: OpenTextMode,
+        buffering: int = ...,
+        encoding: str | None = ...,
+        newline: str | None = ...,
+        suffix: str | None = ...,
+        prefix: str | None = ...,
+        dir: str | None = ...,
+        delete: bool = ...,
+        *,
+        errors: str | None = ...,
+        delete_on_close: bool = ...,
+    ): ...
+
+    def __init__(
+        self,
+        mode: OpenBinaryMode | OpenTextMode = "w+b",
+        buffering: int = -1,
+        encoding: str | None = None,
+        newline: str | None = None,
+        suffix: str | None = None,
+        prefix: str | None = None,
+        dir: str | None = None,
+        delete: bool = True,
+        *,
+        errors: str | None = None,
+        delete_on_close: bool = True,
+    ) -> None:
+        self._params: dict[str, Any] = {
+            "mode": mode,
+            "buffering": buffering,
+            "encoding": encoding,
+            "newline": newline,
+            "suffix": suffix,
+            "prefix": prefix,
+            "dir": dir,
+            "delete": delete,
+            "errors": errors,
+        }
+        if sys.version_info >= (3, 12):
+            self._params["delete_on_close"] = delete_on_close
+
+    async def __aenter__(self) -> AsyncFile[AnyStr]:
+        fp = await to_thread.run_sync(
+            lambda: tempfile.NamedTemporaryFile(**self._params)
+        )
+        self._async_file = AsyncFile(fp)
+        return self._async_file
+
+    async def __aexit__(
+        self,
+        exc_type: type[BaseException] | None,
+        exc_value: BaseException | None,
+        traceback: TracebackType | None,
+    ) -> None:
+        await self._async_file.aclose()
+
+
+class SpooledTemporaryFile(AsyncFile[AnyStr]):
+    """
+    An asynchronous spooled temporary file that starts in memory and is spooled to disk.
+
+    This class provides an asynchronous interface to a spooled temporary file, much like
+    Python's standard :class:`~tempfile.SpooledTemporaryFile`. It supports asynchronous
+    write operations and provides a method to force a rollover to disk.
+
+    :param max_size: Maximum size in bytes before the file is rolled over to disk
+        (with the default of 0, the file is rolled over on the first write).
+    :param mode: The mode in which the file is opened. Defaults to "w+b".
+    :param buffering: The buffering policy (-1 means the default buffering).
+    :param encoding: The encoding used to decode or encode the file (text mode only).
+    :param newline: Controls how universal newlines mode works (text mode only).
+    :param suffix: The suffix for the temporary file name.
+    :param prefix: The prefix for the temporary file name.
+    :param dir: The directory in which the temporary file is created.
+    :param errors: The error handling scheme used for encoding/decoding errors.
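+
+    A minimal usage sketch (sizes are illustrative)::
+
+        from anyio import SpooledTemporaryFile
+
+        async def main() -> None:
+            async with SpooledTemporaryFile(max_size=1024) as f:
+                await f.write(b"x" * 2048)  # exceeds max_size and rolls over to disk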
+    """
+
+    _rolled: bool = False
+
+    @overload
+    def __init__(
+        self: SpooledTemporaryFile[bytes],
+        max_size: int = ...,
+        mode: OpenBinaryMode = ...,
+        buffering: int = ...,
+        encoding: str | None = ...,
+        newline: str | None = ...,
+        suffix: str | None = ...,
+        prefix: str | None = ...,
+        dir: str | None = ...,
+        *,
+        errors: str | None = ...,
+    ): ...
+    @overload
+    def __init__(
+        self: SpooledTemporaryFile[str],
+        max_size: int = ...,
+        mode: OpenTextMode = ...,
+        buffering: int = ...,
+        encoding: str | None = ...,
+        newline: str | None = ...,
+        suffix: str | None = ...,
+        prefix: str | None = ...,
+        dir: str | None = ...,
+        *,
+        errors: str | None = ...,
+    ): ...
+
+    def __init__(
+        self,
+        max_size: int = 0,
+        mode: OpenBinaryMode | OpenTextMode = "w+b",
+        buffering: int = -1,
+        encoding: str | None = None,
+        newline: str | None = None,
+        suffix: str | None = None,
+        prefix: str | None = None,
+        dir: str | None = None,
+        *,
+        errors: str | None = None,
+    ) -> None:
+        self._tempfile_params: dict[str, Any] = {
+            "mode": mode,
+            "buffering": buffering,
+            "encoding": encoding,
+            "newline": newline,
+            "suffix": suffix,
+            "prefix": prefix,
+            "dir": dir,
+            "errors": errors,
+        }
+        self._max_size = max_size
+        if "b" in mode:
+            super().__init__(BytesIO())  # type: ignore[arg-type]
+        else:
+            super().__init__(
+                TextIOWrapper(  # type: ignore[arg-type]
+                    BytesIO(),
+                    encoding=encoding,
+                    errors=errors,
+                    newline=newline,
+                    write_through=True,
+                )
+            )
+
+    async def aclose(self) -> None:
+        if not self._rolled:
+            self._fp.close()
+            return
+
+        await super().aclose()
+
+    async def _check(self) -> None:
+        if self._rolled or self._fp.tell() < self._max_size:
+            return
+
+        await self.rollover()
+
+    async def rollover(self) -> None:
+        """Move the file's contents from the in-memory buffer to a file on disk."""
+        if self._rolled:
+            return
+
+        self._rolled = True
+        buffer = self._fp
+        buffer.seek(0)
+        self._fp = await to_thread.run_sync(
+            lambda: tempfile.TemporaryFile(**self._tempfile_params)
+        )
+        await self.write(buffer.read())
+        buffer.close()
+
+    @property
+    def closed(self) -> bool:
+        return self._fp.closed
+
+    async def read(self, size: int = -1) -> AnyStr:
+        if not self._rolled:
+            await checkpoint_if_cancelled()
+            return self._fp.read(size)
+
+        return await super().read(size)  # type: ignore[return-value]
+
+    async def read1(self: SpooledTemporaryFile[bytes], size: int = -1) -> bytes:
+        if not self._rolled:
+            await checkpoint_if_cancelled()
+            return self._fp.read1(size)
+
+        return await super().read1(size)
+
+    async def readline(self) -> AnyStr:
+        if not self._rolled:
+            await checkpoint_if_cancelled()
+            return self._fp.readline()
+
+        return await super().readline()  # type: ignore[return-value]
+
+    async def readlines(self) -> list[AnyStr]:
+        if not self._rolled:
+            await checkpoint_if_cancelled()
+            return self._fp.readlines()
+
+        return await super().readlines()  # type: ignore[return-value]
+
+    async def readinto(self: SpooledTemporaryFile[bytes], b: WriteableBuffer) -> int:
+        if not self._rolled:
+            await checkpoint_if_cancelled()
+            # Return the in-memory read directly; falling through here would read
+            # from the buffer twice and misreport the byte count
+            return self._fp.readinto(b)
+
+        return await super().readinto(b)
+
+    async def readinto1(self: SpooledTemporaryFile[bytes], b: WriteableBuffer) -> int:
+        if not self._rolled:
+            await checkpoint_if_cancelled()
+            # Use readinto1() on the in-memory buffer and return its result
+            return self._fp.readinto1(b)
+
+        return await super().readinto1(b)
+
+    async def seek(self, offset: int, whence: int = os.SEEK_SET) -> int:
+        if not self._rolled:
+            await checkpoint_if_cancelled()
+            return self._fp.seek(offset, whence)
+
+        return await super().seek(offset, whence)
+
+    async def tell(self) -> int:
+        if not self._rolled:
+            await checkpoint_if_cancelled()
+            return self._fp.tell()
+
+        return await super().tell()
+
+    async def truncate(self, size: int | None = None) -> int:
+        if not self._rolled:
+            await checkpoint_if_cancelled()
+            return self._fp.truncate(size)
+
+        return await super().truncate(size)
+
+    @overload
+    async def write(self: SpooledTemporaryFile[bytes], b: ReadableBuffer) -> int: ...
+    @overload
+    async def write(self: SpooledTemporaryFile[str], b: str) -> int: ...
+
+    async def write(self, b: ReadableBuffer | str) -> int:
+        """
+        Asynchronously write data to the spooled temporary file.
+
+        If the file has not yet been rolled over, the data is written synchronously,
+        and a rollover is triggered if the size exceeds the maximum size.
+
+        :param b: The data to write.
+        :return: The number of bytes written.
+        :raises RuntimeError: If the underlying file is not initialized.
+
+        """
+        if not self._rolled:
+            await checkpoint_if_cancelled()
+            result = self._fp.write(b)
+            await self._check()
+            return result
+
+        return await super().write(b)  # type: ignore[misc]
+
+    @overload
+    async def writelines(
+        self: SpooledTemporaryFile[bytes], lines: Iterable[ReadableBuffer]
+    ) -> None: ...
+    @overload
+    async def writelines(
+        self: SpooledTemporaryFile[str], lines: Iterable[str]
+    ) -> None: ...
+
+    async def writelines(self, lines: Iterable[str] | Iterable[ReadableBuffer]) -> None:
+        """
+        Asynchronously write a list of lines to the spooled temporary file.
+
+        If the file has not yet been rolled over, the lines are written synchronously,
+        and a rollover is triggered if the size exceeds the maximum size.
+
+        :param lines: An iterable of lines to write.
+        :raises RuntimeError: If the underlying file is not initialized.
+
+        """
+        if not self._rolled:
+            await checkpoint_if_cancelled()
+            self._fp.writelines(lines)  # file.writelines() returns None
+            await self._check()
+            return
+
+        return await super().writelines(lines)  # type: ignore[misc]
+
+
+class TemporaryDirectory(Generic[AnyStr]):
+    """
+    An asynchronous temporary directory that is created and cleaned up automatically.
+
+    This class provides an asynchronous context manager for creating a temporary
+    directory. It wraps Python's standard :class:`~tempfile.TemporaryDirectory` to
+    perform directory creation and cleanup operations in a background thread.
+
+    :param suffix: Suffix to be added to the temporary directory name.
+    :param prefix: Prefix to be added to the temporary directory name.
+    :param dir: The parent directory where the temporary directory is created.
+    :param ignore_cleanup_errors: Whether to ignore errors during cleanup
+        (Python 3.10+).
+    :param delete: Whether to delete the directory upon closing (Python 3.12+).
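+
+    A minimal usage sketch (assuming the usual top-level re-export)::
+
+        from anyio import TemporaryDirectory
+
+        async def main() -> None:
+            async with TemporaryDirectory() as path:
+                print(path)  # the directory exists here; it is removed on exit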
+    """
+
+    def __init__(
+        self,
+        suffix: AnyStr | None = None,
+        prefix: AnyStr | None = None,
+        dir: AnyStr | None = None,
+        *,
+        ignore_cleanup_errors: bool = False,
+        delete: bool = True,
+    ) -> None:
+        self.suffix: AnyStr | None = suffix
+        self.prefix: AnyStr | None = prefix
+        self.dir: AnyStr | None = dir
+        self.ignore_cleanup_errors = ignore_cleanup_errors
+        self.delete = delete
+
+        self._tempdir: tempfile.TemporaryDirectory | None = None
+
+    async def __aenter__(self) -> str:
+        params: dict[str, Any] = {
+            "suffix": self.suffix,
+            "prefix": self.prefix,
+            "dir": self.dir,
+        }
+        if sys.version_info >= (3, 10):
+            params["ignore_cleanup_errors"] = self.ignore_cleanup_errors
+
+        if sys.version_info >= (3, 12):
+            params["delete"] = self.delete
+
+        self._tempdir = await to_thread.run_sync(
+            lambda: tempfile.TemporaryDirectory(**params)
+        )
+        return await to_thread.run_sync(self._tempdir.__enter__)
+
+    async def __aexit__(
+        self,
+        exc_type: type[BaseException] | None,
+        exc_value: BaseException | None,
+        traceback: TracebackType | None,
+    ) -> None:
+        if self._tempdir is not None:
+            await to_thread.run_sync(
+                self._tempdir.__exit__, exc_type, exc_value, traceback
+            )
+
+    async def cleanup(self) -> None:
+        """Remove the temporary directory in a background thread, if it was created."""
+        if self._tempdir is not None:
+            await to_thread.run_sync(self._tempdir.cleanup)
+
+
+@overload
+async def mkstemp(
+    suffix: str | None = None,
+    prefix: str | None = None,
+    dir: str | None = None,
+    text: bool = False,
+) -> tuple[int, str]: ...
+
+
+@overload
+async def mkstemp(
+    suffix: bytes | None = None,
+    prefix: bytes | None = None,
+    dir: bytes | None = None,
+    text: bool = False,
+) -> tuple[int, bytes]: ...
+
+
+async def mkstemp(
+    suffix: AnyStr | None = None,
+    prefix: AnyStr | None = None,
+    dir: AnyStr | None = None,
+    text: bool = False,
+) -> tuple[int, str | bytes]:
+    """
+    Asynchronously create a temporary file and return an OS-level handle and the file
+    name.
+
+    This function wraps :func:`~tempfile.mkstemp` and executes it in a background
+    thread.
+
+    :param suffix: Suffix to be added to the file name.
+    :param prefix: Prefix to be added to the file name.
+    :param dir: Directory in which the temporary file is created.
+    :param text: Whether the file is opened in text mode.
+    :return: A tuple containing the file descriptor and the file name.
+
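+    A minimal usage sketch (the caller is responsible for closing the descriptor
+    and removing the file)::
+
+        import os
+        from anyio import mkstemp
+
+        async def main() -> None:
+            fd, path = await mkstemp(suffix=".txt")
+            try:
+                os.write(fd, b"data")
+            finally:
+                os.close(fd)
+                os.remove(path)
+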
+    """
+    return await to_thread.run_sync(tempfile.mkstemp, suffix, prefix, dir, text)
+
+
+@overload
+async def mkdtemp(
+    suffix: str | None = None,
+    prefix: str | None = None,
+    dir: str | None = None,
+) -> str: ...
+
+
+@overload
+async def mkdtemp(
+    suffix: bytes | None = None,
+    prefix: bytes | None = None,
+    dir: bytes | None = None,
+) -> bytes: ...
+
+
+async def mkdtemp(
+    suffix: AnyStr | None = None,
+    prefix: AnyStr | None = None,
+    dir: AnyStr | None = None,
+) -> str | bytes:
+    """
+    Asynchronously create a temporary directory and return its path.
+
+    This function wraps :func:`~tempfile.mkdtemp` and executes it in a background
+    thread.
+
+    :param suffix: Suffix to be added to the directory name.
+    :param prefix: Prefix to be added to the directory name.
+    :param dir: Parent directory where the temporary directory is created.
+    :return: The path of the created temporary directory.
+
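+    A minimal usage sketch::
+
+        from anyio import mkdtemp
+
+        async def main() -> None:
+            path = await mkdtemp(prefix="scratch-")
+            print(path)  # the caller is responsible for removing the directory
+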
+    """
+    return await to_thread.run_sync(tempfile.mkdtemp, suffix, prefix, dir)
+
+
+async def gettempdir() -> str:
+    """
+    Asynchronously return the name of the directory used for temporary files.
+
+    This function wraps :func:`~tempfile.gettempdir` and executes it in a background
+    thread.
+
+    :return: The path of the temporary directory as a string.
+
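+    A minimal usage sketch::
+
+        from anyio import gettempdir
+
+        async def main() -> None:
+            print(await gettempdir())  # e.g. "/tmp" on most Unixes
+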
+    """
+    return await to_thread.run_sync(tempfile.gettempdir)
+
+
+async def gettempdirb() -> bytes:
+    """
+    Asynchronously return the name of the directory used for temporary files in bytes.
+
+    This function wraps :func:`~tempfile.gettempdirb` and executes it in a background
+    thread.
+
+    :return: The path of the temporary directory as bytes.
+
+    """
+    return await to_thread.run_sync(tempfile.gettempdirb)
diff --git a/.venv/lib/python3.12/site-packages/anyio/_core/_testing.py b/.venv/lib/python3.12/site-packages/anyio/_core/_testing.py
new file mode 100644
index 00000000..9e28b227
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/anyio/_core/_testing.py
@@ -0,0 +1,78 @@
+from __future__ import annotations
+
+from collections.abc import Awaitable, Generator
+from typing import Any, cast
+
+from ._eventloop import get_async_backend
+
+
+class TaskInfo:
+    """
+    Represents an asynchronous task.
+
+    :ivar int id: the unique identifier of the task
+    :ivar parent_id: the identifier of the parent task, if any
+    :vartype parent_id: Optional[int]
+    :ivar str name: the name of the task (if any)
+    :ivar ~collections.abc.Coroutine coro: the coroutine object of the task
+    """
+
+    __slots__ = "_name", "id", "parent_id", "name", "coro"
+
+    def __init__(
+        self,
+        id: int,
+        parent_id: int | None,
+        name: str | None,
+        coro: Generator[Any, Any, Any] | Awaitable[Any],
+    ):
+        func = get_current_task
+        self._name = f"{func.__module__}.{func.__qualname__}"
+        self.id: int = id
+        self.parent_id: int | None = parent_id
+        self.name: str | None = name
+        self.coro: Generator[Any, Any, Any] | Awaitable[Any] = coro
+
+    def __eq__(self, other: object) -> bool:
+        if isinstance(other, TaskInfo):
+            return self.id == other.id
+
+        return NotImplemented
+
+    def __hash__(self) -> int:
+        return hash(self.id)
+
+    def __repr__(self) -> str:
+        return f"{self.__class__.__name__}(id={self.id!r}, name={self.name!r})"
+
+    def has_pending_cancellation(self) -> bool:
+        """
+        Return ``True`` if the task has a cancellation pending, ``False`` otherwise.
+
+        """
+        return False
+
+
+def get_current_task() -> TaskInfo:
+    """
+    Return the current task.
+
+    :return: a representation of the current task
+
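+    A minimal usage sketch::
+
+        import anyio
+
+        async def main() -> None:
+            task = anyio.get_current_task()
+            print(task.id, task.name)
+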
+    """
+    return get_async_backend().get_current_task()
+
+
+def get_running_tasks() -> list[TaskInfo]:
+    """
+    Return a list of running tasks in the current event loop.
+
+    :return: a list of task info objects
+
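+    A minimal usage sketch::
+
+        import anyio
+
+        async def main() -> None:
+            for task in anyio.get_running_tasks():
+                print(task)  # e.g. TaskInfo(id=..., name=...)
+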
+    """
+    return cast("list[TaskInfo]", get_async_backend().get_running_tasks())
+
+
+async def wait_all_tasks_blocked() -> None:
+    """Wait until all other tasks are waiting for something."""
+    await get_async_backend().wait_all_tasks_blocked()
diff --git a/.venv/lib/python3.12/site-packages/anyio/_core/_typedattr.py b/.venv/lib/python3.12/site-packages/anyio/_core/_typedattr.py
new file mode 100644
index 00000000..f358a448
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/anyio/_core/_typedattr.py
@@ -0,0 +1,81 @@
+from __future__ import annotations
+
+from collections.abc import Callable, Mapping
+from typing import Any, TypeVar, final, overload
+
+from ._exceptions import TypedAttributeLookupError
+
+T_Attr = TypeVar("T_Attr")
+T_Default = TypeVar("T_Default")
+undefined = object()
+
+
+def typed_attribute() -> Any:
+    """Return a unique object, used to mark typed attributes."""
+    return object()
+
+
+class TypedAttributeSet:
+    """
+    Superclass for typed attribute collections.
+
+    Checks that every public attribute of every subclass has a type annotation.
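+
+    A minimal sketch (``MyAttributes`` is a hypothetical subclass)::
+
+        class MyAttributes(TypedAttributeSet):
+            hostname: str = typed_attribute()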
+    """
+
+    def __init_subclass__(cls) -> None:
+        annotations: dict[str, Any] = getattr(cls, "__annotations__", {})
+        for attrname in dir(cls):
+            if not attrname.startswith("_") and attrname not in annotations:
+                raise TypeError(
+                    f"Attribute {attrname!r} is missing its type annotation"
+                )
+
+        super().__init_subclass__()
+
+
+class TypedAttributeProvider:
+    """Base class for classes that wish to provide typed extra attributes."""
+
+    @property
+    def extra_attributes(self) -> Mapping[T_Attr, Callable[[], T_Attr]]:
+        """
+        A mapping of the extra attributes to callables that return the corresponding
+        values.
+
+        If the provider wraps another provider, the attributes from that wrapper should
+        also be included in the returned mapping (but the wrapper may override the
+        callables from the wrapped instance).
+
+        """
+        return {}
+
+    @overload
+    def extra(self, attribute: T_Attr) -> T_Attr: ...
+
+    @overload
+    def extra(self, attribute: T_Attr, default: T_Default) -> T_Attr | T_Default: ...
+
+    @final
+    def extra(self, attribute: Any, default: object = undefined) -> object:
+        """
+        extra(attribute, default=undefined)
+
+        Return the value of the given typed extra attribute.
+
+        :param attribute: the attribute (member of a :class:`~TypedAttributeSet`) to
+            look for
+        :param default: the value that should be returned if no value is found for the
+            attribute
+        :raises ~anyio.TypedAttributeLookupError: if the search failed and no default
+            value was given
+
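+        A minimal sketch, reusing the hypothetical ``MyAttributes`` set from above::
+
+            class MyProvider(TypedAttributeProvider):
+                @property
+                def extra_attributes(self) -> Mapping[Any, Callable[[], Any]]:
+                    return {MyAttributes.hostname: lambda: "localhost"}
+
+            MyProvider().extra(MyAttributes.hostname)  # returns "localhost"
+            MyProvider().extra(typed_attribute(), "n/a")  # unknown key: returns "n/a"
+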
+        """
+        try:
+            getter = self.extra_attributes[attribute]
+        except KeyError:
+            if default is undefined:
+                raise TypedAttributeLookupError("Attribute not found") from None
+            else:
+                return default
+
+        return getter()