Diffstat (limited to '.venv/lib/python3.12/site-packages/setuptools/_vendor/wheel')
-rw-r--r--  .venv/lib/python3.12/site-packages/setuptools/_vendor/wheel/__init__.py  3
-rw-r--r--  .venv/lib/python3.12/site-packages/setuptools/_vendor/wheel/__main__.py  23
-rw-r--r--  .venv/lib/python3.12/site-packages/setuptools/_vendor/wheel/_bdist_wheel.py  613
-rw-r--r--  .venv/lib/python3.12/site-packages/setuptools/_vendor/wheel/_setuptools_logging.py  26
-rw-r--r--  .venv/lib/python3.12/site-packages/setuptools/_vendor/wheel/bdist_wheel.py  26
-rw-r--r--  .venv/lib/python3.12/site-packages/setuptools/_vendor/wheel/cli/__init__.py  155
-rw-r--r--  .venv/lib/python3.12/site-packages/setuptools/_vendor/wheel/cli/convert.py  332
-rw-r--r--  .venv/lib/python3.12/site-packages/setuptools/_vendor/wheel/cli/pack.py  85
-rw-r--r--  .venv/lib/python3.12/site-packages/setuptools/_vendor/wheel/cli/tags.py  139
-rw-r--r--  .venv/lib/python3.12/site-packages/setuptools/_vendor/wheel/cli/unpack.py  30
-rw-r--r--  .venv/lib/python3.12/site-packages/setuptools/_vendor/wheel/macosx_libfile.py  482
-rw-r--r--  .venv/lib/python3.12/site-packages/setuptools/_vendor/wheel/metadata.py  183
-rw-r--r--  .venv/lib/python3.12/site-packages/setuptools/_vendor/wheel/util.py  17
-rw-r--r--  .venv/lib/python3.12/site-packages/setuptools/_vendor/wheel/vendored/__init__.py  0
-rw-r--r--  .venv/lib/python3.12/site-packages/setuptools/_vendor/wheel/vendored/packaging/LICENSE  3
-rw-r--r--  .venv/lib/python3.12/site-packages/setuptools/_vendor/wheel/vendored/packaging/LICENSE.APACHE  177
-rw-r--r--  .venv/lib/python3.12/site-packages/setuptools/_vendor/wheel/vendored/packaging/LICENSE.BSD  23
-rw-r--r--  .venv/lib/python3.12/site-packages/setuptools/_vendor/wheel/vendored/packaging/__init__.py  0
-rw-r--r--  .venv/lib/python3.12/site-packages/setuptools/_vendor/wheel/vendored/packaging/_elffile.py  108
-rw-r--r--  .venv/lib/python3.12/site-packages/setuptools/_vendor/wheel/vendored/packaging/_manylinux.py  260
-rw-r--r--  .venv/lib/python3.12/site-packages/setuptools/_vendor/wheel/vendored/packaging/_musllinux.py  83
-rw-r--r--  .venv/lib/python3.12/site-packages/setuptools/_vendor/wheel/vendored/packaging/_parser.py  356
-rw-r--r--  .venv/lib/python3.12/site-packages/setuptools/_vendor/wheel/vendored/packaging/_structures.py  61
-rw-r--r--  .venv/lib/python3.12/site-packages/setuptools/_vendor/wheel/vendored/packaging/_tokenizer.py  192
-rw-r--r--  .venv/lib/python3.12/site-packages/setuptools/_vendor/wheel/vendored/packaging/markers.py  253
-rw-r--r--  .venv/lib/python3.12/site-packages/setuptools/_vendor/wheel/vendored/packaging/requirements.py  90
-rw-r--r--  .venv/lib/python3.12/site-packages/setuptools/_vendor/wheel/vendored/packaging/specifiers.py  1011
-rw-r--r--  .venv/lib/python3.12/site-packages/setuptools/_vendor/wheel/vendored/packaging/tags.py  571
-rw-r--r--  .venv/lib/python3.12/site-packages/setuptools/_vendor/wheel/vendored/packaging/utils.py  172
-rw-r--r--  .venv/lib/python3.12/site-packages/setuptools/_vendor/wheel/vendored/packaging/version.py  561
-rw-r--r--  .venv/lib/python3.12/site-packages/setuptools/_vendor/wheel/vendored/vendor.txt  1
-rw-r--r--  .venv/lib/python3.12/site-packages/setuptools/_vendor/wheel/wheelfile.py  227
32 files changed, 6263 insertions, 0 deletions
diff --git a/.venv/lib/python3.12/site-packages/setuptools/_vendor/wheel/__init__.py b/.venv/lib/python3.12/site-packages/setuptools/_vendor/wheel/__init__.py
new file mode 100644
index 00000000..3ab8f72d
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/setuptools/_vendor/wheel/__init__.py
@@ -0,0 +1,3 @@
+from __future__ import annotations
+
+__version__ = "0.45.1"
diff --git a/.venv/lib/python3.12/site-packages/setuptools/_vendor/wheel/__main__.py b/.venv/lib/python3.12/site-packages/setuptools/_vendor/wheel/__main__.py
new file mode 100644
index 00000000..0be74537
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/setuptools/_vendor/wheel/__main__.py
@@ -0,0 +1,23 @@
+"""
+Wheel command line tool (enables the "python -m wheel" syntax)
+"""
+
+from __future__ import annotations
+
+import sys
+
+
+def main():  # needed for console script
+    if __package__ == "":
+        # To be able to run 'python wheel-0.9.whl/wheel':
+        import os.path
+
+        path = os.path.dirname(os.path.dirname(__file__))
+        sys.path[0:0] = [path]
+    import wheel.cli
+
+    sys.exit(wheel.cli.main())
+
+
+if __name__ == "__main__":
+    sys.exit(main())
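
Once this entry point is importable, the CLI is reachable as a module; e.g.
running "python -m wheel version" prints "wheel 0.45.1" for the vendored
copy above, and "python -m wheel help" shows the subcommands defined in
wheel/cli/__init__.py below.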
diff --git a/.venv/lib/python3.12/site-packages/setuptools/_vendor/wheel/_bdist_wheel.py b/.venv/lib/python3.12/site-packages/setuptools/_vendor/wheel/_bdist_wheel.py
new file mode 100644
index 00000000..88973ebf
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/setuptools/_vendor/wheel/_bdist_wheel.py
@@ -0,0 +1,613 @@
+"""
+Create a wheel (.whl) distribution.
+
+A wheel is a built archive format.
+"""
+
+from __future__ import annotations
+
+import os
+import re
+import shutil
+import stat
+import struct
+import sys
+import sysconfig
+import warnings
+from email.generator import BytesGenerator, Generator
+from email.policy import EmailPolicy
+from glob import iglob
+from shutil import rmtree
+from typing import TYPE_CHECKING, Callable, Iterable, Literal, Sequence, cast
+from zipfile import ZIP_DEFLATED, ZIP_STORED
+
+import setuptools
+from setuptools import Command
+
+from . import __version__ as wheel_version
+from .metadata import pkginfo_to_metadata
+from .util import log
+from .vendored.packaging import tags
+from .vendored.packaging import version as _packaging_version
+from .wheelfile import WheelFile
+
+if TYPE_CHECKING:
+    import types
+
+# ensure Python logging is configured
+try:
+    __import__("setuptools.logging")
+except ImportError:
+    # setuptools < ??
+    from . import _setuptools_logging
+
+    _setuptools_logging.configure()
+
+
+def safe_name(name: str) -> str:
+    """Convert an arbitrary string to a standard distribution name
+    Any runs of non-alphanumeric/. characters are replaced with a single '-'.
+    """
+    return re.sub("[^A-Za-z0-9.]+", "-", name)
+
+
+def safe_version(version: str) -> str:
+    """
+    Convert an arbitrary string to a standard version string
+    """
+    try:
+        # normalize the version
+        return str(_packaging_version.Version(version))
+    except _packaging_version.InvalidVersion:
+        version = version.replace(" ", ".")
+        return re.sub("[^A-Za-z0-9.]+", "-", version)
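
A quick sketch of what the two normalizers above produce (illustrative
values, checked against packaging's PEP 440 rules):

    safe_name("foo bar")       # -> "foo-bar"   (runs of bad chars become "-")
    safe_version("1.0-beta2")  # -> "1.0b2"     (parsed and normalized)
    safe_version("0.9 beta")   # -> "0.9.beta"  (unparseable: spaces become ".")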
+
+
+setuptools_major_version = int(setuptools.__version__.split(".")[0])
+
+PY_LIMITED_API_PATTERN = r"cp3\d"
+
+
+def _is_32bit_interpreter() -> bool:
+    return struct.calcsize("P") == 4
+
+
+def python_tag() -> str:
+    return f"py{sys.version_info[0]}"
+
+
+def get_platform(archive_root: str | None) -> str:
+    """Return our platform name 'win32', 'linux_x86_64'"""
+    result = sysconfig.get_platform()
+    if result.startswith("macosx") and archive_root is not None:
+        from .macosx_libfile import calculate_macosx_platform_tag
+
+        result = calculate_macosx_platform_tag(archive_root, result)
+    elif _is_32bit_interpreter():
+        if result == "linux-x86_64":
+            # pip pull request #3497
+            result = "linux-i686"
+        elif result == "linux-aarch64":
+            # packaging pull request #234
+            # TODO armv8l, packaging pull request #690 => this did not land
+            # in pip/packaging yet
+            result = "linux-armv7l"
+
+    return result.replace("-", "_")
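
The trailing replace() is what turns sysconfig's platform string into a
wheel platform tag; on a 64-bit Linux interpreter (illustrative values):

    sysconfig.get_platform()  # "linux-x86_64"
    get_platform(None)        # "linux_x86_64"
    # dots (e.g. in macosx version numbers) are normalized later, in get_tag()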
+
+
+def get_flag(
+    var: str, fallback: bool, expected: bool = True, warn: bool = True
+) -> bool:
+    """Use a fallback value for determining SOABI flags if the needed config
+    var is unset or unavailable."""
+    val = sysconfig.get_config_var(var)
+    if val is None:
+        if warn:
+            warnings.warn(
+                f"Config variable '{var}' is unset, Python ABI tag may be incorrect",
+                RuntimeWarning,
+                stacklevel=2,
+            )
+        return fallback
+    return val == expected
+
+
+def get_abi_tag() -> str | None:
+    """Return the ABI tag based on SOABI (if available) or emulate SOABI (PyPy2)."""
+    soabi: str = sysconfig.get_config_var("SOABI")
+    impl = tags.interpreter_name()
+    if not soabi and impl in ("cp", "pp") and hasattr(sys, "maxunicode"):
+        d = ""
+        m = ""
+        u = ""
+        if get_flag("Py_DEBUG", hasattr(sys, "gettotalrefcount"), warn=(impl == "cp")):
+            d = "d"
+
+        if get_flag(
+            "WITH_PYMALLOC",
+            impl == "cp",
+            warn=(impl == "cp" and sys.version_info < (3, 8)),
+        ) and sys.version_info < (3, 8):
+            m = "m"
+
+        abi = f"{impl}{tags.interpreter_version()}{d}{m}{u}"
+    elif soabi and impl == "cp" and soabi.startswith("cpython"):
+        # non-Windows
+        abi = "cp" + soabi.split("-")[1]
+    elif soabi and impl == "cp" and soabi.startswith("cp"):
+        # Windows
+        abi = soabi.split("-")[0]
+    elif soabi and impl == "pp":
+        # we want something like pypy36-pp73
+        abi = "-".join(soabi.split("-")[:2])
+        abi = abi.replace(".", "_").replace("-", "_")
+    elif soabi and impl == "graalpy":
+        abi = "-".join(soabi.split("-")[:3])
+        abi = abi.replace(".", "_").replace("-", "_")
+    elif soabi:
+        abi = soabi.replace(".", "_").replace("-", "_")
+    else:
+        abi = None
+
+    return abi
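
On current CPython the SOABI branches do the work; a worked example with
an illustrative SOABI value:

    # sysconfig.get_config_var("SOABI") == "cpython-312-x86_64-linux-gnu"
    # non-Windows CPython branch: "cp" + soabi.split("-")[1]
    get_abi_tag()  # -> "cp312"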
+
+
+def safer_name(name: str) -> str:
+    return safe_name(name).replace("-", "_")
+
+
+def safer_version(version: str) -> str:
+    return safe_version(version).replace("-", "_")
+
+
+def remove_readonly(
+    func: Callable[..., object],
+    path: str,
+    excinfo: tuple[type[Exception], Exception, types.TracebackType],
+) -> None:
+    remove_readonly_exc(func, path, excinfo[1])
+
+
+def remove_readonly_exc(func: Callable[..., object], path: str, exc: Exception) -> None:
+    os.chmod(path, stat.S_IWRITE)
+    func(path)
+
+
+class bdist_wheel(Command):
+    description = "create a wheel distribution"
+
+    supported_compressions = {
+        "stored": ZIP_STORED,
+        "deflated": ZIP_DEFLATED,
+    }
+
+    user_options = [
+        ("bdist-dir=", "b", "temporary directory for creating the distribution"),
+        (
+            "plat-name=",
+            "p",
+            "platform name to embed in generated filenames "
+            f"(default: {get_platform(None)})",
+        ),
+        (
+            "keep-temp",
+            "k",
+            "keep the pseudo-installation tree around after "
+            "creating the distribution archive",
+        ),
+        ("dist-dir=", "d", "directory to put final built distributions in"),
+        ("skip-build", None, "skip rebuilding everything (for testing/debugging)"),
+        (
+            "relative",
+            None,
+            "build the archive using relative paths (default: false)",
+        ),
+        (
+            "owner=",
+            "u",
+            "Owner name used when creating a tar file [default: current user]",
+        ),
+        (
+            "group=",
+            "g",
+            "Group name used when creating a tar file [default: current group]",
+        ),
+        ("universal", None, "make a universal wheel (default: false)"),
+        (
+            "compression=",
+            None,
+            "zipfile compression (one of: {}) (default: 'deflated')".format(
+                ", ".join(supported_compressions)
+            ),
+        ),
+        (
+            "python-tag=",
+            None,
+            f"Python implementation compatibility tag (default: '{python_tag()}')",
+        ),
+        (
+            "build-number=",
+            None,
+            "Build number for this particular version. "
+            "As specified in PEP-0427, this must start with a digit. "
+            "[default: None]",
+        ),
+        (
+            "py-limited-api=",
+            None,
+            "Python tag (cp32|cp33|cpNN) for abi3 wheel tag (default: false)",
+        ),
+    ]
+
+    boolean_options = ["keep-temp", "skip-build", "relative", "universal"]
+
+    def initialize_options(self):
+        self.bdist_dir: str | None = None
+        self.data_dir = None
+        self.plat_name: str | None = None
+        self.plat_tag = None
+        self.format = "zip"
+        self.keep_temp = False
+        self.dist_dir: str | None = None
+        self.egginfo_dir = None
+        self.root_is_pure: bool | None = None
+        self.skip_build = None
+        self.relative = False
+        self.owner = None
+        self.group = None
+        self.universal: bool = False
+        self.compression: str | int = "deflated"
+        self.python_tag: str = python_tag()
+        self.build_number: str | None = None
+        self.py_limited_api: str | Literal[False] = False
+        self.plat_name_supplied = False
+
+    def finalize_options(self):
+        if self.bdist_dir is None:
+            bdist_base = self.get_finalized_command("bdist").bdist_base
+            self.bdist_dir = os.path.join(bdist_base, "wheel")
+
+        egg_info = self.distribution.get_command_obj("egg_info")
+        egg_info.ensure_finalized()  # needed for correct `wheel_dist_name`
+
+        self.data_dir = self.wheel_dist_name + ".data"
+        self.plat_name_supplied = self.plat_name is not None
+
+        try:
+            self.compression = self.supported_compressions[self.compression]
+        except KeyError:
+            raise ValueError(f"Unsupported compression: {self.compression}") from None
+
+        need_options = ("dist_dir", "plat_name", "skip_build")
+
+        self.set_undefined_options("bdist", *zip(need_options, need_options))
+
+        self.root_is_pure = not (
+            self.distribution.has_ext_modules() or self.distribution.has_c_libraries()
+        )
+
+        if self.py_limited_api and not re.match(
+            PY_LIMITED_API_PATTERN, self.py_limited_api
+        ):
+            raise ValueError(f"py-limited-api must match '{PY_LIMITED_API_PATTERN}'")
+
+        # Support legacy [wheel] section for setting universal
+        wheel = self.distribution.get_option_dict("wheel")
+        if "universal" in wheel:
+            # please don't define this in your global configs
+            log.warning(
+                "The [wheel] section is deprecated. Use [bdist_wheel] instead.",
+            )
+            val = wheel["universal"][1].strip()
+            if val.lower() in ("1", "true", "yes"):
+                self.universal = True
+
+        if self.build_number is not None and not self.build_number[:1].isdigit():
+            raise ValueError("Build tag (build-number) must start with a digit.")
+
+    @property
+    def wheel_dist_name(self):
+        """Return distribution full name with - replaced with _"""
+        components = (
+            safer_name(self.distribution.get_name()),
+            safer_version(self.distribution.get_version()),
+        )
+        if self.build_number:
+            components += (self.build_number,)
+        return "-".join(components)
+
+    def get_tag(self) -> tuple[str, str, str]:
+        # bdist sets self.plat_name if unset, we should only use it for purepy
+        # wheels if the user supplied it.
+        if self.plat_name_supplied:
+            plat_name = cast(str, self.plat_name)
+        elif self.root_is_pure:
+            plat_name = "any"
+        else:
+            # macosx embeds the system version in the platform name, so it
+            # needs special handling
+            if self.plat_name and not self.plat_name.startswith("macosx"):
+                plat_name = self.plat_name
+            else:
+                # on macosx always limit the platform name to comply with any
+                # c-extension modules in bdist_dir, since the user can specify
+                # a higher MACOSX_DEPLOYMENT_TARGET via tools like CMake
+
+                # on other platforms, and on macosx if there are no c-extension
+                # modules, use the default platform name.
+                plat_name = get_platform(self.bdist_dir)
+
+            if _is_32bit_interpreter():
+                if plat_name in ("linux-x86_64", "linux_x86_64"):
+                    plat_name = "linux_i686"
+                if plat_name in ("linux-aarch64", "linux_aarch64"):
+                    # TODO armv8l, packaging pull request #690 => this did not land
+                    # in pip/packaging yet
+                    plat_name = "linux_armv7l"
+
+        plat_name = (
+            plat_name.lower().replace("-", "_").replace(".", "_").replace(" ", "_")
+        )
+
+        if self.root_is_pure:
+            if self.universal:
+                impl = "py2.py3"
+            else:
+                impl = self.python_tag
+            tag = (impl, "none", plat_name)
+        else:
+            impl_name = tags.interpreter_name()
+            impl_ver = tags.interpreter_version()
+            impl = impl_name + impl_ver
+            # We don't work on CPython 3.1, 3.0.
+            if self.py_limited_api and (impl_name + impl_ver).startswith("cp3"):
+                impl = self.py_limited_api
+                abi_tag = "abi3"
+            else:
+                abi_tag = str(get_abi_tag()).lower()
+            tag = (impl, abi_tag, plat_name)
+            # issue gh-374: allow overriding plat_name
+            supported_tags = [
+                (t.interpreter, t.abi, plat_name) for t in tags.sys_tags()
+            ]
+            assert (
+                tag in supported_tags
+            ), f"would build wheel with unsupported tag {tag}"
+        return tag
+
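
The tag triple returned here becomes the tail of the wheel filename that
run() assembles below; two illustrative cases for a hypothetical
distribution named "demo":

    # pure project, universal=False, python_tag "py3":
    #   get_tag() -> ("py3", "none", "any")      => demo-1.0-py3-none-any.whl
    # C extension on CPython 3.12, 64-bit Linux:
    #   get_tag() -> ("cp312", "cp312", "linux_x86_64")
    #                               => demo-1.0-cp312-cp312-linux_x86_64.whl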
+    def run(self):
+        build_scripts = self.reinitialize_command("build_scripts")
+        build_scripts.executable = "python"
+        build_scripts.force = True
+
+        build_ext = self.reinitialize_command("build_ext")
+        build_ext.inplace = False
+
+        if not self.skip_build:
+            self.run_command("build")
+
+        install = self.reinitialize_command("install", reinit_subcommands=True)
+        install.root = self.bdist_dir
+        install.compile = False
+        install.skip_build = self.skip_build
+        install.warn_dir = False
+
+        # A wheel without setuptools scripts is more cross-platform.
+        # Use the (undocumented) `no_ep` option to setuptools'
+        # install_scripts command to avoid creating entry point scripts.
+        install_scripts = self.reinitialize_command("install_scripts")
+        install_scripts.no_ep = True
+
+        # Use a custom scheme for the archive, because we have to decide
+        # at installation time which scheme to use.
+        for key in ("headers", "scripts", "data", "purelib", "platlib"):
+            setattr(install, "install_" + key, os.path.join(self.data_dir, key))
+
+        basedir_observed = ""
+
+        if os.name == "nt":
+            # win32 barfs if any of these are ''; could be '.'?
+            # (distutils.command.install:change_roots bug)
+            basedir_observed = os.path.normpath(os.path.join(self.data_dir, ".."))
+            self.install_libbase = self.install_lib = basedir_observed
+
+        setattr(
+            install,
+            "install_purelib" if self.root_is_pure else "install_platlib",
+            basedir_observed,
+        )
+
+        log.info(f"installing to {self.bdist_dir}")
+
+        self.run_command("install")
+
+        impl_tag, abi_tag, plat_tag = self.get_tag()
+        archive_basename = f"{self.wheel_dist_name}-{impl_tag}-{abi_tag}-{plat_tag}"
+        if not self.relative:
+            archive_root = self.bdist_dir
+        else:
+            archive_root = os.path.join(
+                self.bdist_dir, self._ensure_relative(install.install_base)
+            )
+
+        self.set_undefined_options("install_egg_info", ("target", "egginfo_dir"))
+        distinfo_dirname = (
+            f"{safer_name(self.distribution.get_name())}-"
+            f"{safer_version(self.distribution.get_version())}.dist-info"
+        )
+        distinfo_dir = os.path.join(self.bdist_dir, distinfo_dirname)
+        self.egg2dist(self.egginfo_dir, distinfo_dir)
+
+        self.write_wheelfile(distinfo_dir)
+
+        # Make the archive
+        if not os.path.exists(self.dist_dir):
+            os.makedirs(self.dist_dir)
+
+        wheel_path = os.path.join(self.dist_dir, archive_basename + ".whl")
+        with WheelFile(wheel_path, "w", self.compression) as wf:
+            wf.write_files(archive_root)
+
+        # Add to 'Distribution.dist_files' so that the "upload" command works
+        getattr(self.distribution, "dist_files", []).append(
+            (
+                "bdist_wheel",
+                "{}.{}".format(*sys.version_info[:2]),  # like 3.7
+                wheel_path,
+            )
+        )
+
+        if not self.keep_temp:
+            log.info(f"removing {self.bdist_dir}")
+            if not self.dry_run:
+                if sys.version_info < (3, 12):
+                    rmtree(self.bdist_dir, onerror=remove_readonly)
+                else:
+                    rmtree(self.bdist_dir, onexc=remove_readonly_exc)
+
+    def write_wheelfile(
+        self, wheelfile_base: str, generator: str = f"bdist_wheel ({wheel_version})"
+    ):
+        from email.message import Message
+
+        msg = Message()
+        msg["Wheel-Version"] = "1.0"  # of the spec
+        msg["Generator"] = generator
+        msg["Root-Is-Purelib"] = str(self.root_is_pure).lower()
+        if self.build_number is not None:
+            msg["Build"] = self.build_number
+
+        # Doesn't work for bdist_wininst
+        impl_tag, abi_tag, plat_tag = self.get_tag()
+        for impl in impl_tag.split("."):
+            for abi in abi_tag.split("."):
+                for plat in plat_tag.split("."):
+                    msg["Tag"] = "-".join((impl, abi, plat))
+
+        wheelfile_path = os.path.join(wheelfile_base, "WHEEL")
+        log.info(f"creating {wheelfile_path}")
+        with open(wheelfile_path, "wb") as f:
+            BytesGenerator(f, maxheaderlen=0).flatten(msg)
+
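
For the pure-wheel case this yields a WHEEL file along these lines (the
Generator line tracks the vendored version above):

    Wheel-Version: 1.0
    Generator: bdist_wheel (0.45.1)
    Root-Is-Purelib: true
    Tag: py3-none-any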
+    def _ensure_relative(self, path: str) -> str:
+        # copied from dir_util, deleted
+        drive, path = os.path.splitdrive(path)
+        if path[0:1] == os.sep:
+            path = drive + path[1:]
+        return path
+
+    @property
+    def license_paths(self) -> Iterable[str]:
+        if setuptools_major_version >= 57:
+            # Setuptools has resolved any patterns to actual file names
+            return self.distribution.metadata.license_files or ()
+
+        files: set[str] = set()
+        metadata = self.distribution.get_option_dict("metadata")
+        if setuptools_major_version >= 42:
+            # Setuptools recognizes the license_files option but does not do globbing
+            patterns = cast(Sequence[str], self.distribution.metadata.license_files)
+        else:
+            # Prior to those, wheel is entirely responsible for handling license files
+            if "license_files" in metadata:
+                patterns = metadata["license_files"][1].split()
+            else:
+                patterns = ()
+
+        if "license_file" in metadata:
+            warnings.warn(
+                'The "license_file" option is deprecated. Use "license_files" instead.',
+                DeprecationWarning,
+                stacklevel=2,
+            )
+            files.add(metadata["license_file"][1])
+
+        if not files and not patterns and not isinstance(patterns, list):
+            patterns = ("LICEN[CS]E*", "COPYING*", "NOTICE*", "AUTHORS*")
+
+        for pattern in patterns:
+            for path in iglob(pattern):
+                if path.endswith("~"):
+                    log.debug(
+                        f'ignoring license file "{path}" as it looks like a backup'
+                    )
+                    continue
+
+                if path not in files and os.path.isfile(path):
+                    log.info(
+                        f'adding license file "{path}" (matched pattern "{pattern}")'
+                    )
+                    files.add(path)
+
+        return files
+
+    def egg2dist(self, egginfo_path: str, distinfo_path: str):
+        """Convert an .egg-info directory into a .dist-info directory"""
+
+        def adios(p: str) -> None:
+            """Appropriately delete directory, file or link."""
+            if os.path.exists(p) and not os.path.islink(p) and os.path.isdir(p):
+                shutil.rmtree(p)
+            elif os.path.exists(p):
+                os.unlink(p)
+
+        adios(distinfo_path)
+
+        if not os.path.exists(egginfo_path):
+            # There is no egg-info. This is probably because the egg-info
+            # file/directory is not named matching the distribution name used
+            # to name the archive file. Check for this case and report
+            # accordingly.
+            import glob
+
+            pat = os.path.join(os.path.dirname(egginfo_path), "*.egg-info")
+            possible = glob.glob(pat)
+            err = f"Egg metadata expected at {egginfo_path} but not found"
+            if possible:
+                alt = os.path.basename(possible[0])
+                err += f" ({alt} found - possible misnamed archive file?)"
+
+            raise ValueError(err)
+
+        if os.path.isfile(egginfo_path):
+            # .egg-info is a single file
+            pkg_info = pkginfo_to_metadata(egginfo_path, egginfo_path)
+            os.mkdir(distinfo_path)
+        else:
+            # .egg-info is a directory
+            pkginfo_path = os.path.join(egginfo_path, "PKG-INFO")
+            pkg_info = pkginfo_to_metadata(egginfo_path, pkginfo_path)
+
+            # ignore common egg metadata that is useless to wheel
+            shutil.copytree(
+                egginfo_path,
+                distinfo_path,
+                ignore=lambda x, y: {
+                    "PKG-INFO",
+                    "requires.txt",
+                    "SOURCES.txt",
+                    "not-zip-safe",
+                },
+            )
+
+            # delete dependency_links if it is only whitespace
+            dependency_links_path = os.path.join(distinfo_path, "dependency_links.txt")
+            with open(dependency_links_path, encoding="utf-8") as dependency_links_file:
+                dependency_links = dependency_links_file.read().strip()
+            if not dependency_links:
+                adios(dependency_links_path)
+
+        pkg_info_path = os.path.join(distinfo_path, "METADATA")
+        serialization_policy = EmailPolicy(
+            utf8=True,
+            mangle_from_=False,
+            max_line_length=0,
+        )
+        with open(pkg_info_path, "w", encoding="utf-8") as out:
+            Generator(out, policy=serialization_policy).flatten(pkg_info)
+
+        for license_path in self.license_paths:
+            filename = os.path.basename(license_path)
+            shutil.copy(license_path, os.path.join(distinfo_path, filename))
+
+        adios(egginfo_path)
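
After egg2dist() and write_wheelfile() have run, the metadata directory
has roughly this shape (names for a hypothetical "demo" distribution;
RECORD is appended by WheelFile when the archive is closed):

    demo-1.0.dist-info/
        METADATA          # converted from egg-info PKG-INFO
        WHEEL             # written by write_wheelfile()
        entry_points.txt  # copied over from egg-info, if present
        RECORD            # added by WheelFile on close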
diff --git a/.venv/lib/python3.12/site-packages/setuptools/_vendor/wheel/_setuptools_logging.py b/.venv/lib/python3.12/site-packages/setuptools/_vendor/wheel/_setuptools_logging.py
new file mode 100644
index 00000000..a1a2482b
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/setuptools/_vendor/wheel/_setuptools_logging.py
@@ -0,0 +1,26 @@
+# copied from setuptools.logging, omitting monkeypatching
+from __future__ import annotations
+
+import logging
+import sys
+
+
+def _not_warning(record: logging.LogRecord) -> bool:
+    return record.levelno < logging.WARNING
+
+
+def configure() -> None:
+    """
+    Configure logging to emit warning and above to stderr
+    and everything else to stdout. This behavior is provided
+    for compatibility with distutils.log but may change in
+    the future.
+    """
+    err_handler = logging.StreamHandler()
+    err_handler.setLevel(logging.WARNING)
+    out_handler = logging.StreamHandler(sys.stdout)
+    out_handler.addFilter(_not_warning)
+    handlers = err_handler, out_handler
+    logging.basicConfig(
+        format="{message}", style="{", handlers=handlers, level=logging.DEBUG
+    )
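
A minimal sketch of the resulting routing, assuming the vendored package
is importable as wheel:

    import logging
    from wheel import _setuptools_logging

    _setuptools_logging.configure()
    logging.info("to stdout")     # below WARNING: passes out_handler's filter
    logging.warning("to stderr")  # WARNING and above: meets err_handler's level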
diff --git a/.venv/lib/python3.12/site-packages/setuptools/_vendor/wheel/bdist_wheel.py b/.venv/lib/python3.12/site-packages/setuptools/_vendor/wheel/bdist_wheel.py
new file mode 100644
index 00000000..dd7b8629
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/setuptools/_vendor/wheel/bdist_wheel.py
@@ -0,0 +1,26 @@
+from typing import TYPE_CHECKING
+from warnings import warn
+
+warn(
+    "The 'wheel' package is no longer the canonical location of the 'bdist_wheel' "
+    "command, and will be removed in a future release. Please update to setuptools "
+    "v70.1 or later which contains an integrated version of this command.",
+    DeprecationWarning,
+    stacklevel=1,
+)
+
+if TYPE_CHECKING:
+    from ._bdist_wheel import bdist_wheel as bdist_wheel
+else:
+    try:
+        # Better integration/compatibility with setuptools:
+        # in the case new fixes or PEPs are implemented in setuptools
+        # there is no need to backport them to the deprecated code base.
+        # This is useful in the case of old packages in the ecosystem
+        # that are still used but have low maintenance.
+        from setuptools.command.bdist_wheel import bdist_wheel
+    except ImportError:
+        # Only used in the case of old setuptools versions.
+        # If the user wants to get the latest fixes/PEPs,
+        # they are encouraged to address the deprecation warning.
+        from ._bdist_wheel import bdist_wheel as bdist_wheel
diff --git a/.venv/lib/python3.12/site-packages/setuptools/_vendor/wheel/cli/__init__.py b/.venv/lib/python3.12/site-packages/setuptools/_vendor/wheel/cli/__init__.py
new file mode 100644
index 00000000..6ba1217f
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/setuptools/_vendor/wheel/cli/__init__.py
@@ -0,0 +1,155 @@
+"""
+Wheel command-line utility.
+"""
+
+from __future__ import annotations
+
+import argparse
+import os
+import sys
+from argparse import ArgumentTypeError
+
+
+class WheelError(Exception):
+    pass
+
+
+def unpack_f(args: argparse.Namespace) -> None:
+    from .unpack import unpack
+
+    unpack(args.wheelfile, args.dest)
+
+
+def pack_f(args: argparse.Namespace) -> None:
+    from .pack import pack
+
+    pack(args.directory, args.dest_dir, args.build_number)
+
+
+def convert_f(args: argparse.Namespace) -> None:
+    from .convert import convert
+
+    convert(args.files, args.dest_dir, args.verbose)
+
+
+def tags_f(args: argparse.Namespace) -> None:
+    from .tags import tags
+
+    names = (
+        tags(
+            wheel,
+            args.python_tag,
+            args.abi_tag,
+            args.platform_tag,
+            args.build,
+            args.remove,
+        )
+        for wheel in args.wheel
+    )
+
+    for name in names:
+        print(name)
+
+
+def version_f(args: argparse.Namespace) -> None:
+    from .. import __version__
+
+    print(f"wheel {__version__}")
+
+
+def parse_build_tag(build_tag: str) -> str:
+    if build_tag and not build_tag[0].isdigit():
+        raise ArgumentTypeError("build tag must begin with a digit")
+    elif "-" in build_tag:
+        raise ArgumentTypeError("invalid character ('-') in build tag")
+
+    return build_tag
+
+
+TAGS_HELP = """\
+Make a new wheel with given tags. Any tags unspecified will remain the same.
+Starting the tags with a "+" will append to the existing tags. Starting with a
+"-" will remove a tag (use --option=-TAG syntax). Multiple tags can be
+separated by ".". The original file will remain unless --remove is given.  The
+output filename(s) will be displayed on stdout for further processing.
+"""
+
+
+def parser():
+    p = argparse.ArgumentParser()
+    s = p.add_subparsers(help="commands")
+
+    unpack_parser = s.add_parser("unpack", help="Unpack wheel")
+    unpack_parser.add_argument(
+        "--dest", "-d", help="Destination directory", default="."
+    )
+    unpack_parser.add_argument("wheelfile", help="Wheel file")
+    unpack_parser.set_defaults(func=unpack_f)
+
+    repack_parser = s.add_parser("pack", help="Repack wheel")
+    repack_parser.add_argument("directory", help="Root directory of the unpacked wheel")
+    repack_parser.add_argument(
+        "--dest-dir",
+        "-d",
+        default=os.path.curdir,
+        help="Directory to store the wheel (default %(default)s)",
+    )
+    repack_parser.add_argument(
+        "--build-number", help="Build tag to use in the wheel name"
+    )
+    repack_parser.set_defaults(func=pack_f)
+
+    convert_parser = s.add_parser("convert", help="Convert egg or wininst to wheel")
+    convert_parser.add_argument("files", nargs="*", help="Files to convert")
+    convert_parser.add_argument(
+        "--dest-dir",
+        "-d",
+        default=os.path.curdir,
+        help="Directory to store wheels (default %(default)s)",
+    )
+    convert_parser.add_argument("--verbose", "-v", action="store_true")
+    convert_parser.set_defaults(func=convert_f)
+
+    tags_parser = s.add_parser(
+        "tags", help="Add or replace the tags on a wheel", description=TAGS_HELP
+    )
+    tags_parser.add_argument("wheel", nargs="*", help="Existing wheel(s) to retag")
+    tags_parser.add_argument(
+        "--remove",
+        action="store_true",
+        help="Remove the original files, keeping only the renamed ones",
+    )
+    tags_parser.add_argument(
+        "--python-tag", metavar="TAG", help="Specify an interpreter tag(s)"
+    )
+    tags_parser.add_argument("--abi-tag", metavar="TAG", help="Specify an ABI tag(s)")
+    tags_parser.add_argument(
+        "--platform-tag", metavar="TAG", help="Specify a platform tag(s)"
+    )
+    tags_parser.add_argument(
+        "--build", type=parse_build_tag, metavar="BUILD", help="Specify a build tag"
+    )
+    tags_parser.set_defaults(func=tags_f)
+
+    version_parser = s.add_parser("version", help="Print version and exit")
+    version_parser.set_defaults(func=version_f)
+
+    help_parser = s.add_parser("help", help="Show this help")
+    help_parser.set_defaults(func=lambda args: p.print_help())
+
+    return p
+
+
+def main():
+    p = parser()
+    args = p.parse_args()
+    if not hasattr(args, "func"):
+        p.print_help()
+    else:
+        try:
+            args.func(args)
+            return 0
+        except WheelError as e:
+            print(e, file=sys.stderr)
+
+    return 1
diff --git a/.venv/lib/python3.12/site-packages/setuptools/_vendor/wheel/cli/convert.py b/.venv/lib/python3.12/site-packages/setuptools/_vendor/wheel/cli/convert.py
new file mode 100644
index 00000000..61d4775c
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/setuptools/_vendor/wheel/cli/convert.py
@@ -0,0 +1,332 @@
+from __future__ import annotations
+
+import os.path
+import re
+from abc import ABCMeta, abstractmethod
+from collections import defaultdict
+from collections.abc import Iterator
+from email.message import Message
+from email.parser import Parser
+from email.policy import EmailPolicy
+from glob import iglob
+from pathlib import Path
+from textwrap import dedent
+from zipfile import ZipFile
+
+from .. import __version__
+from ..metadata import generate_requirements
+from ..vendored.packaging.tags import parse_tag
+from ..wheelfile import WheelFile
+
+egg_filename_re = re.compile(
+    r"""
+    (?P<name>.+?)-(?P<ver>.+?)
+    (-(?P<pyver>py\d\.\d+)
+     (-(?P<arch>.+?))?
+    )?\.egg$""",
+    re.VERBOSE,
+)
+egg_info_re = re.compile(
+    r"""
+    ^(?P<name>.+?)-(?P<ver>.+?)
+    (-(?P<pyver>py\d\.\d+)
+    )?\.egg-info/""",
+    re.VERBOSE,
+)
+wininst_re = re.compile(
+    r"\.(?P<platform>win32|win-amd64)(?:-(?P<pyver>py\d\.\d))?\.exe$"
+)
+pyd_re = re.compile(r"\.(?P<abi>[a-z0-9]+)-(?P<platform>win32|win_amd64)\.pyd$")
+serialization_policy = EmailPolicy(
+    utf8=True,
+    mangle_from_=False,
+    max_line_length=0,
+)
+GENERATOR = f"wheel {__version__}"
+
+
+def convert_requires(requires: str, metadata: Message) -> None:
+    extra: str | None = None
+    requirements: dict[str | None, list[str]] = defaultdict(list)
+    for line in requires.splitlines():
+        line = line.strip()
+        if not line:
+            continue
+
+        if line.startswith("[") and line.endswith("]"):
+            extra = line[1:-1]
+            continue
+
+        requirements[extra].append(line)
+
+    for key, value in generate_requirements(requirements):
+        metadata.add_header(key, value)
+
+
+def convert_pkg_info(pkginfo: str, metadata: Message):
+    parsed_message = Parser().parsestr(pkginfo)
+    for key, value in parsed_message.items():
+        key_lower = key.lower()
+        if value == "UNKNOWN":
+            continue
+
+        if key_lower == "description":
+            description_lines = value.splitlines()
+            value = "\n".join(
+                (
+                    description_lines[0].lstrip(),
+                    dedent("\n".join(description_lines[1:])),
+                    "\n",
+                )
+            )
+            metadata.set_payload(value)
+        elif key_lower == "home-page":
+            metadata.add_header("Project-URL", f"Homepage, {value}")
+        elif key_lower == "download-url":
+            metadata.add_header("Project-URL", f"Download, {value}")
+        else:
+            metadata.add_header(key, value)
+
+    metadata.replace_header("Metadata-Version", "2.4")
+
+
+def normalize(name: str) -> str:
+    return re.sub(r"[-_.]+", "-", name).lower().replace("-", "_")
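
Note that this goes one step beyond PEP 503 normalization by swapping the
dashes for underscores, producing the escaped form used in wheel filenames:

    normalize("Foo.Bar-baz")  # -> "foo_bar_baz"
    normalize("demo")         # -> "demo"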
+
+
+class ConvertSource(metaclass=ABCMeta):
+    name: str
+    version: str
+    pyver: str = "py2.py3"
+    abi: str = "none"
+    platform: str = "any"
+    metadata: Message
+
+    @property
+    def dist_info_dir(self) -> str:
+        return f"{self.name}-{self.version}.dist-info"
+
+    @abstractmethod
+    def generate_contents(self) -> Iterator[tuple[str, bytes]]:
+        pass
+
+
+class EggFileSource(ConvertSource):
+    def __init__(self, path: Path):
+        if not (match := egg_filename_re.match(path.name)):
+            raise ValueError(f"Invalid egg file name: {path.name}")
+
+        # Binary wheels are assumed to be for CPython
+        self.path = path
+        self.name = normalize(match.group("name"))
+        self.version = match.group("ver")
+        if pyver := match.group("pyver"):
+            self.pyver = pyver.replace(".", "")
+            if arch := match.group("arch"):
+                self.abi = self.pyver.replace("py", "cp")
+                self.platform = normalize(arch)
+
+        self.metadata = Message()
+
+    def generate_contents(self) -> Iterator[tuple[str, bytes]]:
+        with ZipFile(self.path, "r") as zip_file:
+            for filename in sorted(zip_file.namelist()):
+                # Skip pure directory entries
+                if filename.endswith("/"):
+                    continue
+
+                # Handle files in the egg-info directory specially, selectively moving
+                # them to the dist-info directory while converting as needed
+                if filename.startswith("EGG-INFO/"):
+                    if filename == "EGG-INFO/requires.txt":
+                        requires = zip_file.read(filename).decode("utf-8")
+                        convert_requires(requires, self.metadata)
+                    elif filename == "EGG-INFO/PKG-INFO":
+                        pkginfo = zip_file.read(filename).decode("utf-8")
+                        convert_pkg_info(pkginfo, self.metadata)
+                    elif filename == "EGG-INFO/entry_points.txt":
+                        yield (
+                            f"{self.dist_info_dir}/entry_points.txt",
+                            zip_file.read(filename),
+                        )
+
+                    continue
+
+                # For any other file, just pass it through
+                yield filename, zip_file.read(filename)
+
+
+class EggDirectorySource(EggFileSource):
+    def generate_contents(self) -> Iterator[tuple[str, bytes]]:
+        for dirpath, _, filenames in os.walk(self.path):
+            for filename in sorted(filenames):
+                path = Path(dirpath, filename)
+                if path.parent.name == "EGG-INFO":
+                    if path.name == "requires.txt":
+                        requires = path.read_text("utf-8")
+                        convert_requires(requires, self.metadata)
+                    elif path.name == "PKG-INFO":
+                        pkginfo = path.read_text("utf-8")
+                        convert_pkg_info(pkginfo, self.metadata)
+                        if name := self.metadata.get("Name"):
+                            self.name = normalize(name)
+
+                        if version := self.metadata.get("Version"):
+                            self.version = version
+                    elif path.name == "entry_points.txt":
+                        yield (
+                            f"{self.dist_info_dir}/entry_points.txt",
+                            path.read_bytes(),
+                        )
+
+                    continue
+
+                # For any other file, just pass it through
+                yield str(path.relative_to(self.path)), path.read_bytes()
+
+
+class WininstFileSource(ConvertSource):
+    """
+    Handles distributions created with ``bdist_wininst``.
+
+    The egginfo filename has the format::
+
+        name-ver(-pyver)(-arch).egg-info
+
+    The installer filename has the format::
+
+        name-ver.arch(-pyver).exe
+
+    Some things to note:
+
+    1. The installer filename is not definitive. An installer can be renamed
+       and work perfectly well as an installer. So more reliable data should
+       be used whenever possible.
+    2. The egg-info data should be preferred for the name and version, because
+       these come straight from the distutils metadata, and are mandatory.
+    3. The pyver from the egg-info data should be ignored, as it is
+       constructed from the version of Python used to build the installer,
+       which is irrelevant - the installer filename is correct here (even to
+       the point that when it's not there, any version is implied).
+    4. The architecture must be taken from the installer filename, as it is
+       not included in the egg-info data.
+    5. Architecture-neutral installers still have an architecture because the
+       installer format itself (being executable) is architecture-specific. We
+       should therefore ignore the architecture if the content is pure-python.
+    """
+
+    def __init__(self, path: Path):
+        self.path = path
+        self.metadata = Message()
+
+        # Determine the initial architecture and Python version from the file name
+        # (if possible)
+        if match := wininst_re.search(path.name):
+            self.platform = normalize(match.group("platform"))
+            if pyver := match.group("pyver"):
+                self.pyver = pyver.replace(".", "")
+
+        # Look for an .egg-info directory and any .pyd files for more precise info
+        egg_info_found = pyd_found = False
+        with ZipFile(self.path) as zip_file:
+            for filename in zip_file.namelist():
+                prefix, filename = filename.split("/", 1)
+                if not egg_info_found and (match := egg_info_re.match(filename)):
+                    egg_info_found = True
+                    self.name = normalize(match.group("name"))
+                    self.version = match.group("ver")
+                    if pyver := match.group("pyver"):
+                        self.pyver = pyver.replace(".", "")
+                elif not pyd_found and (match := pyd_re.search(filename)):
+                    pyd_found = True
+                    self.abi = match.group("abi")
+                    self.platform = match.group("platform")
+
+                if egg_info_found and pyd_found:
+                    break
+
+    def generate_contents(self) -> Iterator[tuple[str, bytes]]:
+        dist_info_dir = f"{self.name}-{self.version}.dist-info"
+        data_dir = f"{self.name}-{self.version}.data"
+        with ZipFile(self.path, "r") as zip_file:
+            for filename in sorted(zip_file.namelist()):
+                # Skip pure directory entries
+                if filename.endswith("/"):
+                    continue
+
+                # Handle files in the egg-info directory specially, selectively moving
+                # them to the dist-info directory while converting as needed
+                prefix, target_filename = filename.split("/", 1)
+                if egg_info_re.search(target_filename):
+                    basename = target_filename.rsplit("/", 1)[-1]
+                    if basename == "requires.txt":
+                        requires = zip_file.read(filename).decode("utf-8")
+                        convert_requires(requires, self.metadata)
+                    elif basename == "PKG-INFO":
+                        pkginfo = zip_file.read(filename).decode("utf-8")
+                        convert_pkg_info(pkginfo, self.metadata)
+                    elif basename == "entry_points.txt":
+                        yield (
+                            f"{dist_info_dir}/entry_points.txt",
+                            zip_file.read(filename),
+                        )
+
+                    continue
+                elif prefix == "SCRIPTS":
+                    target_filename = f"{data_dir}/scripts/{target_filename}"
+
+                # For any other file, just pass it through
+                yield target_filename, zip_file.read(filename)
+
+
+def convert(files: list[str], dest_dir: str, verbose: bool) -> None:
+    for pat in files:
+        for archive in iglob(pat):
+            path = Path(archive)
+            if path.suffix == ".egg":
+                if path.is_dir():
+                    source: ConvertSource = EggDirectorySource(path)
+                else:
+                    source = EggFileSource(path)
+            else:
+                source = WininstFileSource(path)
+
+            if verbose:
+                print(f"{archive}...", flush=True, end="")
+
+            dest_path = Path(dest_dir) / (
+                f"{source.name}-{source.version}-{source.pyver}-{source.abi}"
+                f"-{source.platform}.whl"
+            )
+            with WheelFile(dest_path, "w") as wheelfile:
+                for name_or_zinfo, contents in source.generate_contents():
+                    wheelfile.writestr(name_or_zinfo, contents)
+
+                # Write the METADATA file
+                wheelfile.writestr(
+                    f"{source.dist_info_dir}/METADATA",
+                    source.metadata.as_string(policy=serialization_policy).encode(
+                        "utf-8"
+                    ),
+                )
+
+                # Write the WHEEL file
+                wheel_message = Message()
+                wheel_message.add_header("Wheel-Version", "1.0")
+                wheel_message.add_header("Generator", GENERATOR)
+                wheel_message.add_header(
+                    "Root-Is-Purelib", str(source.platform == "any").lower()
+                )
+                tags = parse_tag(f"{source.pyver}-{source.abi}-{source.platform}")
+                for tag in sorted(tags, key=lambda tag: tag.interpreter):
+                    wheel_message.add_header("Tag", str(tag))
+
+                wheelfile.writestr(
+                    f"{source.dist_info_dir}/WHEEL",
+                    wheel_message.as_string(policy=serialization_policy).encode(
+                        "utf-8"
+                    ),
+                )
+
+            if verbose:
+                print("OK")
diff --git a/.venv/lib/python3.12/site-packages/setuptools/_vendor/wheel/cli/pack.py b/.venv/lib/python3.12/site-packages/setuptools/_vendor/wheel/cli/pack.py
new file mode 100644
index 00000000..64469c0c
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/setuptools/_vendor/wheel/cli/pack.py
@@ -0,0 +1,85 @@
+from __future__ import annotations
+
+import email.policy
+import os.path
+import re
+from email.generator import BytesGenerator
+from email.parser import BytesParser
+
+from wheel.cli import WheelError
+from wheel.wheelfile import WheelFile
+
+DIST_INFO_RE = re.compile(r"^(?P<namever>(?P<name>.+?)-(?P<ver>\d.*?))\.dist-info$")
+
+
+def pack(directory: str, dest_dir: str, build_number: str | None) -> None:
+    """Repack a previously unpacked wheel directory into a new wheel file.
+
+    The .dist-info/WHEEL file must contain one or more tags so that the target
+    wheel file name can be determined.
+
+    :param directory: The unpacked wheel directory
+    :param dest_dir: Destination directory (defaults to the current directory)
+    :param build_number: If given, a build tag to add or replace in the wheel
+        name and in .dist-info/WHEEL (an empty string removes any existing one)
+    """
+    # Find the .dist-info directory
+    dist_info_dirs = [
+        fn
+        for fn in os.listdir(directory)
+        if os.path.isdir(os.path.join(directory, fn)) and DIST_INFO_RE.match(fn)
+    ]
+    if len(dist_info_dirs) > 1:
+        raise WheelError(f"Multiple .dist-info directories found in {directory}")
+    elif not dist_info_dirs:
+        raise WheelError(f"No .dist-info directories found in {directory}")
+
+    # Determine the target wheel filename
+    dist_info_dir = dist_info_dirs[0]
+    name_version = DIST_INFO_RE.match(dist_info_dir).group("namever")
+
+    # Read the tags and the existing build number from .dist-info/WHEEL
+    wheel_file_path = os.path.join(directory, dist_info_dir, "WHEEL")
+    with open(wheel_file_path, "rb") as f:
+        info = BytesParser(policy=email.policy.compat32).parse(f)
+        tags: list[str] = info.get_all("Tag", [])
+        existing_build_number = info.get("Build")
+
+        if not tags:
+            raise WheelError(
+                f"No tags present in {dist_info_dir}/WHEEL; cannot determine target "
+                f"wheel filename"
+            )
+
+    # Set the wheel file name and add/replace/remove the Build tag in .dist-info/WHEEL
+    build_number = build_number if build_number is not None else existing_build_number
+    if build_number is not None:
+        del info["Build"]
+        if build_number:
+            info["Build"] = build_number
+            name_version += "-" + build_number
+
+        if build_number != existing_build_number:
+            with open(wheel_file_path, "wb") as f:
+                BytesGenerator(f, maxheaderlen=0).flatten(info)
+
+    # Reassemble the tags for the wheel file
+    tagline = compute_tagline(tags)
+
+    # Repack the wheel
+    wheel_path = os.path.join(dest_dir, f"{name_version}-{tagline}.whl")
+    with WheelFile(wheel_path, "w") as wf:
+        print(f"Repacking wheel as {wheel_path}...", end="", flush=True)
+        wf.write_files(directory)
+
+    print("OK")
+
+
+def compute_tagline(tags: list[str]) -> str:
+    """Compute a tagline from a list of tags.
+
+    :param tags: A list of tags
+    :return: A tagline
+    """
+    impls = sorted({tag.split("-")[0] for tag in tags})
+    abivers = sorted({tag.split("-")[1] for tag in tags})
+    platforms = sorted({tag.split("-")[2] for tag in tags})
+    return "-".join([".".join(impls), ".".join(abivers), ".".join(platforms)])
diff --git a/.venv/lib/python3.12/site-packages/setuptools/_vendor/wheel/cli/tags.py b/.venv/lib/python3.12/site-packages/setuptools/_vendor/wheel/cli/tags.py
new file mode 100644
index 00000000..88da72e9
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/setuptools/_vendor/wheel/cli/tags.py
@@ -0,0 +1,139 @@
+from __future__ import annotations
+
+import email.policy
+import itertools
+import os
+from collections.abc import Iterable
+from email.parser import BytesParser
+
+from ..wheelfile import WheelFile
+
+
+def _compute_tags(original_tags: Iterable[str], new_tags: str | None) -> set[str]:
+    """Add or replace tags. Supports dot-separated tags"""
+    if new_tags is None:
+        return set(original_tags)
+
+    if new_tags.startswith("+"):
+        return {*original_tags, *new_tags[1:].split(".")}
+
+    if new_tags.startswith("-"):
+        return set(original_tags) - set(new_tags[1:].split("."))
+
+    return set(new_tags.split("."))
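
A few illustrative calls, one per branch above:

    _compute_tags(["py2", "py3"], None)     # -> {"py2", "py3"}  (unchanged)
    _compute_tags(["py3"], "+py2")          # -> {"py2", "py3"}  (append)
    _compute_tags(["py2", "py3"], "-py2")   # -> {"py3"}         (remove)
    _compute_tags(["py2", "py3"], "cp312")  # -> {"cp312"}       (replace)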
+
+
+def tags(
+    wheel: str,
+    python_tags: str | None = None,
+    abi_tags: str | None = None,
+    platform_tags: str | None = None,
+    build_tag: str | None = None,
+    remove: bool = False,
+) -> str:
+    """Change the tags on a wheel file.
+
+    The tags are left unchanged if they are not specified. To specify "none",
+    use ["none"]. To append to the previous tags, a tag should start with a
+    "+".  If a tag starts with "-", it will be removed from existing tags.
+    Processing is done left to right.
+
+    :param wheel: The path to the wheel
+    :param python_tags: The Python tags to set
+    :param abi_tags: The ABI tags to set
+    :param platform_tags: The platform tags to set
+    :param build_tag: The build tag to set
+    :param remove: Remove the original wheel
+    """
+    with WheelFile(wheel, "r") as f:
+        assert f.filename, f"{f.filename} must be available"
+
+        wheel_info = f.read(f.dist_info_path + "/WHEEL")
+        info = BytesParser(policy=email.policy.compat32).parsebytes(wheel_info)
+
+        original_wheel_name = os.path.basename(f.filename)
+        namever = f.parsed_filename.group("namever")
+        build = f.parsed_filename.group("build")
+        original_python_tags = f.parsed_filename.group("pyver").split(".")
+        original_abi_tags = f.parsed_filename.group("abi").split(".")
+        original_plat_tags = f.parsed_filename.group("plat").split(".")
+
+    tags: list[str] = info.get_all("Tag", [])
+    existing_build_tag = info.get("Build")
+
+    impls = {tag.split("-")[0] for tag in tags}
+    abivers = {tag.split("-")[1] for tag in tags}
+    platforms = {tag.split("-")[2] for tag in tags}
+
+    if impls != set(original_python_tags):
+        msg = f"Wheel internal tags {impls!r} != filename tags {original_python_tags!r}"
+        raise AssertionError(msg)
+
+    if abivers != set(original_abi_tags):
+        msg = f"Wheel internal tags {abivers!r} != filename tags {original_abi_tags!r}"
+        raise AssertionError(msg)
+
+    if platforms != set(original_plat_tags):
+        msg = (
+            f"Wheel internal tags {platforms!r} != filename tags {original_plat_tags!r}"
+        )
+        raise AssertionError(msg)
+
+    if existing_build_tag != build:
+        msg = (
+            f"Incorrect filename '{build}' "
+            f"& *.dist-info/WHEEL '{existing_build_tag}' build numbers"
+        )
+        raise AssertionError(msg)
+
+    # Start changing as needed
+    if build_tag is not None:
+        build = build_tag
+
+    final_python_tags = sorted(_compute_tags(original_python_tags, python_tags))
+    final_abi_tags = sorted(_compute_tags(original_abi_tags, abi_tags))
+    final_plat_tags = sorted(_compute_tags(original_plat_tags, platform_tags))
+
+    final_tags = [
+        namever,
+        ".".join(final_python_tags),
+        ".".join(final_abi_tags),
+        ".".join(final_plat_tags),
+    ]
+    if build:
+        final_tags.insert(1, build)
+
+    final_wheel_name = "-".join(final_tags) + ".whl"
+
+    if original_wheel_name != final_wheel_name:
+        del info["Tag"], info["Build"]
+        for a, b, c in itertools.product(
+            final_python_tags, final_abi_tags, final_plat_tags
+        ):
+            info["Tag"] = f"{a}-{b}-{c}"
+        if build:
+            info["Build"] = build
+
+        original_wheel_path = os.path.join(
+            os.path.dirname(f.filename), original_wheel_name
+        )
+        final_wheel_path = os.path.join(os.path.dirname(f.filename), final_wheel_name)
+
+        with WheelFile(original_wheel_path, "r") as fin, WheelFile(
+            final_wheel_path, "w"
+        ) as fout:
+            fout.comment = fin.comment  # preserve the comment
+            for item in fin.infolist():
+                if item.is_dir():
+                    continue
+                if item.filename == f.dist_info_path + "/RECORD":
+                    continue
+                if item.filename == f.dist_info_path + "/WHEEL":
+                    fout.writestr(item, info.as_bytes())
+                else:
+                    fout.writestr(item, fin.read(item))
+
+        if remove:
+            os.remove(original_wheel_path)
+
+    return final_wheel_name
diff --git a/.venv/lib/python3.12/site-packages/setuptools/_vendor/wheel/cli/unpack.py b/.venv/lib/python3.12/site-packages/setuptools/_vendor/wheel/cli/unpack.py
new file mode 100644
index 00000000..d48840e6
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/setuptools/_vendor/wheel/cli/unpack.py
@@ -0,0 +1,30 @@
+from __future__ import annotations
+
+from pathlib import Path
+
+from ..wheelfile import WheelFile
+
+
+def unpack(path: str, dest: str = ".") -> None:
+    """Unpack a wheel.
+
+    Wheel content will be unpacked to {dest}/{name}-{ver}, where {name}
+    is the package name and {ver} its version.
+
+    :param path: The path to the wheel.
+    :param dest: Destination directory (defaults to the current directory).
+    """
+    with WheelFile(path) as wf:
+        namever = wf.parsed_filename.group("namever")
+        destination = Path(dest) / namever
+        print(f"Unpacking to: {destination}...", end="", flush=True)
+        for zinfo in wf.filelist:
+            wf.extract(zinfo, destination)
+
+            # Set permissions to the same values as they were set in the archive
+            # We have to do this manually due to
+            # https://github.com/python/cpython/issues/59999
+            permissions = zinfo.external_attr >> 16 & 0o777
+            destination.joinpath(zinfo.filename).chmod(permissions)
+
+    print("OK")
diff --git a/.venv/lib/python3.12/site-packages/setuptools/_vendor/wheel/macosx_libfile.py b/.venv/lib/python3.12/site-packages/setuptools/_vendor/wheel/macosx_libfile.py
new file mode 100644
index 00000000..abdfc9ed
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/setuptools/_vendor/wheel/macosx_libfile.py
@@ -0,0 +1,482 @@
+"""
+This module contains function to analyse dynamic library
+headers to extract system information
+
+Currently only for MacOSX
+
+Library file on macosx system starts with Mach-O or Fat field.
+This can be distinguish by first 32 bites and it is called magic number.
+Proper value of magic number is with suffix _MAGIC. Suffix _CIGAM means
+reversed bytes order.
+Both fields can occur in two types: 32 and 64 bytes.
+
+FAT field inform that this library contains few version of library
+(typically for different types version). It contains
+information where Mach-O headers starts.
+
+Each section started with Mach-O header contains one library
+(So if file starts with this field it contains only one version).
+
+After filed Mach-O there are section fields.
+Each of them starts with two fields:
+cmd - magic number for this command
+cmdsize - total size occupied by this section information.
+
+In this case only sections LC_VERSION_MIN_MACOSX (for macosx 10.13 and earlier)
+and LC_BUILD_VERSION (for macosx 10.14 and newer) are interesting,
+because them contains information about minimal system version.
+
+Important remarks:
+- For fat files this implementation looks for maximum number version.
+  It not check if it is 32 or 64 and do not compare it with currently built package.
+  So it is possible to false report higher version that needed.
+- All structures signatures are taken form macosx header files.
+- I think that binary format will be more stable than `otool` output.
+  and if apple introduce some changes both implementation will need to be updated.
+- The system compile will set the deployment target no lower than
+  11.0 for arm64 builds. For "Universal 2" builds use the x86_64 deployment
+  target when the arm64 target is 11.0.
+"""
+
+from __future__ import annotations
+
+import ctypes
+import os
+import sys
+from io import BufferedIOBase
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from typing import Union
+
+    StrPath = Union[str, os.PathLike[str]]
+
+"""here the needed const and struct from mach-o header files"""
+
+FAT_MAGIC = 0xCAFEBABE
+FAT_CIGAM = 0xBEBAFECA
+FAT_MAGIC_64 = 0xCAFEBABF
+FAT_CIGAM_64 = 0xBFBAFECA
+MH_MAGIC = 0xFEEDFACE
+MH_CIGAM = 0xCEFAEDFE
+MH_MAGIC_64 = 0xFEEDFACF
+MH_CIGAM_64 = 0xCFFAEDFE
+
+LC_VERSION_MIN_MACOSX = 0x24
+LC_BUILD_VERSION = 0x32
+
+CPU_TYPE_ARM64 = 0x0100000C
+
+mach_header_fields = [
+    ("magic", ctypes.c_uint32),
+    ("cputype", ctypes.c_int),
+    ("cpusubtype", ctypes.c_int),
+    ("filetype", ctypes.c_uint32),
+    ("ncmds", ctypes.c_uint32),
+    ("sizeofcmds", ctypes.c_uint32),
+    ("flags", ctypes.c_uint32),
+]
+"""
+struct mach_header {
+    uint32_t	magic;		/* mach magic number identifier */
+    cpu_type_t	cputype;	/* cpu specifier */
+    cpu_subtype_t	cpusubtype;	/* machine specifier */
+    uint32_t	filetype;	/* type of file */
+    uint32_t	ncmds;		/* number of load commands */
+    uint32_t	sizeofcmds;	/* the size of all the load commands */
+    uint32_t	flags;		/* flags */
+};
+typedef integer_t cpu_type_t;
+typedef integer_t cpu_subtype_t;
+"""
+
+mach_header_fields_64 = mach_header_fields + [("reserved", ctypes.c_uint32)]
+"""
+struct mach_header_64 {
+    uint32_t	magic;		/* mach magic number identifier */
+    cpu_type_t	cputype;	/* cpu specifier */
+    cpu_subtype_t	cpusubtype;	/* machine specifier */
+    uint32_t	filetype;	/* type of file */
+    uint32_t	ncmds;		/* number of load commands */
+    uint32_t	sizeofcmds;	/* the size of all the load commands */
+    uint32_t	flags;		/* flags */
+    uint32_t	reserved;	/* reserved */
+};
+"""
+
+fat_header_fields = [("magic", ctypes.c_uint32), ("nfat_arch", ctypes.c_uint32)]
+"""
+struct fat_header {
+    uint32_t	magic;		/* FAT_MAGIC or FAT_MAGIC_64 */
+    uint32_t	nfat_arch;	/* number of structs that follow */
+};
+"""
+
+fat_arch_fields = [
+    ("cputype", ctypes.c_int),
+    ("cpusubtype", ctypes.c_int),
+    ("offset", ctypes.c_uint32),
+    ("size", ctypes.c_uint32),
+    ("align", ctypes.c_uint32),
+]
+"""
+struct fat_arch {
+    cpu_type_t	cputype;	/* cpu specifier (int) */
+    cpu_subtype_t	cpusubtype;	/* machine specifier (int) */
+    uint32_t	offset;		/* file offset to this object file */
+    uint32_t	size;		/* size of this object file */
+    uint32_t	align;		/* alignment as a power of 2 */
+};
+"""
+
+fat_arch_64_fields = [
+    ("cputype", ctypes.c_int),
+    ("cpusubtype", ctypes.c_int),
+    ("offset", ctypes.c_uint64),
+    ("size", ctypes.c_uint64),
+    ("align", ctypes.c_uint32),
+    ("reserved", ctypes.c_uint32),
+]
+"""
+struct fat_arch_64 {
+    cpu_type_t	cputype;	/* cpu specifier (int) */
+    cpu_subtype_t	cpusubtype;	/* machine specifier (int) */
+    uint64_t	offset;		/* file offset to this object file */
+    uint64_t	size;		/* size of this object file */
+    uint32_t	align;		/* alignment as a power of 2 */
+    uint32_t	reserved;	/* reserved */
+};
+"""
+
+segment_base_fields = [("cmd", ctypes.c_uint32), ("cmdsize", ctypes.c_uint32)]
+"""base for reading segment info"""
+
+segment_command_fields = [
+    ("cmd", ctypes.c_uint32),
+    ("cmdsize", ctypes.c_uint32),
+    ("segname", ctypes.c_char * 16),
+    ("vmaddr", ctypes.c_uint32),
+    ("vmsize", ctypes.c_uint32),
+    ("fileoff", ctypes.c_uint32),
+    ("filesize", ctypes.c_uint32),
+    ("maxprot", ctypes.c_int),
+    ("initprot", ctypes.c_int),
+    ("nsects", ctypes.c_uint32),
+    ("flags", ctypes.c_uint32),
+]
+"""
+struct segment_command { /* for 32-bit architectures */
+    uint32_t	cmd;		/* LC_SEGMENT */
+    uint32_t	cmdsize;	/* includes sizeof section structs */
+    char		segname[16];	/* segment name */
+    uint32_t	vmaddr;		/* memory address of this segment */
+    uint32_t	vmsize;		/* memory size of this segment */
+    uint32_t	fileoff;	/* file offset of this segment */
+    uint32_t	filesize;	/* amount to map from the file */
+    vm_prot_t	maxprot;	/* maximum VM protection */
+    vm_prot_t	initprot;	/* initial VM protection */
+    uint32_t	nsects;		/* number of sections in segment */
+    uint32_t	flags;		/* flags */
+};
+typedef int vm_prot_t;
+"""
+
+segment_command_fields_64 = [
+    ("cmd", ctypes.c_uint32),
+    ("cmdsize", ctypes.c_uint32),
+    ("segname", ctypes.c_char * 16),
+    ("vmaddr", ctypes.c_uint64),
+    ("vmsize", ctypes.c_uint64),
+    ("fileoff", ctypes.c_uint64),
+    ("filesize", ctypes.c_uint64),
+    ("maxprot", ctypes.c_int),
+    ("initprot", ctypes.c_int),
+    ("nsects", ctypes.c_uint32),
+    ("flags", ctypes.c_uint32),
+]
+"""
+struct segment_command_64 { /* for 64-bit architectures */
+    uint32_t	cmd;		/* LC_SEGMENT_64 */
+    uint32_t	cmdsize;	/* includes sizeof section_64 structs */
+    char		segname[16];	/* segment name */
+    uint64_t	vmaddr;		/* memory address of this segment */
+    uint64_t	vmsize;		/* memory size of this segment */
+    uint64_t	fileoff;	/* file offset of this segment */
+    uint64_t	filesize;	/* amount to map from the file */
+    vm_prot_t	maxprot;	/* maximum VM protection */
+    vm_prot_t	initprot;	/* initial VM protection */
+    uint32_t	nsects;		/* number of sections in segment */
+    uint32_t	flags;		/* flags */
+};
+"""
+
+version_min_command_fields = segment_base_fields + [
+    ("version", ctypes.c_uint32),
+    ("sdk", ctypes.c_uint32),
+]
+"""
+struct version_min_command {
+    uint32_t	cmd;		/* LC_VERSION_MIN_MACOSX or
+                               LC_VERSION_MIN_IPHONEOS or
+                               LC_VERSION_MIN_WATCHOS or
+                               LC_VERSION_MIN_TVOS */
+    uint32_t	cmdsize;	/* sizeof(struct min_version_command) */
+    uint32_t	version;	/* X.Y.Z is encoded in nibbles xxxx.yy.zz */
+    uint32_t	sdk;		/* X.Y.Z is encoded in nibbles xxxx.yy.zz */
+};
+"""
+
+build_version_command_fields = segment_base_fields + [
+    ("platform", ctypes.c_uint32),
+    ("minos", ctypes.c_uint32),
+    ("sdk", ctypes.c_uint32),
+    ("ntools", ctypes.c_uint32),
+]
+"""
+struct build_version_command {
+    uint32_t	cmd;		/* LC_BUILD_VERSION */
+    uint32_t	cmdsize;	/* sizeof(struct build_version_command) plus */
+                                /* ntools * sizeof(struct build_tool_version) */
+    uint32_t	platform;	/* platform */
+    uint32_t	minos;		/* X.Y.Z is encoded in nibbles xxxx.yy.zz */
+    uint32_t	sdk;		/* X.Y.Z is encoded in nibbles xxxx.yy.zz */
+    uint32_t	ntools;		/* number of tool entries following this */
+};
+"""
+
+
+def swap32(x: int) -> int:
+    return (
+        ((x << 24) & 0xFF000000)
+        | ((x << 8) & 0x00FF0000)
+        | ((x >> 8) & 0x0000FF00)
+        | ((x >> 24) & 0x000000FF)
+    )
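A quick sanity check: `swap32` reverses the four bytes of a 32-bit value, mapping each `_CIGAM` constant back to its `_MAGIC` counterpart:

```python
assert swap32(FAT_CIGAM) == FAT_MAGIC      # 0xBEBAFECA -> 0xCAFEBABE
assert swap32(MH_CIGAM_64) == MH_MAGIC_64  # 0xCFFAEDFE -> 0xFEEDFACF
assert swap32(swap32(0x12345678)) == 0x12345678  # involution
```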
+
+
+def get_base_class_and_magic_number(
+    lib_file: BufferedIOBase,
+    seek: int | None = None,
+) -> tuple[type[ctypes.Structure], int]:
+    if seek is None:
+        seek = lib_file.tell()
+    else:
+        lib_file.seek(seek)
+    magic_number = ctypes.c_uint32.from_buffer_copy(
+        lib_file.read(ctypes.sizeof(ctypes.c_uint32))
+    ).value
+
+    # Handle wrong byte order
+    if magic_number in [FAT_CIGAM, FAT_CIGAM_64, MH_CIGAM, MH_CIGAM_64]:
+        if sys.byteorder == "little":
+            BaseClass = ctypes.BigEndianStructure
+        else:
+            BaseClass = ctypes.LittleEndianStructure
+
+        magic_number = swap32(magic_number)
+    else:
+        BaseClass = ctypes.Structure
+
+    lib_file.seek(seek)
+    return BaseClass, magic_number
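Whatever the host byte order, the helper normalizes a reversed magic number to its native `_MAGIC` value and selects the structure base class with the matching endianness. A small sketch with an in-memory stream:

```python
import io

# MH_MAGIC as it appears on disk in big-endian byte order.
data = io.BytesIO(bytes.fromhex("feedface"))
base_class, magic = get_base_class_and_magic_number(data)
assert magic == MH_MAGIC
# On a little-endian host the raw read yields MH_CIGAM, so base_class is
# ctypes.BigEndianStructure; on a big-endian host it is ctypes.Structure.
```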
+
+
+def read_data(struct_class: type[ctypes.Structure], lib_file: BufferedIOBase):
+    return struct_class.from_buffer_copy(lib_file.read(ctypes.sizeof(struct_class)))
+
+
+def extract_macosx_min_system_version(path_to_lib: str):
+    with open(path_to_lib, "rb") as lib_file:
+        BaseClass, magic_number = get_base_class_and_magic_number(lib_file, 0)
+        if magic_number not in [FAT_MAGIC, FAT_MAGIC_64, MH_MAGIC, MH_MAGIC_64]:
+            return
+
+        # CIGAM variants were already byte-swapped above, so only the
+        # native _MAGIC values can appear here.
+        if magic_number in [FAT_MAGIC, FAT_MAGIC_64]:
+
+            class FatHeader(BaseClass):
+                _fields_ = fat_header_fields
+
+            fat_header = read_data(FatHeader, lib_file)
+            if magic_number == FAT_MAGIC:
+
+                class FatArch(BaseClass):
+                    _fields_ = fat_arch_fields
+
+            else:
+
+                class FatArch(BaseClass):
+                    _fields_ = fat_arch_64_fields
+
+            fat_arch_list = [
+                read_data(FatArch, lib_file) for _ in range(fat_header.nfat_arch)
+            ]
+
+            versions_list: list[tuple[int, int, int]] = []
+            for el in fat_arch_list:
+                try:
+                    version = read_mach_header(lib_file, el.offset)
+                    if version is not None:
+                        if el.cputype == CPU_TYPE_ARM64 and len(fat_arch_list) != 1:
+                            # Xcode will not set the deployment target below 11.0.0
+                            # for the arm64 architecture. Ignore the arm64 deployment
+                            # in fat binaries when the target is 11.0.0, that way
+                            # the other architectures can select a lower deployment
+                            # target.
+                            # This is safe because there is no arm64 variant for
+                            # macOS 10.15 or earlier.
+                            if version == (11, 0, 0):
+                                continue
+                        versions_list.append(version)
+                except ValueError:
+                    pass
+
+            if len(versions_list) > 0:
+                return max(versions_list)
+            else:
+                return None
+
+        else:
+            try:
+                return read_mach_header(lib_file, 0)
+            except ValueError:
+                """when some error during read library files"""
+                return None
+
+
+def read_mach_header(
+    lib_file: BufferedIOBase,
+    seek: int | None = None,
+) -> tuple[int, int, int] | None:
+    """
+    This function parses a Mach-O header and extracts
+    information about the minimal macOS version.
+
+    :param lib_file: reference to opened library file with pointer
+    """
+    base_class, magic_number = get_base_class_and_magic_number(lib_file, seek)
+    arch = "32" if magic_number == MH_MAGIC else "64"
+
+    class SegmentBase(base_class):
+        _fields_ = segment_base_fields
+
+    if arch == "32":
+
+        class MachHeader(base_class):
+            _fields_ = mach_header_fields
+
+    else:
+
+        class MachHeader(base_class):
+            _fields_ = mach_header_fields_64
+
+    mach_header = read_data(MachHeader, lib_file)
+    for _i in range(mach_header.ncmds):
+        pos = lib_file.tell()
+        segment_base = read_data(SegmentBase, lib_file)
+        lib_file.seek(pos)
+        if segment_base.cmd == LC_VERSION_MIN_MACOSX:
+
+            class VersionMinCommand(base_class):
+                _fields_ = version_min_command_fields
+
+            version_info = read_data(VersionMinCommand, lib_file)
+            return parse_version(version_info.version)
+        elif segment_base.cmd == LC_BUILD_VERSION:
+
+            class VersionBuild(base_class):
+                _fields_ = build_version_command_fields
+
+            version_info = read_data(VersionBuild, lib_file)
+            return parse_version(version_info.minos)
+        else:
+            lib_file.seek(pos + segment_base.cmdsize)
+            continue
+
+
+def parse_version(version: int) -> tuple[int, int, int]:
+    x = (version & 0xFFFF0000) >> 16
+    y = (version & 0x0000FF00) >> 8
+    z = version & 0x000000FF
+    return x, y, z
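Both load commands encode X.Y.Z versions in nibbles as xxxx.yy.zz, so decoding is plain masking and shifting:

```python
assert parse_version(0x000A0E00) == (10, 14, 0)  # macOS 10.14.0
assert parse_version(0x000B0000) == (11, 0, 0)   # macOS 11.0.0
```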
+
+
+def calculate_macosx_platform_tag(archive_root: StrPath, platform_tag: str) -> str:
+    """
+    Calculate proper macosx platform tag basing on files which are included to wheel
+
+    Example platform tag `macosx-10.14-x86_64`
+    """
+    prefix, base_version, suffix = platform_tag.split("-")
+    base_version = tuple(int(x) for x in base_version.split("."))
+    base_version = base_version[:2]
+    if base_version[0] > 10:
+        base_version = (base_version[0], 0)
+    assert len(base_version) == 2
+    if "MACOSX_DEPLOYMENT_TARGET" in os.environ:
+        deploy_target = tuple(
+            int(x) for x in os.environ["MACOSX_DEPLOYMENT_TARGET"].split(".")
+        )
+        deploy_target = deploy_target[:2]
+        if deploy_target[0] > 10:
+            deploy_target = (deploy_target[0], 0)
+        if deploy_target < base_version:
+            sys.stderr.write(
+                "[WARNING] MACOSX_DEPLOYMENT_TARGET is set to a lower value ({}) than "
+                "the version on which the Python interpreter was compiled ({}), and "
+                "will be ignored.\n".format(
+                    ".".join(str(x) for x in deploy_target),
+                    ".".join(str(x) for x in base_version),
+                )
+            )
+        else:
+            base_version = deploy_target
+
+    assert len(base_version) == 2
+    start_version = base_version
+    versions_dict: dict[str, tuple[int, int]] = {}
+    for dirpath, _dirnames, filenames in os.walk(archive_root):
+        for filename in filenames:
+            if filename.endswith(".dylib") or filename.endswith(".so"):
+                lib_path = os.path.join(dirpath, filename)
+                min_ver = extract_macosx_min_system_version(lib_path)
+                if min_ver is not None:
+                    min_ver = min_ver[0:2]
+                    if min_ver[0] > 10:
+                        min_ver = (min_ver[0], 0)
+                    versions_dict[lib_path] = min_ver
+
+    if len(versions_dict) > 0:
+        base_version = max(base_version, max(versions_dict.values()))
+
+    # macOS platform tags do not support minor/bugfix releases
+    fin_base_version = "_".join([str(x) for x in base_version])
+    if start_version < base_version:
+        problematic_files = [k for k, v in versions_dict.items() if v > start_version]
+        # Count the files before joining them into one string; otherwise
+        # len() would measure the string length instead of the file count.
+        files_form = "this file" if len(problematic_files) == 1 else "these files"
+        problematic_files = "\n".join(problematic_files)
+        error_message = (
+            "[WARNING] This wheel needs a higher macOS version than {} "
+            "To silence this warning, set MACOSX_DEPLOYMENT_TARGET to at least "
+            + fin_base_version
+            + " or recreate "
+            + files_form
+            + " with a lower "
+            "MACOSX_DEPLOYMENT_TARGET:\n" + problematic_files
+        )
+
+        if "MACOSX_DEPLOYMENT_TARGET" in os.environ:
+            error_message = error_message.format(
+                "is set in MACOSX_DEPLOYMENT_TARGET variable."
+            )
+        else:
+            error_message = error_message.format(
+                "the version your Python interpreter is compiled against."
+            )
+
+        sys.stderr.write(error_message)
+
+    platform_tag = prefix + "_" + fin_base_version + "_" + suffix
+    return platform_tag
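A sketch of how this entry point might be driven; the archive root and the starting platform tag are hypothetical:

```python
# Given an unpacked wheel tree and the tag the build initially produced,
# derive the tag that reflects the binaries actually shipped.
tag = calculate_macosx_platform_tag(
    "build/bdist.macosx-10.9-x86_64/wheel",  # hypothetical archive root
    "macosx-10.9-x86_64",
)
print(tag)  # e.g. macosx_10_14_x86_64 if a bundled .dylib requires 10.14
```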
diff --git a/.venv/lib/python3.12/site-packages/setuptools/_vendor/wheel/metadata.py b/.venv/lib/python3.12/site-packages/setuptools/_vendor/wheel/metadata.py
new file mode 100644
index 00000000..b8098fa8
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/setuptools/_vendor/wheel/metadata.py
@@ -0,0 +1,183 @@
+"""
+Tools for converting old- to new-style metadata.
+"""
+
+from __future__ import annotations
+
+import functools
+import itertools
+import os.path
+import re
+import textwrap
+from email.message import Message
+from email.parser import Parser
+from typing import Generator, Iterable, Iterator, Literal
+
+from .vendored.packaging.requirements import Requirement
+
+
+def _nonblank(line: str) -> bool | Literal[""]:
+    return line and not line.startswith("#")
+
+
+@functools.singledispatch
+def yield_lines(iterable: Iterable[str]) -> Iterator[str]:
+    r"""
+    Yield valid lines of a string or iterable.
+    >>> list(yield_lines(''))
+    []
+    >>> list(yield_lines(['foo', 'bar']))
+    ['foo', 'bar']
+    >>> list(yield_lines('foo\nbar'))
+    ['foo', 'bar']
+    >>> list(yield_lines('\nfoo\n#bar\nbaz #comment'))
+    ['foo', 'baz #comment']
+    >>> list(yield_lines(['foo\nbar', 'baz', 'bing\n\n\n']))
+    ['foo', 'bar', 'baz', 'bing']
+    """
+    return itertools.chain.from_iterable(map(yield_lines, iterable))
+
+
+@yield_lines.register(str)
+def _(text: str) -> Iterator[str]:
+    return filter(_nonblank, map(str.strip, text.splitlines()))
+
+
+def split_sections(
+    s: str | Iterator[str],
+) -> Generator[tuple[str | None, list[str]], None, None]:
+    """Split a string or iterable thereof into (section, content) pairs
+    Each ``section`` is a stripped version of the section header ("[section]")
+    and each ``content`` is a list of stripped lines excluding blank lines and
+    comment-only lines.  If there are any such lines before the first section
+    header, they're returned in a first ``section`` of ``None``.
+    """
+    section = None
+    content: list[str] = []
+    for line in yield_lines(s):
+        if line.startswith("["):
+            if line.endswith("]"):
+                if section or content:
+                    yield section, content
+                section = line[1:-1].strip()
+                content = []
+            else:
+                raise ValueError("Invalid section heading", line)
+        else:
+            content.append(line)
+
+    # wrap up last segment
+    yield section, content
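`split_sections` is what later turns a `requires.txt` body into `(section, lines)` pairs, dropping blank and comment-only lines along the way:

```python
requires = """\
packaging>=23

[test]
pytest
# dropped as a comment
pytest-cov
"""
assert list(split_sections(requires)) == [
    (None, ["packaging>=23"]),
    ("test", ["pytest", "pytest-cov"]),
]
```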
+
+
+def safe_extra(extra: str) -> str:
+    """Convert an arbitrary string to a standard 'extra' name
+    Any runs of non-alphanumeric characters are replaced with a single '_',
+    and the result is always lowercased.
+    """
+    return re.sub("[^A-Za-z0-9.-]+", "_", extra).lower()
+
+
+def safe_name(name: str) -> str:
+    """Convert an arbitrary string to a standard distribution name
+    Any runs of non-alphanumeric/. characters are replaced with a single '-'.
+    """
+    return re.sub("[^A-Za-z0-9.]+", "-", name)
+
+
+def requires_to_requires_dist(requirement: Requirement) -> str:
+    """Return the version specifier for a requirement in PEP 345/566 fashion."""
+    if requirement.url:
+        return " @ " + requirement.url
+
+    requires_dist: list[str] = []
+    for spec in requirement.specifier:
+        requires_dist.append(spec.operator + spec.version)
+
+    if requires_dist:
+        return " " + ",".join(sorted(requires_dist))
+    else:
+        return ""
+
+
+def convert_requirements(requirements: list[str]) -> Iterator[str]:
+    """Yield Requires-Dist: strings for parsed requirements strings."""
+    for req in requirements:
+        parsed_requirement = Requirement(req)
+        spec = requires_to_requires_dist(parsed_requirement)
+        extras = ",".join(sorted(safe_extra(e) for e in parsed_requirement.extras))
+        if extras:
+            extras = f"[{extras}]"
+
+        yield safe_name(parsed_requirement.name) + extras + spec
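Putting `safe_name`, `safe_extra`, and `requires_to_requires_dist` together, a requirement string normalizes like this:

```python
assert list(convert_requirements(["requests[security]>=2.8.1"])) == [
    "requests[security] >=2.8.1"
]
# Direct references keep their URL instead of a version specifier.
assert list(
    convert_requirements(["pip @ https://github.com/pypa/pip/archive/1.3.1.zip"])
) == ["pip @ https://github.com/pypa/pip/archive/1.3.1.zip"]
```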
+
+
+def generate_requirements(
+    extras_require: dict[str | None, list[str]],
+) -> Iterator[tuple[str, str]]:
+    """
+    Convert requirements from a setup()-style dictionary to
+    ('Requires-Dist', 'requirement') and ('Provides-Extra', 'extra') tuples.
+
+    extras_require is a dictionary of {extra: [requirements]} as passed to setup(),
+    using the empty extra {'': [requirements]} to hold install_requires.
+    """
+    for extra, depends in extras_require.items():
+        condition = ""
+        extra = extra or ""
+        if ":" in extra:  # setuptools extra:condition syntax
+            extra, condition = extra.split(":", 1)
+
+        extra = safe_extra(extra)
+        if extra:
+            yield "Provides-Extra", extra
+            if condition:
+                condition = "(" + condition + ") and "
+            condition += f"extra == '{extra}'"
+
+        if condition:
+            condition = " ; " + condition
+
+        for new_req in convert_requirements(depends):
+            canonical_req = str(Requirement(new_req + condition))
+            yield "Requires-Dist", canonical_req
+
+
+def pkginfo_to_metadata(egg_info_path: str, pkginfo_path: str) -> Message:
+    """
+    Convert .egg-info directory with PKG-INFO to the Metadata 2.1 format
+    """
+    with open(pkginfo_path, encoding="utf-8") as headers:
+        pkg_info = Parser().parse(headers)
+
+    pkg_info.replace_header("Metadata-Version", "2.1")
+    # Those will be regenerated from `requires.txt`.
+    del pkg_info["Provides-Extra"]
+    del pkg_info["Requires-Dist"]
+    requires_path = os.path.join(egg_info_path, "requires.txt")
+    if os.path.exists(requires_path):
+        with open(requires_path, encoding="utf-8") as requires_file:
+            requires = requires_file.read()
+
+        parsed_requirements = sorted(split_sections(requires), key=lambda x: x[0] or "")
+        for extra, reqs in parsed_requirements:
+            for key, value in generate_requirements({extra: reqs}):
+                if (key, value) not in pkg_info.items():
+                    pkg_info[key] = value
+
+    description = pkg_info["Description"]
+    if description:
+        description_lines = pkg_info["Description"].splitlines()
+        dedented_description = "\n".join(
+            # if the first line of long_description is blank,
+            # the first line here will be indented.
+            (
+                description_lines[0].lstrip(),
+                textwrap.dedent("\n".join(description_lines[1:])),
+                "\n",
+            )
+        )
+        pkg_info.set_payload(dedented_description)
+        del pkg_info["Description"]
+
+    return pkg_info
diff --git a/.venv/lib/python3.12/site-packages/setuptools/_vendor/wheel/util.py b/.venv/lib/python3.12/site-packages/setuptools/_vendor/wheel/util.py
new file mode 100644
index 00000000..c928aa40
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/setuptools/_vendor/wheel/util.py
@@ -0,0 +1,17 @@
+from __future__ import annotations
+
+import base64
+import logging
+
+log = logging.getLogger("wheel")
+
+
+def urlsafe_b64encode(data: bytes) -> bytes:
+    """urlsafe_b64encode without padding"""
+    return base64.urlsafe_b64encode(data).rstrip(b"=")
+
+
+def urlsafe_b64decode(data: bytes) -> bytes:
+    """urlsafe_b64decode without padding"""
+    pad = b"=" * (4 - (len(data) & 3))
+    return base64.urlsafe_b64decode(data + pad)
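These helpers implement the unpadded urlsafe base64 used for wheel RECORD hash entries; the padding stripped on encode is reconstructed on decode:

```python
digest = b"\x00\xff\x10 wheel"
encoded = urlsafe_b64encode(digest)
assert b"=" not in encoded
assert urlsafe_b64decode(encoded) == digest
```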
diff --git a/.venv/lib/python3.12/site-packages/setuptools/_vendor/wheel/vendored/__init__.py b/.venv/lib/python3.12/site-packages/setuptools/_vendor/wheel/vendored/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/setuptools/_vendor/wheel/vendored/__init__.py
diff --git a/.venv/lib/python3.12/site-packages/setuptools/_vendor/wheel/vendored/packaging/LICENSE b/.venv/lib/python3.12/site-packages/setuptools/_vendor/wheel/vendored/packaging/LICENSE
new file mode 100644
index 00000000..6f62d44e
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/setuptools/_vendor/wheel/vendored/packaging/LICENSE
@@ -0,0 +1,3 @@
+This software is made available under the terms of *either* of the licenses
+found in LICENSE.APACHE or LICENSE.BSD. Contributions to this software is made
+under the terms of *both* these licenses.
diff --git a/.venv/lib/python3.12/site-packages/setuptools/_vendor/wheel/vendored/packaging/LICENSE.APACHE b/.venv/lib/python3.12/site-packages/setuptools/_vendor/wheel/vendored/packaging/LICENSE.APACHE
new file mode 100644
index 00000000..f433b1a5
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/setuptools/_vendor/wheel/vendored/packaging/LICENSE.APACHE
@@ -0,0 +1,177 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
diff --git a/.venv/lib/python3.12/site-packages/setuptools/_vendor/wheel/vendored/packaging/LICENSE.BSD b/.venv/lib/python3.12/site-packages/setuptools/_vendor/wheel/vendored/packaging/LICENSE.BSD
new file mode 100644
index 00000000..42ce7b75
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/setuptools/_vendor/wheel/vendored/packaging/LICENSE.BSD
@@ -0,0 +1,23 @@
+Copyright (c) Donald Stufft and individual contributors.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+    1. Redistributions of source code must retain the above copyright notice,
+       this list of conditions and the following disclaimer.
+
+    2. Redistributions in binary form must reproduce the above copyright
+       notice, this list of conditions and the following disclaimer in the
+       documentation and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/.venv/lib/python3.12/site-packages/setuptools/_vendor/wheel/vendored/packaging/__init__.py b/.venv/lib/python3.12/site-packages/setuptools/_vendor/wheel/vendored/packaging/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/setuptools/_vendor/wheel/vendored/packaging/__init__.py
diff --git a/.venv/lib/python3.12/site-packages/setuptools/_vendor/wheel/vendored/packaging/_elffile.py b/.venv/lib/python3.12/site-packages/setuptools/_vendor/wheel/vendored/packaging/_elffile.py
new file mode 100644
index 00000000..6fb19b30
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/setuptools/_vendor/wheel/vendored/packaging/_elffile.py
@@ -0,0 +1,108 @@
+"""
+ELF file parser.
+
+This provides a class ``ELFFile`` that parses an ELF executable in a similar
+interface to ``ZipFile``. Only the read interface is implemented.
+
+Based on: https://gist.github.com/lyssdod/f51579ae8d93c8657a5564aefc2ffbca
+ELF header: https://refspecs.linuxfoundation.org/elf/gabi4+/ch4.eheader.html
+"""
+
+import enum
+import os
+import struct
+from typing import IO, Optional, Tuple
+
+
+class ELFInvalid(ValueError):
+    pass
+
+
+class EIClass(enum.IntEnum):
+    C32 = 1
+    C64 = 2
+
+
+class EIData(enum.IntEnum):
+    Lsb = 1
+    Msb = 2
+
+
+class EMachine(enum.IntEnum):
+    I386 = 3
+    S390 = 22
+    Arm = 40
+    X8664 = 62
+    AArch64 = 183
+
+
+class ELFFile:
+    """
+    Representation of an ELF executable.
+    """
+
+    def __init__(self, f: IO[bytes]) -> None:
+        self._f = f
+
+        try:
+            ident = self._read("16B")
+        except struct.error:
+            raise ELFInvalid("unable to parse identification")
+        magic = bytes(ident[:4])
+        if magic != b"\x7fELF":
+            raise ELFInvalid(f"invalid magic: {magic!r}")
+
+        self.capacity = ident[4]  # ELF class: 32- or 64-bit (bitness).
+        self.encoding = ident[5]  # Data encoding (endianness).
+
+        try:
+            # e_fmt: Format for the remaining ELF header fields.
+            # p_fmt: Format for a program header entry.
+            # p_idx: Indexes to find p_type, p_offset, and p_filesz.
+            e_fmt, self._p_fmt, self._p_idx = {
+                (1, 1): ("<HHIIIIIHHH", "<IIIIIIII", (0, 1, 4)),  # 32-bit LSB.
+                (1, 2): (">HHIIIIIHHH", ">IIIIIIII", (0, 1, 4)),  # 32-bit MSB.
+                (2, 1): ("<HHIQQQIHHH", "<IIQQQQQQ", (0, 2, 5)),  # 64-bit LSB.
+                (2, 2): (">HHIQQQIHHH", ">IIQQQQQQ", (0, 2, 5)),  # 64-bit MSB.
+            }[(self.capacity, self.encoding)]
+        except KeyError:
+            raise ELFInvalid(
+                f"unrecognized capacity ({self.capacity}) or "
+                f"encoding ({self.encoding})"
+            )
+
+        try:
+            (
+                _,
+                self.machine,  # Architecture type.
+                _,
+                _,
+                self._e_phoff,  # Offset of program header.
+                _,
+                self.flags,  # Processor-specific flags.
+                _,
+                self._e_phentsize,  # Size of each program header entry.
+                self._e_phnum,  # Number of program header entries.
+            ) = self._read(e_fmt)
+        except struct.error as e:
+            raise ELFInvalid("unable to parse machine and section information") from e
+
+    def _read(self, fmt: str) -> Tuple[int, ...]:
+        return struct.unpack(fmt, self._f.read(struct.calcsize(fmt)))
+
+    @property
+    def interpreter(self) -> Optional[str]:
+        """
+        The path recorded in the ``PT_INTERP`` section header.
+        """
+        for index in range(self._e_phnum):
+            self._f.seek(self._e_phoff + self._e_phentsize * index)
+            try:
+                data = self._read(self._p_fmt)
+            except struct.error:
+                continue
+            if data[self._p_idx[0]] != 3:  # Not PT_INTERP.
+                continue
+            self._f.seek(data[self._p_idx[1]])
+            return os.fsdecode(self._f.read(data[self._p_idx[2]])).strip("\0")
+        return None
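A sketch of how `ELFFile` is driven in practice; this mirrors how `_musllinux` locates the dynamic loader of the running interpreter, and on non-ELF platforms the constructor raises `ELFInvalid`:

```python
import sys

try:
    with open(sys.executable, "rb") as f:
        elf = ELFFile(f)
        print("machine:", elf.machine)          # e.g. 62 (x86-64)
        print("interpreter:", elf.interpreter)  # e.g. /lib64/ld-linux-x86-64.so.2
except (ELFInvalid, OSError):
    print("not a readable ELF executable")
```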
diff --git a/.venv/lib/python3.12/site-packages/setuptools/_vendor/wheel/vendored/packaging/_manylinux.py b/.venv/lib/python3.12/site-packages/setuptools/_vendor/wheel/vendored/packaging/_manylinux.py
new file mode 100644
index 00000000..1f5f4ab3
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/setuptools/_vendor/wheel/vendored/packaging/_manylinux.py
@@ -0,0 +1,260 @@
+import collections
+import contextlib
+import functools
+import os
+import re
+import sys
+import warnings
+from typing import Dict, Generator, Iterator, NamedTuple, Optional, Sequence, Tuple
+
+from ._elffile import EIClass, EIData, ELFFile, EMachine
+
+EF_ARM_ABIMASK = 0xFF000000
+EF_ARM_ABI_VER5 = 0x05000000
+EF_ARM_ABI_FLOAT_HARD = 0x00000400
+
+
+# `os.PathLike` not a generic type until Python 3.9, so sticking with `str`
+# as the type for `path` until then.
+@contextlib.contextmanager
+def _parse_elf(path: str) -> Generator[Optional[ELFFile], None, None]:
+    try:
+        with open(path, "rb") as f:
+            yield ELFFile(f)
+    except (OSError, TypeError, ValueError):
+        yield None
+
+
+def _is_linux_armhf(executable: str) -> bool:
+    # hard-float ABI can be detected from the ELF header of the running
+    # process
+    # https://static.docs.arm.com/ihi0044/g/aaelf32.pdf
+    with _parse_elf(executable) as f:
+        return (
+            f is not None
+            and f.capacity == EIClass.C32
+            and f.encoding == EIData.Lsb
+            and f.machine == EMachine.Arm
+            and f.flags & EF_ARM_ABIMASK == EF_ARM_ABI_VER5
+            and f.flags & EF_ARM_ABI_FLOAT_HARD == EF_ARM_ABI_FLOAT_HARD
+        )
+
+
+def _is_linux_i686(executable: str) -> bool:
+    with _parse_elf(executable) as f:
+        return (
+            f is not None
+            and f.capacity == EIClass.C32
+            and f.encoding == EIData.Lsb
+            and f.machine == EMachine.I386
+        )
+
+
+def _have_compatible_abi(executable: str, archs: Sequence[str]) -> bool:
+    if "armv7l" in archs:
+        return _is_linux_armhf(executable)
+    if "i686" in archs:
+        return _is_linux_i686(executable)
+    allowed_archs = {
+        "x86_64",
+        "aarch64",
+        "ppc64",
+        "ppc64le",
+        "s390x",
+        "loongarch64",
+        "riscv64",
+    }
+    return any(arch in allowed_archs for arch in archs)
+
+
+# If glibc ever changes its major version, we need to know what the last
+# minor version was, so we can build the complete list of all versions.
+# For now, guess what the highest minor version might be, assume it will
+# be 50 for testing. Once this actually happens, update the dictionary
+# with the actual value.
+_LAST_GLIBC_MINOR: Dict[int, int] = collections.defaultdict(lambda: 50)
+
+
+class _GLibCVersion(NamedTuple):
+    major: int
+    minor: int
+
+
+def _glibc_version_string_confstr() -> Optional[str]:
+    """
+    Primary implementation of glibc_version_string using os.confstr.
+    """
+    # os.confstr is quite a bit faster than ctypes.DLL. It's also less likely
+    # to be broken or missing. This strategy is used in the standard library
+    # platform module.
+    # https://github.com/python/cpython/blob/fcf1d003bf4f0100c/Lib/platform.py#L175-L183
+    try:
+        # Should be a string like "glibc 2.17".
+        version_string: Optional[str] = os.confstr("CS_GNU_LIBC_VERSION")
+        assert version_string is not None
+        _, version = version_string.rsplit()
+    except (AssertionError, AttributeError, OSError, ValueError):
+        # os.confstr() or CS_GNU_LIBC_VERSION not available (or a bad value)...
+        return None
+    return version
+
+
+def _glibc_version_string_ctypes() -> Optional[str]:
+    """
+    Fallback implementation of glibc_version_string using ctypes.
+    """
+    try:
+        import ctypes
+    except ImportError:
+        return None
+
+    # ctypes.CDLL(None) internally calls dlopen(NULL), and as the dlopen
+    # manpage says, "If filename is NULL, then the returned handle is for the
+    # main program". This way we can let the linker do the work to figure out
+    # which libc our process is actually using.
+    #
+    # We must also handle the special case where the executable is not a
+    # dynamically linked executable. This can occur when using musl libc,
+    # for example. In this situation, dlopen() will error, leading to an
+    # OSError. Interestingly, at least in the case of musl, there is no
+    # errno set on the OSError. The single string argument used to construct
+    # OSError comes from libc itself and is therefore not portable to
+    # hard code here. In any case, failure to call dlopen() means we
+    # can proceed, so we bail on our attempt.
+    try:
+        process_namespace = ctypes.CDLL(None)
+    except OSError:
+        return None
+
+    try:
+        gnu_get_libc_version = process_namespace.gnu_get_libc_version
+    except AttributeError:
+        # Symbol doesn't exist -> therefore, we are not linked to
+        # glibc.
+        return None
+
+    # Call gnu_get_libc_version, which returns a string like "2.5"
+    gnu_get_libc_version.restype = ctypes.c_char_p
+    version_str: str = gnu_get_libc_version()
+    # py2 / py3 compatibility:
+    if not isinstance(version_str, str):
+        version_str = version_str.decode("ascii")
+
+    return version_str
+
+
+def _glibc_version_string() -> Optional[str]:
+    """Returns glibc version string, or None if not using glibc."""
+    return _glibc_version_string_confstr() or _glibc_version_string_ctypes()
+
+
+def _parse_glibc_version(version_str: str) -> Tuple[int, int]:
+    """Parse glibc version.
+
+    We use a regexp instead of str.split because we want to discard any
+    random junk that might come after the minor version -- this might happen
+    in patched/forked versions of glibc (e.g. Linaro's version of glibc
+    uses version strings like "2.20-2014.11"). See gh-3588.
+    """
+    m = re.match(r"(?P<major>[0-9]+)\.(?P<minor>[0-9]+)", version_str)
+    if not m:
+        warnings.warn(
+            f"Expected glibc version with 2 components major.minor,"
+            f" got: {version_str}",
+            RuntimeWarning,
+        )
+        return -1, -1
+    return int(m.group("major")), int(m.group("minor"))
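The regexp deliberately tolerates vendor suffixes, per the Linaro example in the docstring:

```python
assert _parse_glibc_version("2.17") == (2, 17)
assert _parse_glibc_version("2.20-2014.11") == (2, 20)  # Linaro-style suffix
assert _parse_glibc_version("rolling") == (-1, -1)      # unparsable, with a warning
```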
+
+
+@functools.lru_cache
+def _get_glibc_version() -> Tuple[int, int]:
+    version_str = _glibc_version_string()
+    if version_str is None:
+        return (-1, -1)
+    return _parse_glibc_version(version_str)
+
+
+# From PEP 513, PEP 600
+def _is_compatible(arch: str, version: _GLibCVersion) -> bool:
+    sys_glibc = _get_glibc_version()
+    if sys_glibc < version:
+        return False
+    # Check for presence of _manylinux module.
+    try:
+        import _manylinux
+    except ImportError:
+        return True
+    if hasattr(_manylinux, "manylinux_compatible"):
+        result = _manylinux.manylinux_compatible(version[0], version[1], arch)
+        if result is not None:
+            return bool(result)
+        return True
+    if version == _GLibCVersion(2, 5):
+        if hasattr(_manylinux, "manylinux1_compatible"):
+            return bool(_manylinux.manylinux1_compatible)
+    if version == _GLibCVersion(2, 12):
+        if hasattr(_manylinux, "manylinux2010_compatible"):
+            return bool(_manylinux.manylinux2010_compatible)
+    if version == _GLibCVersion(2, 17):
+        if hasattr(_manylinux, "manylinux2014_compatible"):
+            return bool(_manylinux.manylinux2014_compatible)
+    return True
+
+
+_LEGACY_MANYLINUX_MAP = {
+    # CentOS 7 w/ glibc 2.17 (PEP 599)
+    (2, 17): "manylinux2014",
+    # CentOS 6 w/ glibc 2.12 (PEP 571)
+    (2, 12): "manylinux2010",
+    # CentOS 5 w/ glibc 2.5 (PEP 513)
+    (2, 5): "manylinux1",
+}
+
+
+def platform_tags(archs: Sequence[str]) -> Iterator[str]:
+    """Generate manylinux tags compatible to the current platform.
+
+    :param archs: Sequence of compatible architectures.
+        The first one shall be the closest to the actual architecture and is the part
+        of the platform tag after the ``linux_`` prefix, e.g. ``x86_64``.
+        The ``linux_`` prefix is assumed as a prerequisite for the current platform to
+        be manylinux-compatible.
+
+    :returns: An iterator of compatible manylinux tags.
+    """
+    if not _have_compatible_abi(sys.executable, archs):
+        return
+    # Oldest glibc to be supported regardless of architecture is (2, 17).
+    too_old_glibc2 = _GLibCVersion(2, 16)
+    if set(archs) & {"x86_64", "i686"}:
+        # On x86/i686 also oldest glibc to be supported is (2, 5).
+        too_old_glibc2 = _GLibCVersion(2, 4)
+    current_glibc = _GLibCVersion(*_get_glibc_version())
+    glibc_max_list = [current_glibc]
+    # We can assume compatibility across glibc major versions.
+    # https://sourceware.org/bugzilla/show_bug.cgi?id=24636
+    #
+    # Build a list of maximum glibc versions so that we can
+    # output the canonical list of all glibc from current_glibc
+    # down to too_old_glibc2, including all intermediary versions.
+    for glibc_major in range(current_glibc.major - 1, 1, -1):
+        glibc_minor = _LAST_GLIBC_MINOR[glibc_major]
+        glibc_max_list.append(_GLibCVersion(glibc_major, glibc_minor))
+    for arch in archs:
+        for glibc_max in glibc_max_list:
+            if glibc_max.major == too_old_glibc2.major:
+                min_minor = too_old_glibc2.minor
+            else:
+                # For other glibc major versions oldest supported is (x, 0).
+                min_minor = -1
+            for glibc_minor in range(glibc_max.minor, min_minor, -1):
+                glibc_version = _GLibCVersion(glibc_max.major, glibc_minor)
+                tag = "manylinux_{}_{}".format(*glibc_version)
+                if _is_compatible(arch, glibc_version):
+                    yield f"{tag}_{arch}"
+                # Handle the legacy manylinux1, manylinux2010, manylinux2014 tags.
+                if glibc_version in _LEGACY_MANYLINUX_MAP:
+                    legacy_tag = _LEGACY_MANYLINUX_MAP[glibc_version]
+                    if _is_compatible(arch, glibc_version):
+                        yield f"{legacy_tag}_{arch}"
diff --git a/.venv/lib/python3.12/site-packages/setuptools/_vendor/wheel/vendored/packaging/_musllinux.py b/.venv/lib/python3.12/site-packages/setuptools/_vendor/wheel/vendored/packaging/_musllinux.py
new file mode 100644
index 00000000..eb4251b5
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/setuptools/_vendor/wheel/vendored/packaging/_musllinux.py
@@ -0,0 +1,83 @@
+"""PEP 656 support.
+
+This module implements logic to detect if the currently running Python is
+linked against musl, and what musl version is used.
+"""
+
+import functools
+import re
+import subprocess
+import sys
+from typing import Iterator, NamedTuple, Optional, Sequence
+
+from ._elffile import ELFFile
+
+
+class _MuslVersion(NamedTuple):
+    major: int
+    minor: int
+
+
+def _parse_musl_version(output: str) -> Optional[_MuslVersion]:
+    lines = [n for n in (n.strip() for n in output.splitlines()) if n]
+    if len(lines) < 2 or lines[0][:4] != "musl":
+        return None
+    m = re.match(r"Version (\d+)\.(\d+)", lines[1])
+    if not m:
+        return None
+    return _MuslVersion(major=int(m.group(1)), minor=int(m.group(2)))
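A check against the loader banner format quoted in `_get_musl_version`'s docstring below:

```python
banner = """\
musl libc (x86_64)
Version 1.2.2
Dynamic Program Loader
"""
assert _parse_musl_version(banner) == _MuslVersion(major=1, minor=2)
assert _parse_musl_version("glibc 2.31") is None  # not a musl banner
```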
+
+
+@functools.lru_cache
+def _get_musl_version(executable: str) -> Optional[_MuslVersion]:
+    """Detect currently-running musl runtime version.
+
+    This is done by checking the specified executable's dynamic linking
+    information, and invoking the loader to parse its output for a version
+    string. If the loader is musl, the output would be something like::
+
+        musl libc (x86_64)
+        Version 1.2.2
+        Dynamic Program Loader
+    """
+    try:
+        with open(executable, "rb") as f:
+            ld = ELFFile(f).interpreter
+    except (OSError, TypeError, ValueError):
+        return None
+    if ld is None or "musl" not in ld:
+        return None
+    proc = subprocess.run([ld], stderr=subprocess.PIPE, text=True)
+    return _parse_musl_version(proc.stderr)
+
+
+def platform_tags(archs: Sequence[str]) -> Iterator[str]:
+    """Generate musllinux tags compatible to the current platform.
+
+    :param archs: Sequence of compatible architectures.
+        The first one shall be the closest to the actual architecture and is the part
+        of the platform tag after the ``linux_`` prefix, e.g. ``x86_64``.
+        The ``linux_`` prefix is assumed as a prerequisite for the current platform to
+        be musllinux-compatible.
+
+    :returns: An iterator of compatible musllinux tags.
+    """
+    sys_musl = _get_musl_version(sys.executable)
+    if sys_musl is None:  # Python not dynamically linked against musl.
+        return
+    for arch in archs:
+        for minor in range(sys_musl.minor, -1, -1):
+            yield f"musllinux_{sys_musl.major}_{minor}_{arch}"
+
+
+if __name__ == "__main__":  # pragma: no cover
+    import sysconfig
+
+    plat = sysconfig.get_platform()
+    assert plat.startswith("linux-"), "not linux"
+
+    print("plat:", plat)
+    print("musl:", _get_musl_version(sys.executable))
+    print("tags:", end=" ")
+    # platform_tags() expects a sequence of architecture strings.
+    for t in platform_tags([re.sub(r"[.-]", "_", plat.split("-", 1)[-1])]):
+        print(t, end="\n      ")
diff --git a/.venv/lib/python3.12/site-packages/setuptools/_vendor/wheel/vendored/packaging/_parser.py b/.venv/lib/python3.12/site-packages/setuptools/_vendor/wheel/vendored/packaging/_parser.py
new file mode 100644
index 00000000..513686a2
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/setuptools/_vendor/wheel/vendored/packaging/_parser.py
@@ -0,0 +1,356 @@
+"""Handwritten parser of dependency specifiers.
+
+The docstring for each _parse_* function contains EBNF-inspired grammar representing
+the implementation.
+"""
+
+import ast
+from typing import Any, List, NamedTuple, Optional, Tuple, Union
+
+from ._tokenizer import DEFAULT_RULES, Tokenizer
+
+
+class Node:
+    def __init__(self, value: str) -> None:
+        self.value = value
+
+    def __str__(self) -> str:
+        return self.value
+
+    def __repr__(self) -> str:
+        return f"<{self.__class__.__name__}('{self}')>"
+
+    def serialize(self) -> str:
+        raise NotImplementedError
+
+
+class Variable(Node):
+    def serialize(self) -> str:
+        return str(self)
+
+
+class Value(Node):
+    def serialize(self) -> str:
+        return f'"{self}"'
+
+
+class Op(Node):
+    def serialize(self) -> str:
+        return str(self)
+
+
+MarkerVar = Union[Variable, Value]
+MarkerItem = Tuple[MarkerVar, Op, MarkerVar]
+# MarkerAtom = Union[MarkerItem, List["MarkerAtom"]]
+# MarkerList = List[Union["MarkerList", MarkerAtom, str]]
+# mypy does not support recursive type definition
+# https://github.com/python/mypy/issues/731
+MarkerAtom = Any
+MarkerList = List[Any]
+
+
+class ParsedRequirement(NamedTuple):
+    name: str
+    url: str
+    extras: List[str]
+    specifier: str
+    marker: Optional[MarkerList]
+
+
+# --------------------------------------------------------------------------------------
+# Recursive descent parser for dependency specifier
+# --------------------------------------------------------------------------------------
+def parse_requirement(source: str) -> ParsedRequirement:
+    return _parse_requirement(Tokenizer(source, rules=DEFAULT_RULES))
+
+
+def _parse_requirement(tokenizer: Tokenizer) -> ParsedRequirement:
+    """
+    requirement = WS? IDENTIFIER WS? extras WS? requirement_details
+    """
+    tokenizer.consume("WS")
+
+    name_token = tokenizer.expect(
+        "IDENTIFIER", expected="package name at the start of dependency specifier"
+    )
+    name = name_token.text
+    tokenizer.consume("WS")
+
+    extras = _parse_extras(tokenizer)
+    tokenizer.consume("WS")
+
+    url, specifier, marker = _parse_requirement_details(tokenizer)
+    tokenizer.expect("END", expected="end of dependency specifier")
+
+    return ParsedRequirement(name, url, extras, specifier, marker)
+
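As an illustration (annotation, not part of the vendored file), the entry point returns a flat ParsedRequirement tuple:

    req = parse_requirement('requests[security] >=2.8.1,==2.8.* ; python_version < "2.7"')
    assert req.name == "requests"
    assert req.extras == ["security"]
    assert req.specifier == ">=2.8.1,==2.8.*"
    assert req.url == ""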
+
+def _parse_requirement_details(
+    tokenizer: Tokenizer,
+) -> Tuple[str, str, Optional[MarkerList]]:
+    """
+    requirement_details = AT URL (WS requirement_marker?)?
+                        | specifier WS? (requirement_marker)?
+    """
+
+    specifier = ""
+    url = ""
+    marker = None
+
+    if tokenizer.check("AT"):
+        tokenizer.read()
+        tokenizer.consume("WS")
+
+        url_start = tokenizer.position
+        url = tokenizer.expect("URL", expected="URL after @").text
+        if tokenizer.check("END", peek=True):
+            return (url, specifier, marker)
+
+        tokenizer.expect("WS", expected="whitespace after URL")
+
+        # The input might end after whitespace.
+        if tokenizer.check("END", peek=True):
+            return (url, specifier, marker)
+
+        marker = _parse_requirement_marker(
+            tokenizer, span_start=url_start, after="URL and whitespace"
+        )
+    else:
+        specifier_start = tokenizer.position
+        specifier = _parse_specifier(tokenizer)
+        tokenizer.consume("WS")
+
+        if tokenizer.check("END", peek=True):
+            return (url, specifier, marker)
+
+        marker = _parse_requirement_marker(
+            tokenizer,
+            span_start=specifier_start,
+            after=(
+                "version specifier"
+                if specifier
+                else "name and no valid version specifier"
+            ),
+        )
+
+    return (url, specifier, marker)
+
+
+def _parse_requirement_marker(
+    tokenizer: Tokenizer, *, span_start: int, after: str
+) -> MarkerList:
+    """
+    requirement_marker = SEMICOLON marker WS?
+    """
+
+    if not tokenizer.check("SEMICOLON"):
+        tokenizer.raise_syntax_error(
+            f"Expected end or semicolon (after {after})",
+            span_start=span_start,
+        )
+    tokenizer.read()
+
+    marker = _parse_marker(tokenizer)
+    tokenizer.consume("WS")
+
+    return marker
+
+
+def _parse_extras(tokenizer: Tokenizer) -> List[str]:
+    """
+    extras = (LEFT_BRACKET wsp* extras_list? wsp* RIGHT_BRACKET)?
+    """
+    if not tokenizer.check("LEFT_BRACKET", peek=True):
+        return []
+
+    with tokenizer.enclosing_tokens(
+        "LEFT_BRACKET",
+        "RIGHT_BRACKET",
+        around="extras",
+    ):
+        tokenizer.consume("WS")
+        extras = _parse_extras_list(tokenizer)
+        tokenizer.consume("WS")
+
+    return extras
+
+
+def _parse_extras_list(tokenizer: Tokenizer) -> List[str]:
+    """
+    extras_list = identifier (wsp* ',' wsp* identifier)*
+    """
+    extras: List[str] = []
+
+    if not tokenizer.check("IDENTIFIER"):
+        return extras
+
+    extras.append(tokenizer.read().text)
+
+    while True:
+        tokenizer.consume("WS")
+        if tokenizer.check("IDENTIFIER", peek=True):
+            tokenizer.raise_syntax_error("Expected comma between extra names")
+        elif not tokenizer.check("COMMA"):
+            break
+
+        tokenizer.read()
+        tokenizer.consume("WS")
+
+        extra_token = tokenizer.expect("IDENTIFIER", expected="extra name after comma")
+        extras.append(extra_token.text)
+
+    return extras
+
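A minimal sketch (annotation, not part of the vendored file) of the extras parser in isolation:

    t = Tokenizer("[security,tests]", rules=DEFAULT_RULES)
    assert _parse_extras(t) == ["security", "tests"]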
+
+def _parse_specifier(tokenizer: Tokenizer) -> str:
+    """
+    specifier = LEFT_PARENTHESIS WS? version_many WS? RIGHT_PARENTHESIS
+              | WS? version_many WS?
+    """
+    with tokenizer.enclosing_tokens(
+        "LEFT_PARENTHESIS",
+        "RIGHT_PARENTHESIS",
+        around="version specifier",
+    ):
+        tokenizer.consume("WS")
+        parsed_specifiers = _parse_version_many(tokenizer)
+        tokenizer.consume("WS")
+
+    return parsed_specifiers
+
+
+def _parse_version_many(tokenizer: Tokenizer) -> str:
+    """
+    version_many = (SPECIFIER (WS? COMMA WS? SPECIFIER)*)?
+    """
+    parsed_specifiers = ""
+    while tokenizer.check("SPECIFIER"):
+        span_start = tokenizer.position
+        parsed_specifiers += tokenizer.read().text
+        if tokenizer.check("VERSION_PREFIX_TRAIL", peek=True):
+            tokenizer.raise_syntax_error(
+                ".* suffix can only be used with `==` or `!=` operators",
+                span_start=span_start,
+                span_end=tokenizer.position + 1,
+            )
+        if tokenizer.check("VERSION_LOCAL_LABEL_TRAIL", peek=True):
+            tokenizer.raise_syntax_error(
+                "Local version label can only be used with `==` or `!=` operators",
+                span_start=span_start,
+                span_end=tokenizer.position,
+            )
+        tokenizer.consume("WS")
+        if not tokenizer.check("COMMA"):
+            break
+        parsed_specifiers += tokenizer.read().text
+        tokenizer.consume("WS")
+
+    return parsed_specifiers
+
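Note that whitespace around commas is consumed rather than echoed, so the returned string is normalized (annotation, not part of the vendored file):

    t = Tokenizer(">=1.0, <2.0", rules=DEFAULT_RULES)
    assert _parse_version_many(t) == ">=1.0,<2.0"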
+
+# --------------------------------------------------------------------------------------
+# Recursive descent parser for marker expression
+# --------------------------------------------------------------------------------------
+def parse_marker(source: str) -> MarkerList:
+    return _parse_full_marker(Tokenizer(source, rules=DEFAULT_RULES))
+
+
+def _parse_full_marker(tokenizer: Tokenizer) -> MarkerList:
+    retval = _parse_marker(tokenizer)
+    tokenizer.expect("END", expected="end of marker expression")
+    return retval
+
+
+def _parse_marker(tokenizer: Tokenizer) -> MarkerList:
+    """
+    marker = marker_atom (BOOLOP marker_atom)+
+    """
+    expression = [_parse_marker_atom(tokenizer)]
+    while tokenizer.check("BOOLOP"):
+        token = tokenizer.read()
+        expr_right = _parse_marker_atom(tokenizer)
+        expression.extend((token.text, expr_right))
+    return expression
+
+
+def _parse_marker_atom(tokenizer: Tokenizer) -> MarkerAtom:
+    """
+    marker_atom = WS? LEFT_PARENTHESIS WS? marker WS? RIGHT_PARENTHESIS WS?
+                | WS? marker_item WS?
+    """
+
+    tokenizer.consume("WS")
+    if tokenizer.check("LEFT_PARENTHESIS", peek=True):
+        with tokenizer.enclosing_tokens(
+            "LEFT_PARENTHESIS",
+            "RIGHT_PARENTHESIS",
+            around="marker expression",
+        ):
+            tokenizer.consume("WS")
+            marker: MarkerAtom = _parse_marker(tokenizer)
+            tokenizer.consume("WS")
+    else:
+        marker = _parse_marker_item(tokenizer)
+    tokenizer.consume("WS")
+    return marker
+
+
+def _parse_marker_item(tokenizer: Tokenizer) -> MarkerItem:
+    """
+    marker_item = WS? marker_var WS? marker_op WS? marker_var WS?
+    """
+    tokenizer.consume("WS")
+    marker_var_left = _parse_marker_var(tokenizer)
+    tokenizer.consume("WS")
+    marker_op = _parse_marker_op(tokenizer)
+    tokenizer.consume("WS")
+    marker_var_right = _parse_marker_var(tokenizer)
+    tokenizer.consume("WS")
+    return (marker_var_left, marker_op, marker_var_right)
+
+
+def _parse_marker_var(tokenizer: Tokenizer) -> MarkerVar:
+    """
+    marker_var = VARIABLE | QUOTED_STRING
+    """
+    if tokenizer.check("VARIABLE"):
+        return process_env_var(tokenizer.read().text.replace(".", "_"))
+    elif tokenizer.check("QUOTED_STRING"):
+        return process_python_str(tokenizer.read().text)
+    else:
+        tokenizer.raise_syntax_error(
+            message="Expected a marker variable or quoted string"
+        )
+
+
+def process_env_var(env_var: str) -> Variable:
+    if env_var in ("platform_python_implementation", "python_implementation"):
+        return Variable("platform_python_implementation")
+    else:
+        return Variable(env_var)
+
+
+def process_python_str(python_str: str) -> Value:
+    value = ast.literal_eval(python_str)
+    return Value(str(value))
+
+
+def _parse_marker_op(tokenizer: Tokenizer) -> Op:
+    """
+    marker_op = IN | NOT IN | OP
+    """
+    if tokenizer.check("IN"):
+        tokenizer.read()
+        return Op("in")
+    elif tokenizer.check("NOT"):
+        tokenizer.read()
+        tokenizer.expect("WS", expected="whitespace after 'not'")
+        tokenizer.expect("IN", expected="'in' after 'not'")
+        return Op("not in")
+    elif tokenizer.check("OP"):
+        return Op(tokenizer.read().text)
+    else:
+        return tokenizer.raise_syntax_error(
+            "Expected marker operator, one of "
+            "<=, <, !=, ==, >=, >, ~=, ===, in, not in"
+        )
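For completeness (annotation): a marker expression parses into tuples of (variable, op, value) nodes joined by bare "and"/"or" strings.

    m = parse_marker('os_name == "nt" and python_version >= "3.8"')
    left, boolop, right = m
    assert str(left[0]) == "os_name" and boolop == "and" and str(right[2]) == "3.8"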
diff --git a/.venv/lib/python3.12/site-packages/setuptools/_vendor/wheel/vendored/packaging/_structures.py b/.venv/lib/python3.12/site-packages/setuptools/_vendor/wheel/vendored/packaging/_structures.py
new file mode 100644
index 00000000..90a6465f
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/setuptools/_vendor/wheel/vendored/packaging/_structures.py
@@ -0,0 +1,61 @@
+# This file is dual licensed under the terms of the Apache License, Version
+# 2.0, and the BSD License. See the LICENSE file in the root of this repository
+# for complete details.
+
+
+class InfinityType:
+    def __repr__(self) -> str:
+        return "Infinity"
+
+    def __hash__(self) -> int:
+        return hash(repr(self))
+
+    def __lt__(self, other: object) -> bool:
+        return False
+
+    def __le__(self, other: object) -> bool:
+        return False
+
+    def __eq__(self, other: object) -> bool:
+        return isinstance(other, self.__class__)
+
+    def __gt__(self, other: object) -> bool:
+        return True
+
+    def __ge__(self, other: object) -> bool:
+        return True
+
+    def __neg__(self: object) -> "NegativeInfinityType":
+        return NegativeInfinity
+
+
+Infinity = InfinityType()
+
+
+class NegativeInfinityType:
+    def __repr__(self) -> str:
+        return "-Infinity"
+
+    def __hash__(self) -> int:
+        return hash(repr(self))
+
+    def __lt__(self, other: object) -> bool:
+        return True
+
+    def __le__(self, other: object) -> bool:
+        return True
+
+    def __eq__(self, other: object) -> bool:
+        return isinstance(other, self.__class__)
+
+    def __gt__(self, other: object) -> bool:
+        return False
+
+    def __ge__(self, other: object) -> bool:
+        return False
+
+    def __neg__(self: object) -> InfinityType:
+        return Infinity
+
+
+NegativeInfinity = NegativeInfinityType()
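These sentinels exist so that version-comparison keys can substitute a value that sorts above or below everything else (annotation, not part of the vendored file):

    assert Infinity > 999999 and NegativeInfinity < 0
    assert -Infinity is NegativeInfinity and -NegativeInfinity is Infinity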
diff --git a/.venv/lib/python3.12/site-packages/setuptools/_vendor/wheel/vendored/packaging/_tokenizer.py b/.venv/lib/python3.12/site-packages/setuptools/_vendor/wheel/vendored/packaging/_tokenizer.py
new file mode 100644
index 00000000..dd0d648d
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/setuptools/_vendor/wheel/vendored/packaging/_tokenizer.py
@@ -0,0 +1,192 @@
+import contextlib
+import re
+from dataclasses import dataclass
+from typing import Dict, Iterator, NoReturn, Optional, Tuple, Union
+
+from .specifiers import Specifier
+
+
+@dataclass
+class Token:
+    name: str
+    text: str
+    position: int
+
+
+class ParserSyntaxError(Exception):
+    """The provided source text could not be parsed correctly."""
+
+    def __init__(
+        self,
+        message: str,
+        *,
+        source: str,
+        span: Tuple[int, int],
+    ) -> None:
+        self.span = span
+        self.message = message
+        self.source = source
+
+        super().__init__()
+
+    def __str__(self) -> str:
+        marker = " " * self.span[0] + "~" * (self.span[1] - self.span[0]) + "^"
+        return "\n    ".join([self.message, self.source, marker])
+
+
+DEFAULT_RULES: "Dict[str, Union[str, re.Pattern[str]]]" = {
+    "LEFT_PARENTHESIS": r"\(",
+    "RIGHT_PARENTHESIS": r"\)",
+    "LEFT_BRACKET": r"\[",
+    "RIGHT_BRACKET": r"\]",
+    "SEMICOLON": r";",
+    "COMMA": r",",
+    "QUOTED_STRING": re.compile(
+        r"""
+            (
+                ('[^']*')
+                |
+                ("[^"]*")
+            )
+        """,
+        re.VERBOSE,
+    ),
+    "OP": r"(===|==|~=|!=|<=|>=|<|>)",
+    "BOOLOP": r"\b(or|and)\b",
+    "IN": r"\bin\b",
+    "NOT": r"\bnot\b",
+    "VARIABLE": re.compile(
+        r"""
+            \b(
+                python_version
+                |python_full_version
+                |os[._]name
+                |sys[._]platform
+                |platform_(release|system)
+                |platform[._](version|machine|python_implementation)
+                |python_implementation
+                |implementation_(name|version)
+                |extra
+            )\b
+        """,
+        re.VERBOSE,
+    ),
+    "SPECIFIER": re.compile(
+        Specifier._operator_regex_str + Specifier._version_regex_str,
+        re.VERBOSE | re.IGNORECASE,
+    ),
+    "AT": r"\@",
+    "URL": r"[^ \t]+",
+    "IDENTIFIER": r"\b[a-zA-Z0-9][a-zA-Z0-9._-]*\b",
+    "VERSION_PREFIX_TRAIL": r"\.\*",
+    "VERSION_LOCAL_LABEL_TRAIL": r"\+[a-z0-9]+(?:[-_\.][a-z0-9]+)*",
+    "WS": r"[ \t]+",
+    "END": r"$",
+}
+
+
+class Tokenizer:
+    """Context-sensitive token parsing.
+
+    Provides methods to examine the input stream to check whether the next token
+    matches.
+    """
+
+    def __init__(
+        self,
+        source: str,
+        *,
+        rules: "Dict[str, Union[str, re.Pattern[str]]]",
+    ) -> None:
+        self.source = source
+        self.rules: Dict[str, re.Pattern[str]] = {
+            name: re.compile(pattern) for name, pattern in rules.items()
+        }
+        self.next_token: Optional[Token] = None
+        self.position = 0
+
+    def consume(self, name: str) -> None:
+        """Move beyond provided token name, if at current position."""
+        if self.check(name):
+            self.read()
+
+    def check(self, name: str, *, peek: bool = False) -> bool:
+        """Check whether the next token has the provided name.
+
+        By default, if the check succeeds, the token *must* be read before
+        another check. If `peek` is set to `True`, the token is not loaded and
+        would need to be checked again.
+        """
+        assert (
+            self.next_token is None
+        ), f"Cannot check for {name!r}, already have {self.next_token!r}"
+        assert name in self.rules, f"Unknown token name: {name!r}"
+
+        expression = self.rules[name]
+
+        match = expression.match(self.source, self.position)
+        if match is None:
+            return False
+        if not peek:
+            self.next_token = Token(name, match[0], self.position)
+        return True
+
+    def expect(self, name: str, *, expected: str) -> Token:
+        """Expect a certain token name next, failing with a syntax error otherwise.
+
+        The token is read and returned.
+        """
+        if not self.check(name):
+            self.raise_syntax_error(f"Expected {expected}")
+        return self.read()
+
+    def read(self) -> Token:
+        """Consume the next token and return it."""
+        token = self.next_token
+        assert token is not None
+
+        self.position += len(token.text)
+        self.next_token = None
+
+        return token
+
+    def raise_syntax_error(
+        self,
+        message: str,
+        *,
+        span_start: Optional[int] = None,
+        span_end: Optional[int] = None,
+    ) -> NoReturn:
+        """Raise ParserSyntaxError at the given position."""
+        span = (
+            self.position if span_start is None else span_start,
+            self.position if span_end is None else span_end,
+        )
+        raise ParserSyntaxError(
+            message,
+            source=self.source,
+            span=span,
+        )
+
+    @contextlib.contextmanager
+    def enclosing_tokens(
+        self, open_token: str, close_token: str, *, around: str
+    ) -> Iterator[None]:
+        if self.check(open_token):
+            open_position = self.position
+            self.read()
+        else:
+            open_position = None
+
+        yield
+
+        if open_position is None:
+            return
+
+        if not self.check(close_token):
+            self.raise_syntax_error(
+                f"Expected matching {close_token} for {open_token}, after {around}",
+                span_start=open_position,
+            )
+
+        self.read()
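A short usage sketch (annotation, not part of the vendored file): check/consume/expect drive the parser one rule at a time.

    t = Tokenizer("foo >= 1.0", rules=DEFAULT_RULES)
    assert t.expect("IDENTIFIER", expected="package name").text == "foo"
    t.consume("WS")
    assert t.check("SPECIFIER") and t.read().text == ">= 1.0"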
diff --git a/.venv/lib/python3.12/site-packages/setuptools/_vendor/wheel/vendored/packaging/markers.py b/.venv/lib/python3.12/site-packages/setuptools/_vendor/wheel/vendored/packaging/markers.py
new file mode 100644
index 00000000..c96d22a5
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/setuptools/_vendor/wheel/vendored/packaging/markers.py
@@ -0,0 +1,253 @@
+# This file is dual licensed under the terms of the Apache License, Version
+# 2.0, and the BSD License. See the LICENSE file in the root of this repository
+# for complete details.
+
+import operator
+import os
+import platform
+import sys
+from typing import Any, Callable, Dict, List, Optional, Tuple, Union
+
+from ._parser import (
+    MarkerAtom,
+    MarkerList,
+    Op,
+    Value,
+    Variable,
+)
+from ._parser import (
+    parse_marker as _parse_marker,
+)
+from ._tokenizer import ParserSyntaxError
+from .specifiers import InvalidSpecifier, Specifier
+from .utils import canonicalize_name
+
+__all__ = [
+    "InvalidMarker",
+    "UndefinedComparison",
+    "UndefinedEnvironmentName",
+    "Marker",
+    "default_environment",
+]
+
+Operator = Callable[[str, str], bool]
+
+
+class InvalidMarker(ValueError):
+    """
+    An invalid marker was found; users should refer to PEP 508.
+    """
+
+
+class UndefinedComparison(ValueError):
+    """
+    An invalid operation was attempted on a value that doesn't support it.
+    """
+
+
+class UndefinedEnvironmentName(ValueError):
+    """
+    A name that does not exist inside the environment was used.
+    """
+
+
+def _normalize_extra_values(results: Any) -> Any:
+    """
+    Normalize extra values.
+    """
+    if isinstance(results[0], tuple):
+        lhs, op, rhs = results[0]
+        if isinstance(lhs, Variable) and lhs.value == "extra":
+            normalized_extra = canonicalize_name(rhs.value)
+            rhs = Value(normalized_extra)
+        elif isinstance(rhs, Variable) and rhs.value == "extra":
+            normalized_extra = canonicalize_name(lhs.value)
+            lhs = Value(normalized_extra)
+        results[0] = lhs, op, rhs
+    return results
+
+
+def _format_marker(
+    marker: Union[List[str], MarkerAtom, str], first: Optional[bool] = True
+) -> str:
+    assert isinstance(marker, (list, tuple, str))
+
+    # Sometimes we have a structure like [[...]] which is a single item list
+    # where the single item is itself its own list. In that case we want to skip
+    # the rest of this function so that we don't get extraneous () on the
+    # outside.
+    if (
+        isinstance(marker, list)
+        and len(marker) == 1
+        and isinstance(marker[0], (list, tuple))
+    ):
+        return _format_marker(marker[0])
+
+    if isinstance(marker, list):
+        inner = (_format_marker(m, first=False) for m in marker)
+        if first:
+            return " ".join(inner)
+        else:
+            return "(" + " ".join(inner) + ")"
+    elif isinstance(marker, tuple):
+        return " ".join([m.serialize() for m in marker])
+    else:
+        return marker
+
+
+_operators: Dict[str, Operator] = {
+    "in": lambda lhs, rhs: lhs in rhs,
+    "not in": lambda lhs, rhs: lhs not in rhs,
+    "<": operator.lt,
+    "<=": operator.le,
+    "==": operator.eq,
+    "!=": operator.ne,
+    ">=": operator.ge,
+    ">": operator.gt,
+}
+
+
+def _eval_op(lhs: str, op: Op, rhs: str) -> bool:
+    try:
+        spec = Specifier("".join([op.serialize(), rhs]))
+    except InvalidSpecifier:
+        pass
+    else:
+        return spec.contains(lhs, prereleases=True)
+
+    oper: Optional[Operator] = _operators.get(op.serialize())
+    if oper is None:
+        raise UndefinedComparison(f"Undefined {op!r} on {lhs!r} and {rhs!r}.")
+
+    return oper(lhs, rhs)
+
+
+def _normalize(*values: str, key: str) -> Tuple[str, ...]:
+    # PEP 685 – Comparison of extra names for optional distribution dependencies
+    # https://peps.python.org/pep-0685/
+    # > When comparing extra names, tools MUST normalize the names being
+    # > compared using the semantics outlined in PEP 503 for names
+    if key == "extra":
+        return tuple(canonicalize_name(v) for v in values)
+
+    # other environment markers don't have such standards
+    return values
+
+
+def _evaluate_markers(markers: MarkerList, environment: Dict[str, str]) -> bool:
+    groups: List[List[bool]] = [[]]
+
+    for marker in markers:
+        assert isinstance(marker, (list, tuple, str))
+
+        if isinstance(marker, list):
+            groups[-1].append(_evaluate_markers(marker, environment))
+        elif isinstance(marker, tuple):
+            lhs, op, rhs = marker
+
+            if isinstance(lhs, Variable):
+                environment_key = lhs.value
+                lhs_value = environment[environment_key]
+                rhs_value = rhs.value
+            else:
+                lhs_value = lhs.value
+                environment_key = rhs.value
+                rhs_value = environment[environment_key]
+
+            lhs_value, rhs_value = _normalize(lhs_value, rhs_value, key=environment_key)
+            groups[-1].append(_eval_op(lhs_value, op, rhs_value))
+        else:
+            assert marker in ["and", "or"]
+            if marker == "or":
+                groups.append([])
+
+    return any(all(item) for item in groups)
+
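In other words (annotation): evaluation builds disjunctive-normal-form groups, where each "or" opens a new group and the final result is an OR of AND-groups.

    # 'A and B or C' arrives as [A, "and", B, "or", C]; "or" opens a new group,
    # so groups == [[eval(A), eval(B)], [eval(C)]], and the reduction computes
    # (A and B) or C:
    groups = [[True, False], [True]]
    assert any(all(item) for item in groups) is True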
+
+def format_full_version(info: "sys._version_info") -> str:
+    version = "{0.major}.{0.minor}.{0.micro}".format(info)
+    kind = info.releaselevel
+    if kind != "final":
+        version += kind[0] + str(info.serial)
+    return version
+
+
+def default_environment() -> Dict[str, str]:
+    iver = format_full_version(sys.implementation.version)
+    implementation_name = sys.implementation.name
+    return {
+        "implementation_name": implementation_name,
+        "implementation_version": iver,
+        "os_name": os.name,
+        "platform_machine": platform.machine(),
+        "platform_release": platform.release(),
+        "platform_system": platform.system(),
+        "platform_version": platform.version(),
+        "python_full_version": platform.python_version(),
+        "platform_python_implementation": platform.python_implementation(),
+        "python_version": ".".join(platform.python_version_tuple()[:2]),
+        "sys_platform": sys.platform,
+    }
+
+
+class Marker:
+    def __init__(self, marker: str) -> None:
+        # Note: We create a Marker object without calling this constructor in
+        #       packaging.requirements.Requirement. If any additional logic is
+        #       added here, make sure to mirror/adapt Requirement.
+        try:
+            self._markers = _normalize_extra_values(_parse_marker(marker))
+            # The attribute `_markers` can be described in terms of a recursive type:
+            # MarkerList = List[Union[Tuple[Node, ...], str, MarkerList]]
+            #
+            # For example, the following expression:
+            # python_version > "3.6" or (python_version == "3.6" and os_name == "unix")
+            #
+            # is parsed into:
+            # [
+            #     (<Variable('python_version')>, <Op('>')>, <Value('3.6')>),
+            #     'or',
+            #     [
+            #         (<Variable('python_version')>, <Op('==')>, <Value('3.6')>),
+            #         'and',
+            #         (<Variable('os_name')>, <Op('==')>, <Value('unix')>)
+            #     ]
+            # ]
+        except ParserSyntaxError as e:
+            raise InvalidMarker(str(e)) from e
+
+    def __str__(self) -> str:
+        return _format_marker(self._markers)
+
+    def __repr__(self) -> str:
+        return f"<Marker('{self}')>"
+
+    def __hash__(self) -> int:
+        return hash((self.__class__.__name__, str(self)))
+
+    def __eq__(self, other: Any) -> bool:
+        if not isinstance(other, Marker):
+            return NotImplemented
+
+        return str(self) == str(other)
+
+    def evaluate(self, environment: Optional[Dict[str, str]] = None) -> bool:
+        """Evaluate a marker.
+
+        Return the boolean from evaluating the given marker against the
+        environment. environment is an optional argument to override all or
+        part of the determined environment.
+
+        The environment is determined from the current Python process.
+        """
+        current_environment = default_environment()
+        current_environment["extra"] = ""
+        if environment is not None:
+            current_environment.update(environment)
+            # The API used to allow setting extra to None. We need to handle this
+            # case for backwards compatibility.
+            if current_environment["extra"] is None:
+                current_environment["extra"] = ""
+
+        return _evaluate_markers(self._markers, current_environment)
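End-to-end sketch (annotation, not part of the vendored file):

    m = Marker('python_version >= "3.8" and os_name == "posix"')
    assert m.evaluate({"python_version": "3.12", "os_name": "posix"})
    assert not m.evaluate({"python_version": "3.12", "os_name": "nt"})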
diff --git a/.venv/lib/python3.12/site-packages/setuptools/_vendor/wheel/vendored/packaging/requirements.py b/.venv/lib/python3.12/site-packages/setuptools/_vendor/wheel/vendored/packaging/requirements.py
new file mode 100644
index 00000000..bdc43a7e
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/setuptools/_vendor/wheel/vendored/packaging/requirements.py
@@ -0,0 +1,90 @@
+# This file is dual licensed under the terms of the Apache License, Version
+# 2.0, and the BSD License. See the LICENSE file in the root of this repository
+# for complete details.
+
+from typing import Any, Iterator, Optional, Set
+
+from ._parser import parse_requirement as _parse_requirement
+from ._tokenizer import ParserSyntaxError
+from .markers import Marker, _normalize_extra_values
+from .specifiers import SpecifierSet
+from .utils import canonicalize_name
+
+
+class InvalidRequirement(ValueError):
+    """
+    An invalid requirement was found; users should refer to PEP 508.
+    """
+
+
+class Requirement:
+    """Parse a requirement.
+
+    Parse a given requirement string into its parts, such as name, specifier,
+    URL, and extras. Raises InvalidRequirement on a badly-formed requirement
+    string.
+    """
+
+    # TODO: Can we test whether something is contained within a requirement?
+    #       If so how do we do that? Do we need to test against the _name_ of
+    #       the thing as well as the version? What about the markers?
+    # TODO: Can we normalize the name and extra name?
+
+    def __init__(self, requirement_string: str) -> None:
+        try:
+            parsed = _parse_requirement(requirement_string)
+        except ParserSyntaxError as e:
+            raise InvalidRequirement(str(e)) from e
+
+        self.name: str = parsed.name
+        self.url: Optional[str] = parsed.url or None
+        self.extras: Set[str] = set(parsed.extras or [])
+        self.specifier: SpecifierSet = SpecifierSet(parsed.specifier)
+        self.marker: Optional[Marker] = None
+        if parsed.marker is not None:
+            self.marker = Marker.__new__(Marker)
+            self.marker._markers = _normalize_extra_values(parsed.marker)
+
+    def _iter_parts(self, name: str) -> Iterator[str]:
+        yield name
+
+        if self.extras:
+            formatted_extras = ",".join(sorted(self.extras))
+            yield f"[{formatted_extras}]"
+
+        if self.specifier:
+            yield str(self.specifier)
+
+        if self.url:
+            yield f"@ {self.url}"
+            if self.marker:
+                yield " "
+
+        if self.marker:
+            yield f"; {self.marker}"
+
+    def __str__(self) -> str:
+        return "".join(self._iter_parts(self.name))
+
+    def __repr__(self) -> str:
+        return f"<Requirement('{self}')>"
+
+    def __hash__(self) -> int:
+        return hash(
+            (
+                self.__class__.__name__,
+                *self._iter_parts(canonicalize_name(self.name)),
+            )
+        )
+
+    def __eq__(self, other: Any) -> bool:
+        if not isinstance(other, Requirement):
+            return NotImplemented
+
+        return (
+            canonicalize_name(self.name) == canonicalize_name(other.name)
+            and self.extras == other.extras
+            and self.specifier == other.specifier
+            and self.url == other.url
+            and self.marker == other.marker
+        )
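Usage sketch (annotation, not part of the vendored file): a Requirement exposes the parsed parts and round-trips through str().

    r = Requirement('requests[security]>=2.8.1; python_version < "2.7"')
    assert (r.name, r.extras) == ("requests", {"security"})
    assert str(r.specifier) == ">=2.8.1"
    assert str(r) == 'requests[security]>=2.8.1; python_version < "2.7"'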
diff --git a/.venv/lib/python3.12/site-packages/setuptools/_vendor/wheel/vendored/packaging/specifiers.py b/.venv/lib/python3.12/site-packages/setuptools/_vendor/wheel/vendored/packaging/specifiers.py
new file mode 100644
index 00000000..6d4066ae
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/setuptools/_vendor/wheel/vendored/packaging/specifiers.py
@@ -0,0 +1,1011 @@
+# This file is dual licensed under the terms of the Apache License, Version
+# 2.0, and the BSD License. See the LICENSE file in the root of this repository
+# for complete details.
+"""
+.. testsetup::
+
+    from packaging.specifiers import Specifier, SpecifierSet, InvalidSpecifier
+    from packaging.version import Version
+"""
+
+import abc
+import itertools
+import re
+from typing import Callable, Iterable, Iterator, List, Optional, Tuple, TypeVar, Union
+
+from .utils import canonicalize_version
+from .version import Version
+
+UnparsedVersion = Union[Version, str]
+UnparsedVersionVar = TypeVar("UnparsedVersionVar", bound=UnparsedVersion)
+CallableOperator = Callable[[Version, str], bool]
+
+
+def _coerce_version(version: UnparsedVersion) -> Version:
+    if not isinstance(version, Version):
+        version = Version(version)
+    return version
+
+
+class InvalidSpecifier(ValueError):
+    """
+    Raised when attempting to create a :class:`Specifier` with a specifier
+    string that is invalid.
+
+    >>> Specifier("lolwat")
+    Traceback (most recent call last):
+        ...
+    packaging.specifiers.InvalidSpecifier: Invalid specifier: 'lolwat'
+    """
+
+
+class BaseSpecifier(metaclass=abc.ABCMeta):
+    @abc.abstractmethod
+    def __str__(self) -> str:
+        """
+        Returns the str representation of this Specifier-like object. This
+        should be representative of the Specifier itself.
+        """
+
+    @abc.abstractmethod
+    def __hash__(self) -> int:
+        """
+        Returns a hash value for this Specifier-like object.
+        """
+
+    @abc.abstractmethod
+    def __eq__(self, other: object) -> bool:
+        """
+        Returns a boolean representing whether or not the two Specifier-like
+        objects are equal.
+
+        :param other: The other object to check against.
+        """
+
+    @property
+    @abc.abstractmethod
+    def prereleases(self) -> Optional[bool]:
+        """Whether or not pre-releases as a whole are allowed.
+
+        This can be set to either ``True`` or ``False`` to explicitly enable or disable
+        prereleases or it can be set to ``None`` (the default) to use default semantics.
+        """
+
+    @prereleases.setter
+    def prereleases(self, value: bool) -> None:
+        """Setter for :attr:`prereleases`.
+
+        :param value: The value to set.
+        """
+
+    @abc.abstractmethod
+    def contains(self, item: str, prereleases: Optional[bool] = None) -> bool:
+        """
+        Determines if the given item is contained within this specifier.
+        """
+
+    @abc.abstractmethod
+    def filter(
+        self, iterable: Iterable[UnparsedVersionVar], prereleases: Optional[bool] = None
+    ) -> Iterator[UnparsedVersionVar]:
+        """
+        Takes an iterable of items and filters them so that only items
+        contained within this specifier remain.
+        """
+
+
+class Specifier(BaseSpecifier):
+    """This class abstracts handling of version specifiers.
+
+    .. tip::
+
+        It is generally not required to instantiate this manually. You should
+        prefer to work with :class:`SpecifierSet` instead, which can parse
+        comma-separated version specifiers (which is what package metadata contains).
+    """
+
+    _operator_regex_str = r"""
+        (?P<operator>(~=|==|!=|<=|>=|<|>|===))
+        """
+    _version_regex_str = r"""
+        (?P<version>
+            (?:
+                # The identity operators allow for an escape hatch that will
+                # do an exact string match of the version you wish to install.
+                # This will not be parsed by PEP 440 and we cannot determine
+                # any semantic meaning from it. This operator is discouraged
+                # but included entirely as an escape hatch.
+                (?<====)  # Only match for the identity operator
+                \s*
+                [^\s;)]*  # The arbitrary version can be just about anything,
+                          # we match everything except for whitespace, a
+                          # semi-colon for marker support, and a closing paren
+                          # since versions can be enclosed in them.
+            )
+            |
+            (?:
+                # The (non)equality operators allow for wild card and local
+                # versions to be specified so we have to define these two
+                # operators separately to enable that.
+                (?<===|!=)            # Only match for equals and not equals
+
+                \s*
+                v?
+                (?:[0-9]+!)?          # epoch
+                [0-9]+(?:\.[0-9]+)*   # release
+
+                # You cannot use a wild card and a pre-release, post-release, a dev or
+                # local version together so group them with a | and make them optional.
+                (?:
+                    \.\*  # Wild card syntax of .*
+                    |
+                    (?:                                  # pre release
+                        [-_\.]?
+                        (alpha|beta|preview|pre|a|b|c|rc)
+                        [-_\.]?
+                        [0-9]*
+                    )?
+                    (?:                                  # post release
+                        (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
+                    )?
+                    (?:[-_\.]?dev[-_\.]?[0-9]*)?         # dev release
+                    (?:\+[a-z0-9]+(?:[-_\.][a-z0-9]+)*)? # local
+                )?
+            )
+            |
+            (?:
+                # The compatible operator requires at least two digits in the
+                # release segment.
+                (?<=~=)               # Only match for the compatible operator
+
+                \s*
+                v?
+                (?:[0-9]+!)?          # epoch
+                [0-9]+(?:\.[0-9]+)+   # release  (We have a + instead of a *)
+                (?:                   # pre release
+                    [-_\.]?
+                    (alpha|beta|preview|pre|a|b|c|rc)
+                    [-_\.]?
+                    [0-9]*
+                )?
+                (?:                                   # post release
+                    (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
+                )?
+                (?:[-_\.]?dev[-_\.]?[0-9]*)?          # dev release
+            )
+            |
+            (?:
+                # All other operators only allow a subset of what the
+                # (non)equality operators do. Specifically they do not allow
+                # local versions to be specified nor do they allow the prefix
+                # matching wild cards.
+                (?<!==|!=|~=)         # We have special cases for these
+                                      # operators so we want to make sure they
+                                      # don't match here.
+
+                \s*
+                v?
+                (?:[0-9]+!)?          # epoch
+                [0-9]+(?:\.[0-9]+)*   # release
+                (?:                   # pre release
+                    [-_\.]?
+                    (alpha|beta|preview|pre|a|b|c|rc)
+                    [-_\.]?
+                    [0-9]*
+                )?
+                (?:                                   # post release
+                    (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
+                )?
+                (?:[-_\.]?dev[-_\.]?[0-9]*)?          # dev release
+            )
+        )
+        """
+
+    _regex = re.compile(
+        r"^\s*" + _operator_regex_str + _version_regex_str + r"\s*$",
+        re.VERBOSE | re.IGNORECASE,
+    )
+
+    _operators = {
+        "~=": "compatible",
+        "==": "equal",
+        "!=": "not_equal",
+        "<=": "less_than_equal",
+        ">=": "greater_than_equal",
+        "<": "less_than",
+        ">": "greater_than",
+        "===": "arbitrary",
+    }
+
+    def __init__(self, spec: str = "", prereleases: Optional[bool] = None) -> None:
+        """Initialize a Specifier instance.
+
+        :param spec:
+            The string representation of a specifier which will be parsed and
+            normalized before use.
+        :param prereleases:
+            This tells the specifier if it should accept prerelease versions if
+            applicable or not. The default of ``None`` will autodetect it from the
+            given specifiers.
+        :raises InvalidSpecifier:
+            If the given specifier is invalid (i.e. bad syntax).
+        """
+        match = self._regex.search(spec)
+        if not match:
+            raise InvalidSpecifier(f"Invalid specifier: '{spec}'")
+
+        self._spec: Tuple[str, str] = (
+            match.group("operator").strip(),
+            match.group("version").strip(),
+        )
+
+        # Store whether or not this Specifier should accept prereleases
+        self._prereleases = prereleases
+
+    # https://github.com/python/mypy/pull/13475#pullrequestreview-1079784515
+    @property  # type: ignore[override]
+    def prereleases(self) -> bool:
+        # If there is an explicit prereleases set for this, then we'll just
+        # blindly use that.
+        if self._prereleases is not None:
+            return self._prereleases
+
+        # Look at our specifier and determine if it uses an inclusive
+        # operator, and if it does, whether it includes an explicit
+        # prerelease.
+        operator, version = self._spec
+        if operator in ["==", ">=", "<=", "~=", "==="]:
+            # The == specifier can include a trailing .*; if it does, we
+            # want to remove it before parsing.
+            if operator == "==" and version.endswith(".*"):
+                version = version[:-2]
+
+            # Parse the version, and if it is a pre-release then this
+            # specifier allows pre-releases.
+            if Version(version).is_prerelease:
+                return True
+
+        return False
+
+    @prereleases.setter
+    def prereleases(self, value: bool) -> None:
+        self._prereleases = value
+
+    @property
+    def operator(self) -> str:
+        """The operator of this specifier.
+
+        >>> Specifier("==1.2.3").operator
+        '=='
+        """
+        return self._spec[0]
+
+    @property
+    def version(self) -> str:
+        """The version of this specifier.
+
+        >>> Specifier("==1.2.3").version
+        '1.2.3'
+        """
+        return self._spec[1]
+
+    def __repr__(self) -> str:
+        """A representation of the Specifier that shows all internal state.
+
+        >>> Specifier('>=1.0.0')
+        <Specifier('>=1.0.0')>
+        >>> Specifier('>=1.0.0', prereleases=False)
+        <Specifier('>=1.0.0', prereleases=False)>
+        >>> Specifier('>=1.0.0', prereleases=True)
+        <Specifier('>=1.0.0', prereleases=True)>
+        """
+        pre = (
+            f", prereleases={self.prereleases!r}"
+            if self._prereleases is not None
+            else ""
+        )
+
+        return f"<{self.__class__.__name__}({str(self)!r}{pre})>"
+
+    def __str__(self) -> str:
+        """A string representation of the Specifier that can be round-tripped.
+
+        >>> str(Specifier('>=1.0.0'))
+        '>=1.0.0'
+        >>> str(Specifier('>=1.0.0', prereleases=False))
+        '>=1.0.0'
+        """
+        return "{}{}".format(*self._spec)
+
+    @property
+    def _canonical_spec(self) -> Tuple[str, str]:
+        canonical_version = canonicalize_version(
+            self._spec[1],
+            strip_trailing_zero=(self._spec[0] != "~="),
+        )
+        return self._spec[0], canonical_version
+
+    def __hash__(self) -> int:
+        return hash(self._canonical_spec)
+
+    def __eq__(self, other: object) -> bool:
+        """Whether or not the two Specifier-like objects are equal.
+
+        :param other: The other object to check against.
+
+        The value of :attr:`prereleases` is ignored.
+
+        >>> Specifier("==1.2.3") == Specifier("== 1.2.3.0")
+        True
+        >>> (Specifier("==1.2.3", prereleases=False) ==
+        ...  Specifier("==1.2.3", prereleases=True))
+        True
+        >>> Specifier("==1.2.3") == "==1.2.3"
+        True
+        >>> Specifier("==1.2.3") == Specifier("==1.2.4")
+        False
+        >>> Specifier("==1.2.3") == Specifier("~=1.2.3")
+        False
+        """
+        if isinstance(other, str):
+            try:
+                other = self.__class__(str(other))
+            except InvalidSpecifier:
+                return NotImplemented
+        elif not isinstance(other, self.__class__):
+            return NotImplemented
+
+        return self._canonical_spec == other._canonical_spec
+
+    def _get_operator(self, op: str) -> CallableOperator:
+        operator_callable: CallableOperator = getattr(
+            self, f"_compare_{self._operators[op]}"
+        )
+        return operator_callable
+
+    def _compare_compatible(self, prospective: Version, spec: str) -> bool:
+        # Compatible releases have an equivalent combination of >= and ==. That
+        # is that ~=2.2 is equivalent to >=2.2,==2.*. This allows us to
+        # implement this in terms of the other specifiers instead of
+        # implementing it ourselves. The only thing we need to do is construct
+        # the other specifiers.
+
+        # We want everything but the last item in the version, but we want to
+        # ignore suffix segments.
+        prefix = _version_join(
+            list(itertools.takewhile(_is_not_suffix, _version_split(spec)))[:-1]
+        )
+
+        # Add the prefix notation to the end of our string
+        prefix += ".*"
+
+        return self._get_operator(">=")(prospective, spec) and self._get_operator("==")(
+            prospective, prefix
+        )
+
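Concretely (annotation, not part of the vendored file): ~= expands to a >= bound plus a == prefix match on all but the last release component.

    s = Specifier("~=2.2")
    assert s.contains("2.5") and not s.contains("3.0")  # i.e. >=2.2 and ==2.*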
+    def _compare_equal(self, prospective: Version, spec: str) -> bool:
+        # We need special logic to handle prefix matching
+        if spec.endswith(".*"):
+            # In the case of prefix matching we want to ignore local segment.
+            normalized_prospective = canonicalize_version(
+                prospective.public, strip_trailing_zero=False
+            )
+            # Get the normalized version string ignoring the trailing .*
+            normalized_spec = canonicalize_version(spec[:-2], strip_trailing_zero=False)
+            # Split the spec out by bangs and dots, and pretend that there is
+            # an implicit dot in between a release segment and a pre-release segment.
+            split_spec = _version_split(normalized_spec)
+
+            # Split the prospective version out by bangs and dots, and pretend
+            # that there is an implicit dot in between a release segment and
+            # a pre-release segment.
+            split_prospective = _version_split(normalized_prospective)
+
+            # 0-pad the prospective version before shortening it to get the correct
+            # shortened version.
+            padded_prospective, _ = _pad_version(split_prospective, split_spec)
+
+            # Shorten the prospective version to be the same length as the spec
+            # so that we can determine if the specifier is a prefix of the
+            # prospective version or not.
+            shortened_prospective = padded_prospective[: len(split_spec)]
+
+            return shortened_prospective == split_spec
+        else:
+            # Convert our spec string into a Version
+            spec_version = Version(spec)
+
+            # If the specifier does not have a local segment, then we want to
+            # act as if the prospective version also does not have a local
+            # segment.
+            if not spec_version.local:
+                prospective = Version(prospective.public)
+
+            return prospective == spec_version
+
+    def _compare_not_equal(self, prospective: Version, spec: str) -> bool:
+        return not self._compare_equal(prospective, spec)
+
+    def _compare_less_than_equal(self, prospective: Version, spec: str) -> bool:
+        # NB: Local version identifiers are NOT permitted in the version
+        # specifier, so local version labels can be universally removed from
+        # the prospective version.
+        return Version(prospective.public) <= Version(spec)
+
+    def _compare_greater_than_equal(self, prospective: Version, spec: str) -> bool:
+        # NB: Local version identifiers are NOT permitted in the version
+        # specifier, so local version labels can be universally removed from
+        # the prospective version.
+        return Version(prospective.public) >= Version(spec)
+
+    def _compare_less_than(self, prospective: Version, spec_str: str) -> bool:
+        # Convert our spec to a Version instance, since we'll want to work with
+        # it as a version.
+        spec = Version(spec_str)
+
+        # Check to see if the prospective version is less than the spec
+        # version. If it's not we can short circuit and just return False now
+        # instead of doing extra unneeded work.
+        if not prospective < spec:
+            return False
+
+        # This special case is here so that, unless the specifier itself
+        # includes a pre-release version, we do not accept pre-release
+        # versions for the version mentioned in the specifier (e.g. <3.1 should
+        # not match 3.1.dev0, but should match 3.0.dev0).
+        if not spec.is_prerelease and prospective.is_prerelease:
+            if Version(prospective.base_version) == Version(spec.base_version):
+                return False
+
+        # If we've gotten to here, it means that the prospective version is both
+        # less than the spec version *and* it's not a pre-release of the same
+        # version in the spec.
+        return True
+
+    def _compare_greater_than(self, prospective: Version, spec_str: str) -> bool:
+        # Convert our spec to a Version instance, since we'll want to work with
+        # it as a version.
+        spec = Version(spec_str)
+
+        # Check to see if the prospective version is greater than the spec
+        # version. If it's not we can short circuit and just return False now
+        # instead of doing extra unneeded work.
+        if not prospective > spec:
+            return False
+
+        # This special case is here so that, unless the specifier itself
+        # includes a post-release version, we do not accept
+        # post-release versions for the version mentioned in the specifier
+        # (e.g. >3.1 should not match 3.0.post0, but should match 3.2.post0).
+        if not spec.is_postrelease and prospective.is_postrelease:
+            if Version(prospective.base_version) == Version(spec.base_version):
+                return False
+
+        # Ensure that we do not allow a local version of the version mentioned
+        # in the specifier, which is technically greater than, to match.
+        if prospective.local is not None:
+            if Version(prospective.base_version) == Version(spec.base_version):
+                return False
+
+        # If we've gotten to here, it means that the prospective version is both
+        # greater than the spec version *and* it's not a post-release or local
+        # version of the same version in the spec.
+        return True
+
+    def _compare_arbitrary(self, prospective: Version, spec: str) -> bool:
+        return str(prospective).lower() == str(spec).lower()
+
+    def __contains__(self, item: Union[str, Version]) -> bool:
+        """Return whether or not the item is contained in this specifier.
+
+        :param item: The item to check for.
+
+        This is used for the ``in`` operator and behaves the same as
+        :meth:`contains` with no ``prereleases`` argument passed.
+
+        >>> "1.2.3" in Specifier(">=1.2.3")
+        True
+        >>> Version("1.2.3") in Specifier(">=1.2.3")
+        True
+        >>> "1.0.0" in Specifier(">=1.2.3")
+        False
+        >>> "1.3.0a1" in Specifier(">=1.2.3")
+        False
+        >>> "1.3.0a1" in Specifier(">=1.2.3", prereleases=True)
+        True
+        """
+        return self.contains(item)
+
+    def contains(
+        self, item: UnparsedVersion, prereleases: Optional[bool] = None
+    ) -> bool:
+        """Return whether or not the item is contained in this specifier.
+
+        :param item:
+            The item to check for, which can be a version string or a
+            :class:`Version` instance.
+        :param prereleases:
+            Whether or not to match prereleases with this Specifier. If set to
+            ``None`` (the default), it uses :attr:`prereleases` to determine
+            whether or not prereleases are allowed.
+
+        >>> Specifier(">=1.2.3").contains("1.2.3")
+        True
+        >>> Specifier(">=1.2.3").contains(Version("1.2.3"))
+        True
+        >>> Specifier(">=1.2.3").contains("1.0.0")
+        False
+        >>> Specifier(">=1.2.3").contains("1.3.0a1")
+        False
+        >>> Specifier(">=1.2.3", prereleases=True).contains("1.3.0a1")
+        True
+        >>> Specifier(">=1.2.3").contains("1.3.0a1", prereleases=True)
+        True
+        """
+
+        # Determine if prereleases are to be allowed or not.
+        if prereleases is None:
+            prereleases = self.prereleases
+
+        # Normalize item to a Version; this allows us to have a shortcut for
+        # "2.0" in Specifier(">=2")
+        normalized_item = _coerce_version(item)
+
+        # Determine if we should be supporting prereleases in this specifier
+        # or not; if we do not support prereleases, we can short-circuit
+        # the logic if this version is a prerelease.
+        if normalized_item.is_prerelease and not prereleases:
+            return False
+
+        # Actually do the comparison to determine if this item is contained
+        # within this Specifier or not.
+        operator_callable: CallableOperator = self._get_operator(self.operator)
+        return operator_callable(normalized_item, self.version)
+
+    def filter(
+        self, iterable: Iterable[UnparsedVersionVar], prereleases: Optional[bool] = None
+    ) -> Iterator[UnparsedVersionVar]:
+        """Filter items in the given iterable, that match the specifier.
+
+        :param iterable:
+            An iterable that can contain version strings and :class:`Version` instances.
+            The items in the iterable will be filtered according to the specifier.
+        :param prereleases:
+            Whether or not to allow prereleases in the returned iterator. If set to
+            ``None`` (the default), it will intelligently decide whether to allow
+            prereleases or not (based on the :attr:`prereleases` attribute, and
+            whether the only versions matching are prereleases).
+
+        This method is smarter than just ``filter(Specifier().contains, [...])``
+        because it implements the rule from :pep:`440` that a prerelease item
+        SHOULD be accepted if no other versions match the given specifier.
+
+        >>> list(Specifier(">=1.2.3").filter(["1.2", "1.3", "1.5a1"]))
+        ['1.3']
+        >>> list(Specifier(">=1.2.3").filter(["1.2", "1.2.3", "1.3", Version("1.4")]))
+        ['1.2.3', '1.3', <Version('1.4')>]
+        >>> list(Specifier(">=1.2.3").filter(["1.2", "1.5a1"]))
+        ['1.5a1']
+        >>> list(Specifier(">=1.2.3").filter(["1.3", "1.5a1"], prereleases=True))
+        ['1.3', '1.5a1']
+        >>> list(Specifier(">=1.2.3", prereleases=True).filter(["1.3", "1.5a1"]))
+        ['1.3', '1.5a1']
+        """
+
+        yielded = False
+        found_prereleases = []
+
+        kw = {"prereleases": prereleases if prereleases is not None else True}
+
+        # Attempt to iterate over all the values in the iterable and if any of
+        # them match, yield them.
+        for version in iterable:
+            parsed_version = _coerce_version(version)
+
+            if self.contains(parsed_version, **kw):
+                # If our version is a prerelease, and we were not set to allow
+                # prereleases, then we'll store it for later in case nothing
+                # else matches this specifier.
+                if parsed_version.is_prerelease and not (
+                    prereleases or self.prereleases
+                ):
+                    found_prereleases.append(version)
+                # Either this is not a prerelease, or we should have been
+                # accepting prereleases from the beginning.
+                else:
+                    yielded = True
+                    yield version
+
+        # Now that we've iterated over everything, determine if we've yielded
+        # any values, and if we have not and we have any prereleases stored up
+        # then we will go ahead and yield the prereleases.
+        if not yielded and found_prereleases:
+            for version in found_prereleases:
+                yield version
+
+
+_prefix_regex = re.compile(r"^([0-9]+)((?:a|b|c|rc)[0-9]+)$")
+
+
+def _version_split(version: str) -> List[str]:
+    """Split version into components.
+
+    The split components are intended for version comparison. The logic does
+    not attempt to retain the original version string, so joining the
+    components back with :func:`_version_join` may not produce the original
+    version string.
+    """
+    result: List[str] = []
+
+    epoch, _, rest = version.rpartition("!")
+    result.append(epoch or "0")
+
+    for item in rest.split("."):
+        match = _prefix_regex.search(item)
+        if match:
+            result.extend(match.groups())
+        else:
+            result.append(item)
+    return result
+
+
+def _version_join(components: List[str]) -> str:
+    """Join split version components into a version string.
+
+    This function assumes the input came from :func:`_version_split`, where the
+    first component must be the epoch (either empty or numeric), and all other
+    components numeric.
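+
+    For example, assuming input shaped like :func:`_version_split` output:
+
+    >>> _version_join(["2", "1", "0"])
+    '2!1.0'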
+    """
+    epoch, *rest = components
+    return f"{epoch}!{'.'.join(rest)}"
+
+
+def _is_not_suffix(segment: str) -> bool:
+    return not any(
+        segment.startswith(prefix) for prefix in ("dev", "a", "b", "rc", "post")
+    )
+
+
+def _pad_version(left: List[str], right: List[str]) -> Tuple[List[str], List[str]]:
+    left_split, right_split = [], []
+
+    # Get the release segment of our versions
+    left_split.append(list(itertools.takewhile(lambda x: x.isdigit(), left)))
+    right_split.append(list(itertools.takewhile(lambda x: x.isdigit(), right)))
+
+    # Get the rest of our versions
+    left_split.append(left[len(left_split[0]) :])
+    right_split.append(right[len(right_split[0]) :])
+
+    # Insert our padding
+    left_split.insert(1, ["0"] * max(0, len(right_split[0]) - len(left_split[0])))
+    right_split.insert(1, ["0"] * max(0, len(left_split[0]) - len(right_split[0])))
+
+    return (
+        list(itertools.chain.from_iterable(left_split)),
+        list(itertools.chain.from_iterable(right_split)),
+    )
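+
+# Illustrative behaviour of the padding above: shorter release segments are
+# zero-padded, while non-numeric suffixes are carried along untouched, e.g.
+#
+#     _pad_version(["1", "2"], ["1", "2", "0"])  -> (["1", "2", "0"], ["1", "2", "0"])
+#     _pad_version(["1", "0", "a1"], ["1"])      -> (["1", "0", "a1"], ["1", "0"])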
+
+
+class SpecifierSet(BaseSpecifier):
+    """This class abstracts handling of a set of version specifiers.
+
+    It can be passed a single specifier (``>=3.0``), a comma-separated list of
+    specifiers (``>=3.0,!=3.1``), or no specifier at all.
+    """
+
+    def __init__(
+        self, specifiers: str = "", prereleases: Optional[bool] = None
+    ) -> None:
+        """Initialize a SpecifierSet instance.
+
+        :param specifiers:
+            The string representation of a specifier or a comma-separated list of
+            specifiers which will be parsed and normalized before use.
+        :param prereleases:
+            This tells the SpecifierSet if it should accept prerelease versions if
+            applicable or not. The default of ``None`` will autodetect it from the
+            given specifiers.
+
+        :raises InvalidSpecifier:
+            If the given ``specifiers`` are not parseable, then this exception will be
+            raised.
+        """
+
+        # Split on `,` to break each individual specifier into its own item, and
+        # strip each item to remove leading/trailing whitespace.
+        split_specifiers = [s.strip() for s in specifiers.split(",") if s.strip()]
+
+        # Make each individual specifier a Specifier and save in a frozen set for later.
+        self._specs = frozenset(map(Specifier, split_specifiers))
+
+        # Store our prereleases value so we can use it later to determine if
+        # we accept prereleases or not.
+        self._prereleases = prereleases
+
+    @property
+    def prereleases(self) -> Optional[bool]:
+        # If we have been given an explicit prerelease modifier, then we'll
+        # pass that through here.
+        if self._prereleases is not None:
+            return self._prereleases
+
+        # If we don't have any specifiers, and we don't have a forced value,
+        # then we'll just return None since we don't know if this should have
+        # pre-releases or not.
+        if not self._specs:
+            return None
+
+        # Otherwise we'll see if any of the given specifiers accept
+        # prereleases, if any of them do we'll return True, otherwise False.
+        return any(s.prereleases for s in self._specs)
+
+    @prereleases.setter
+    def prereleases(self, value: bool) -> None:
+        self._prereleases = value
+
+    def __repr__(self) -> str:
+        """A representation of the specifier set that shows all internal state.
+
+        Note that the ordering of the individual specifiers within the set may not
+        match the input string.
+
+        >>> SpecifierSet('>=1.0.0,!=2.0.0')
+        <SpecifierSet('!=2.0.0,>=1.0.0')>
+        >>> SpecifierSet('>=1.0.0,!=2.0.0', prereleases=False)
+        <SpecifierSet('!=2.0.0,>=1.0.0', prereleases=False)>
+        >>> SpecifierSet('>=1.0.0,!=2.0.0', prereleases=True)
+        <SpecifierSet('!=2.0.0,>=1.0.0', prereleases=True)>
+        """
+        pre = (
+            f", prereleases={self.prereleases!r}"
+            if self._prereleases is not None
+            else ""
+        )
+
+        return f"<SpecifierSet({str(self)!r}{pre})>"
+
+    def __str__(self) -> str:
+        """A string representation of the specifier set that can be round-tripped.
+
+        Note that the ordering of the individual specifiers within the set may not
+        match the input string.
+
+        >>> str(SpecifierSet(">=1.0.0,!=1.0.1"))
+        '!=1.0.1,>=1.0.0'
+        >>> str(SpecifierSet(">=1.0.0,!=1.0.1", prereleases=False))
+        '!=1.0.1,>=1.0.0'
+        """
+        return ",".join(sorted(str(s) for s in self._specs))
+
+    def __hash__(self) -> int:
+        return hash(self._specs)
+
+    def __and__(self, other: Union["SpecifierSet", str]) -> "SpecifierSet":
+        """Return a SpecifierSet which is a combination of the two sets.
+
+        :param other: The other object to combine with.
+
+        >>> SpecifierSet(">=1.0.0,!=1.0.1") & '<=2.0.0,!=2.0.1'
+        <SpecifierSet('!=1.0.1,!=2.0.1,<=2.0.0,>=1.0.0')>
+        >>> SpecifierSet(">=1.0.0,!=1.0.1") & SpecifierSet('<=2.0.0,!=2.0.1')
+        <SpecifierSet('!=1.0.1,!=2.0.1,<=2.0.0,>=1.0.0')>
+        """
+        if isinstance(other, str):
+            other = SpecifierSet(other)
+        elif not isinstance(other, SpecifierSet):
+            return NotImplemented
+
+        specifier = SpecifierSet()
+        specifier._specs = frozenset(self._specs | other._specs)
+
+        if self._prereleases is None and other._prereleases is not None:
+            specifier._prereleases = other._prereleases
+        elif self._prereleases is not None and other._prereleases is None:
+            specifier._prereleases = self._prereleases
+        elif self._prereleases == other._prereleases:
+            specifier._prereleases = self._prereleases
+        else:
+            raise ValueError(
+                "Cannot combine SpecifierSets with True and False prerelease "
+                "overrides."
+            )
+
+        return specifier
+
+    def __eq__(self, other: object) -> bool:
+        """Whether or not the two SpecifierSet-like objects are equal.
+
+        :param other: The other object to check against.
+
+        The value of :attr:`prereleases` is ignored.
+
+        >>> SpecifierSet(">=1.0.0,!=1.0.1") == SpecifierSet(">=1.0.0,!=1.0.1")
+        True
+        >>> (SpecifierSet(">=1.0.0,!=1.0.1", prereleases=False) ==
+        ...  SpecifierSet(">=1.0.0,!=1.0.1", prereleases=True))
+        True
+        >>> SpecifierSet(">=1.0.0,!=1.0.1") == ">=1.0.0,!=1.0.1"
+        True
+        >>> SpecifierSet(">=1.0.0,!=1.0.1") == SpecifierSet(">=1.0.0")
+        False
+        >>> SpecifierSet(">=1.0.0,!=1.0.1") == SpecifierSet(">=1.0.0,!=1.0.2")
+        False
+        """
+        if isinstance(other, (str, Specifier)):
+            other = SpecifierSet(str(other))
+        elif not isinstance(other, SpecifierSet):
+            return NotImplemented
+
+        return self._specs == other._specs
+
+    def __len__(self) -> int:
+        """Returns the number of specifiers in this specifier set."""
+        return len(self._specs)
+
+    def __iter__(self) -> Iterator[Specifier]:
+        """
+        Returns an iterator over all the underlying :class:`Specifier` instances
+        in this specifier set.
+
+        >>> sorted(SpecifierSet(">=1.0.0,!=1.0.1"), key=str)
+        [<Specifier('!=1.0.1')>, <Specifier('>=1.0.0')>]
+        """
+        return iter(self._specs)
+
+    def __contains__(self, item: UnparsedVersion) -> bool:
+        """Return whether or not the item is contained in this specifier.
+
+        :param item: The item to check for.
+
+        This is used for the ``in`` operator and behaves the same as
+        :meth:`contains` with no ``prereleases`` argument passed.
+
+        >>> "1.2.3" in SpecifierSet(">=1.0.0,!=1.0.1")
+        True
+        >>> Version("1.2.3") in SpecifierSet(">=1.0.0,!=1.0.1")
+        True
+        >>> "1.0.1" in SpecifierSet(">=1.0.0,!=1.0.1")
+        False
+        >>> "1.3.0a1" in SpecifierSet(">=1.0.0,!=1.0.1")
+        False
+        >>> "1.3.0a1" in SpecifierSet(">=1.0.0,!=1.0.1", prereleases=True)
+        True
+        """
+        return self.contains(item)
+
+    def contains(
+        self,
+        item: UnparsedVersion,
+        prereleases: Optional[bool] = None,
+        installed: Optional[bool] = None,
+    ) -> bool:
+        """Return whether or not the item is contained in this SpecifierSet.
+
+        :param item:
+            The item to check for, which can be a version string or a
+            :class:`Version` instance.
+        :param prereleases:
+            Whether or not to match prereleases with this SpecifierSet. If set to
+            ``None`` (the default), it uses :attr:`prereleases` to determine
+            whether or not prereleases are allowed.
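+        :param installed:
+            If truthy and the item is a prerelease, the item's base version is
+            checked against the specifiers instead.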
+
+        >>> SpecifierSet(">=1.0.0,!=1.0.1").contains("1.2.3")
+        True
+        >>> SpecifierSet(">=1.0.0,!=1.0.1").contains(Version("1.2.3"))
+        True
+        >>> SpecifierSet(">=1.0.0,!=1.0.1").contains("1.0.1")
+        False
+        >>> SpecifierSet(">=1.0.0,!=1.0.1").contains("1.3.0a1")
+        False
+        >>> SpecifierSet(">=1.0.0,!=1.0.1", prereleases=True).contains("1.3.0a1")
+        True
+        >>> SpecifierSet(">=1.0.0,!=1.0.1").contains("1.3.0a1", prereleases=True)
+        True
+        """
+        # Ensure that our item is a Version instance.
+        if not isinstance(item, Version):
+            item = Version(item)
+
+        # Determine if we're forcing a prerelease or not, if we're not forcing
+        # one for this particular filter call, then we'll use whatever the
+        # SpecifierSet thinks for whether or not we should support prereleases.
+        if prereleases is None:
+            prereleases = self.prereleases
+
+        # We can determine if we're going to allow pre-releases by looking to
+        # see if any of the underlying items supports them. If none of them do
+        # and this item is a pre-release then we do not allow it and we can
+        # short circuit that here.
+        # Note: This means that 1.0.dev1 would not be contained in something
+        #       like >=0.9, however it would be in >=0.9,>0.0.dev0, because the
+        #       second specifier opts the whole set into prereleases.
+        if not prereleases and item.is_prerelease:
+            return False
+
+        if installed and item.is_prerelease:
+            item = Version(item.base_version)
+
+        # We simply dispatch to the underlying specs here to make sure that the
+        # given version is contained within all of them.
+        # Note: This use of all() here means that an empty set of specifiers
+        #       will always return True, this is an explicit design decision.
+        return all(s.contains(item, prereleases=prereleases) for s in self._specs)
+
+    def filter(
+        self, iterable: Iterable[UnparsedVersionVar], prereleases: Optional[bool] = None
+    ) -> Iterator[UnparsedVersionVar]:
+        """Filter items in the given iterable, that match the specifiers in this set.
+
+        :param iterable:
+            An iterable that can contain version strings and :class:`Version` instances.
+            The items in the iterable will be filtered according to the specifier.
+        :param prereleases:
+            Whether or not to allow prereleases in the returned iterator. If set to
+            ``None`` (the default), it will intelligently decide whether to allow
+            prereleases or not (based on the :attr:`prereleases` attribute, and
+            whether the only versions matching are prereleases).
+
+        This method is smarter than just ``filter(SpecifierSet(...).contains, [...])``
+        because it implements the rule from :pep:`440` that a prerelease item
+        SHOULD be accepted if no other versions match the given specifier.
+
+        >>> list(SpecifierSet(">=1.2.3").filter(["1.2", "1.3", "1.5a1"]))
+        ['1.3']
+        >>> list(SpecifierSet(">=1.2.3").filter(["1.2", "1.3", Version("1.4")]))
+        ['1.3', <Version('1.4')>]
+        >>> list(SpecifierSet(">=1.2.3").filter(["1.2", "1.5a1"]))
+        []
+        >>> list(SpecifierSet(">=1.2.3").filter(["1.3", "1.5a1"], prereleases=True))
+        ['1.3', '1.5a1']
+        >>> list(SpecifierSet(">=1.2.3", prereleases=True).filter(["1.3", "1.5a1"]))
+        ['1.3', '1.5a1']
+
+        An "empty" SpecifierSet will filter items based on the presence of prerelease
+        versions in the set.
+
+        >>> list(SpecifierSet("").filter(["1.3", "1.5a1"]))
+        ['1.3']
+        >>> list(SpecifierSet("").filter(["1.5a1"]))
+        ['1.5a1']
+        >>> list(SpecifierSet("", prereleases=True).filter(["1.3", "1.5a1"]))
+        ['1.3', '1.5a1']
+        >>> list(SpecifierSet("").filter(["1.3", "1.5a1"], prereleases=True))
+        ['1.3', '1.5a1']
+        """
+        # Determine if we're forcing a prerelease or not, if we're not forcing
+        # one for this particular filter call, then we'll use whatever the
+        # SpecifierSet thinks for whether or not we should support prereleases.
+        if prereleases is None:
+            prereleases = self.prereleases
+
+        # If we have any specifiers, then we want to wrap our iterable in the
+        # filter method for each one, this will act as a logical AND amongst
+        # each specifier.
+        if self._specs:
+            for spec in self._specs:
+                iterable = spec.filter(iterable, prereleases=bool(prereleases))
+            return iter(iterable)
+        # If we do not have any specifiers, then we need to have a rough filter
+        # which will filter out any pre-releases, unless there are no final
+        # releases.
+        else:
+            filtered: List[UnparsedVersionVar] = []
+            found_prereleases: List[UnparsedVersionVar] = []
+
+            for item in iterable:
+                parsed_version = _coerce_version(item)
+
+                # Store any item which is a pre-release for later unless we've
+                # already found a final version or we are accepting prereleases
+                if parsed_version.is_prerelease and not prereleases:
+                    if not filtered:
+                        found_prereleases.append(item)
+                else:
+                    filtered.append(item)
+
+            # If we've found no items except for pre-releases, then we'll go
+            # ahead and use the pre-releases
+            if not filtered and found_prereleases and prereleases is None:
+                return iter(found_prereleases)
+
+            return iter(filtered)
diff --git a/.venv/lib/python3.12/site-packages/setuptools/_vendor/wheel/vendored/packaging/tags.py b/.venv/lib/python3.12/site-packages/setuptools/_vendor/wheel/vendored/packaging/tags.py
new file mode 100644
index 00000000..89f19261
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/setuptools/_vendor/wheel/vendored/packaging/tags.py
@@ -0,0 +1,571 @@
+# This file is dual licensed under the terms of the Apache License, Version
+# 2.0, and the BSD License. See the LICENSE file in the root of this repository
+# for complete details.
+
+import logging
+import platform
+import re
+import struct
+import subprocess
+import sys
+import sysconfig
+from importlib.machinery import EXTENSION_SUFFIXES
+from typing import (
+    Dict,
+    FrozenSet,
+    Iterable,
+    Iterator,
+    List,
+    Optional,
+    Sequence,
+    Tuple,
+    Union,
+    cast,
+)
+
+from . import _manylinux, _musllinux
+
+logger = logging.getLogger(__name__)
+
+PythonVersion = Sequence[int]
+MacVersion = Tuple[int, int]
+
+INTERPRETER_SHORT_NAMES: Dict[str, str] = {
+    "python": "py",  # Generic.
+    "cpython": "cp",
+    "pypy": "pp",
+    "ironpython": "ip",
+    "jython": "jy",
+}
+
+
+_32_BIT_INTERPRETER = struct.calcsize("P") == 4
+
+
+class Tag:
+    """
+    A representation of the tag triple for a wheel.
+
+    Instances are considered immutable and thus are hashable. Equality checking
+    is also supported.
+    """
+
+    __slots__ = ["_interpreter", "_abi", "_platform", "_hash"]
+
+    def __init__(self, interpreter: str, abi: str, platform: str) -> None:
+        self._interpreter = interpreter.lower()
+        self._abi = abi.lower()
+        self._platform = platform.lower()
+        # The __hash__ of every single element in a Set[Tag] will be evaluated each time
+        # that a set calls its `.isdisjoint()` method, which may be called hundreds of
+        # times when scanning a page of links for packages with tags matching that
+        # Set[Tag]. Pre-computing the value here produces significant speedups for
+        # downstream consumers.
+        self._hash = hash((self._interpreter, self._abi, self._platform))
+
+    @property
+    def interpreter(self) -> str:
+        return self._interpreter
+
+    @property
+    def abi(self) -> str:
+        return self._abi
+
+    @property
+    def platform(self) -> str:
+        return self._platform
+
+    def __eq__(self, other: object) -> bool:
+        if not isinstance(other, Tag):
+            return NotImplemented
+
+        return (
+            (self._hash == other._hash)  # Short-circuit ASAP for perf reasons.
+            and (self._platform == other._platform)
+            and (self._abi == other._abi)
+            and (self._interpreter == other._interpreter)
+        )
+
+    def __hash__(self) -> int:
+        return self._hash
+
+    def __str__(self) -> str:
+        return f"{self._interpreter}-{self._abi}-{self._platform}"
+
+    def __repr__(self) -> str:
+        return f"<{self} @ {id(self)}>"
+
+
+def parse_tag(tag: str) -> FrozenSet[Tag]:
+    """
+    Parses the provided tag (e.g. `py3-none-any`) into a frozenset of Tag instances.
+
+    Returning a set is required due to the possibility that the tag is a
+    compressed tag set.
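+
+    For example, a compressed tag set expands into multiple tags:
+
+    >>> sorted(str(t) for t in parse_tag("cp310.cp311-abi3-manylinux_2_17_x86_64"))
+    ['cp310-abi3-manylinux_2_17_x86_64', 'cp311-abi3-manylinux_2_17_x86_64']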
+    """
+    tags = set()
+    interpreters, abis, platforms = tag.split("-")
+    for interpreter in interpreters.split("."):
+        for abi in abis.split("."):
+            for platform_ in platforms.split("."):
+                tags.add(Tag(interpreter, abi, platform_))
+    return frozenset(tags)
+
+
+def _get_config_var(name: str, warn: bool = False) -> Union[int, str, None]:
+    value: Union[int, str, None] = sysconfig.get_config_var(name)
+    if value is None and warn:
+        logger.debug(
+            "Config variable '%s' is unset, Python ABI tag may be incorrect", name
+        )
+    return value
+
+
+def _normalize_string(string: str) -> str:
+    return string.replace(".", "_").replace("-", "_").replace(" ", "_")
+
+
+def _is_threaded_cpython(abis: List[str]) -> bool:
+    """
+    Determine if the ABI corresponds to a threaded (`--disable-gil`) build.
+
+    The threaded builds are indicated by a "t" in the abiflags.
+    """
+    if len(abis) == 0:
+        return False
+    # expect e.g., cp313
+    m = re.match(r"cp\d+(.*)", abis[0])
+    if not m:
+        return False
+    abiflags = m.group(1)
+    return "t" in abiflags
+
+
+def _abi3_applies(python_version: PythonVersion, threading: bool) -> bool:
+    """
+    Determine if the Python version supports abi3.
+
+    PEP 384 was first implemented in Python 3.2. The threaded (`--disable-gil`)
+    builds do not support abi3.
+    """
+    return len(python_version) > 1 and tuple(python_version) >= (3, 2) and not threading
+
+
+def _cpython_abis(py_version: PythonVersion, warn: bool = False) -> List[str]:
+    py_version = tuple(py_version)  # To allow for version comparison.
+    abis = []
+    version = _version_nodot(py_version[:2])
+    threading = debug = pymalloc = ucs4 = ""
+    with_debug = _get_config_var("Py_DEBUG", warn)
+    has_refcount = hasattr(sys, "gettotalrefcount")
+    # Windows doesn't set Py_DEBUG, so checking for support of debug-compiled
+    # extension modules is the best option.
+    # https://github.com/pypa/pip/issues/3383#issuecomment-173267692
+    has_ext = "_d.pyd" in EXTENSION_SUFFIXES
+    if with_debug or (with_debug is None and (has_refcount or has_ext)):
+        debug = "d"
+    if py_version >= (3, 13) and _get_config_var("Py_GIL_DISABLED", warn):
+        threading = "t"
+    if py_version < (3, 8):
+        with_pymalloc = _get_config_var("WITH_PYMALLOC", warn)
+        if with_pymalloc or with_pymalloc is None:
+            pymalloc = "m"
+        if py_version < (3, 3):
+            unicode_size = _get_config_var("Py_UNICODE_SIZE", warn)
+            if unicode_size == 4 or (
+                unicode_size is None and sys.maxunicode == 0x10FFFF
+            ):
+                ucs4 = "u"
+    elif debug:
+        # Debug builds can also load "normal" extension modules.
+        # We can also assume no UCS-4 or pymalloc requirement.
+        abis.append(f"cp{version}{threading}")
+    abis.insert(0, f"cp{version}{threading}{debug}{pymalloc}{ucs4}")
+    return abis
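+
+# For illustration only; the exact result depends on the build configuration:
+# a typical release build of CPython 3.12 should yield ["cp312"], a debug
+# build ["cp312d", "cp312"], and a 3.13 free-threaded build ["cp313t"].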
+
+
+def cpython_tags(
+    python_version: Optional[PythonVersion] = None,
+    abis: Optional[Iterable[str]] = None,
+    platforms: Optional[Iterable[str]] = None,
+    *,
+    warn: bool = False,
+) -> Iterator[Tag]:
+    """
+    Yields the tags for a CPython interpreter.
+
+    The tags consist of:
+    - cp<python_version>-<abi>-<platform>
+    - cp<python_version>-abi3-<platform>
+    - cp<python_version>-none-<platform>
+    - cp<less than python_version>-abi3-<platform>  # Older Python versions down to 3.2.
+
+    If python_version only specifies a major version, then user-provided ABIs and
+    the 'none' ABI tag will be used.
+
+    If 'abi3' or 'none' are specified in 'abis' then they will be yielded at
+    their normal position and not at the beginning.
+    """
+    if not python_version:
+        python_version = sys.version_info[:2]
+
+    interpreter = f"cp{_version_nodot(python_version[:2])}"
+
+    if abis is None:
+        if len(python_version) > 1:
+            abis = _cpython_abis(python_version, warn)
+        else:
+            abis = []
+    abis = list(abis)
+    # 'abi3' and 'none' are explicitly handled later.
+    for explicit_abi in ("abi3", "none"):
+        try:
+            abis.remove(explicit_abi)
+        except ValueError:
+            pass
+
+    platforms = list(platforms or platform_tags())
+    for abi in abis:
+        for platform_ in platforms:
+            yield Tag(interpreter, abi, platform_)
+
+    threading = _is_threaded_cpython(abis)
+    use_abi3 = _abi3_applies(python_version, threading)
+    if use_abi3:
+        yield from (Tag(interpreter, "abi3", platform_) for platform_ in platforms)
+    yield from (Tag(interpreter, "none", platform_) for platform_ in platforms)
+
+    if use_abi3:
+        for minor_version in range(python_version[1] - 1, 1, -1):
+            for platform_ in platforms:
+                interpreter = "cp{version}".format(
+                    version=_version_nodot((python_version[0], minor_version))
+                )
+                yield Tag(interpreter, "abi3", platform_)
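+
+# A sketch of the resulting order with every argument pinned explicitly:
+#
+#     >>> [str(t) for t in cpython_tags((3, 12), ["cp312"], ["manylinux_2_17_x86_64"])][:4]
+#     ['cp312-cp312-manylinux_2_17_x86_64',
+#      'cp312-abi3-manylinux_2_17_x86_64',
+#      'cp312-none-manylinux_2_17_x86_64',
+#      'cp311-abi3-manylinux_2_17_x86_64']
+#
+# followed by abi3 tags for progressively older versions down to cp32.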
+
+
+def _generic_abi() -> List[str]:
+    """
+    Return the ABI tag based on EXT_SUFFIX.
+    """
+    # The following are examples of `EXT_SUFFIX`.
+    # We want to keep the parts which are related to the ABI and remove the
+    # parts which are related to the platform:
+    # - linux:   '.cpython-310-x86_64-linux-gnu.so' => cp310
+    # - mac:     '.cpython-310-darwin.so'           => cp310
+    # - win:     '.cp310-win_amd64.pyd'             => cp310
+    # - win:     '.pyd'                             => cp37 (uses _cpython_abis())
+    # - pypy:    '.pypy38-pp73-x86_64-linux-gnu.so' => pypy38_pp73
+    # - graalpy: '.graalpy-38-native-x86_64-darwin.dylib'
+    #                                               => graalpy_38_native
+
+    ext_suffix = _get_config_var("EXT_SUFFIX", warn=True)
+    if not isinstance(ext_suffix, str) or ext_suffix[0] != ".":
+        raise SystemError("invalid sysconfig.get_config_var('EXT_SUFFIX')")
+    parts = ext_suffix.split(".")
+    if len(parts) < 3:
+        # CPython 3.7 and earlier use ".pyd" on Windows.
+        return _cpython_abis(sys.version_info[:2])
+    soabi = parts[1]
+    if soabi.startswith("cpython"):
+        # non-windows
+        abi = "cp" + soabi.split("-")[1]
+    elif soabi.startswith("cp"):
+        # windows
+        abi = soabi.split("-")[0]
+    elif soabi.startswith("pypy"):
+        abi = "-".join(soabi.split("-")[:2])
+    elif soabi.startswith("graalpy"):
+        abi = "-".join(soabi.split("-")[:3])
+    elif soabi:
+        # pyston, ironpython, others?
+        abi = soabi
+    else:
+        return []
+    return [_normalize_string(abi)]
+
+
+def generic_tags(
+    interpreter: Optional[str] = None,
+    abis: Optional[Iterable[str]] = None,
+    platforms: Optional[Iterable[str]] = None,
+    *,
+    warn: bool = False,
+) -> Iterator[Tag]:
+    """
+    Yields the tags for a generic interpreter.
+
+    The tags consist of:
+    - <interpreter>-<abi>-<platform>
+
+    The "none" ABI will be added if it was not explicitly provided.
+    """
+    if not interpreter:
+        interp_name = interpreter_name()
+        interp_version = interpreter_version(warn=warn)
+        interpreter = "".join([interp_name, interp_version])
+    if abis is None:
+        abis = _generic_abi()
+    else:
+        abis = list(abis)
+    platforms = list(platforms or platform_tags())
+    if "none" not in abis:
+        abis.append("none")
+    for abi in abis:
+        for platform_ in platforms:
+            yield Tag(interpreter, abi, platform_)
+
+
+def _py_interpreter_range(py_version: PythonVersion) -> Iterator[str]:
+    """
+    Yields Python versions in descending order.
+
+    After the latest version, the major-only version will be yielded, and then
+    all previous versions of that major version.
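+
+    For example:
+
+    >>> list(_py_interpreter_range((3, 2)))
+    ['py32', 'py3', 'py31', 'py30']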
+    """
+    if len(py_version) > 1:
+        yield f"py{_version_nodot(py_version[:2])}"
+    yield f"py{py_version[0]}"
+    if len(py_version) > 1:
+        for minor in range(py_version[1] - 1, -1, -1):
+            yield f"py{_version_nodot((py_version[0], minor))}"
+
+
+def compatible_tags(
+    python_version: Optional[PythonVersion] = None,
+    interpreter: Optional[str] = None,
+    platforms: Optional[Iterable[str]] = None,
+) -> Iterator[Tag]:
+    """
+    Yields the sequence of tags that are compatible with a specific version of Python.
+
+    The tags consist of:
+    - py*-none-<platform>
+    - <interpreter>-none-any  # ... if `interpreter` is provided.
+    - py*-none-any
+    """
+    if not python_version:
+        python_version = sys.version_info[:2]
+    platforms = list(platforms or platform_tags())
+    for version in _py_interpreter_range(python_version):
+        for platform_ in platforms:
+            yield Tag(version, "none", platform_)
+    if interpreter:
+        yield Tag(interpreter, "none", "any")
+    for version in _py_interpreter_range(python_version):
+        yield Tag(version, "none", "any")
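+
+# For illustration, with all arguments pinned ("linux_x86_64" is just an
+# example platform):
+#
+#     >>> [str(t) for t in compatible_tags((3, 1), "cp31", ["linux_x86_64"])]
+#     ['py31-none-linux_x86_64', 'py3-none-linux_x86_64', 'py30-none-linux_x86_64',
+#      'cp31-none-any', 'py31-none-any', 'py3-none-any', 'py30-none-any']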
+
+
+def _mac_arch(arch: str, is_32bit: bool = _32_BIT_INTERPRETER) -> str:
+    if not is_32bit:
+        return arch
+
+    if arch.startswith("ppc"):
+        return "ppc"
+
+    return "i386"
+
+
+def _mac_binary_formats(version: MacVersion, cpu_arch: str) -> List[str]:
+    formats = [cpu_arch]
+    if cpu_arch == "x86_64":
+        if version < (10, 4):
+            return []
+        formats.extend(["intel", "fat64", "fat32"])
+
+    elif cpu_arch == "i386":
+        if version < (10, 4):
+            return []
+        formats.extend(["intel", "fat32", "fat"])
+
+    elif cpu_arch == "ppc64":
+        # TODO: Need to care about 32-bit PPC for ppc64 through 10.2?
+        if version > (10, 5) or version < (10, 4):
+            return []
+        formats.append("fat64")
+
+    elif cpu_arch == "ppc":
+        if version > (10, 6):
+            return []
+        formats.extend(["fat32", "fat"])
+
+    if cpu_arch in {"arm64", "x86_64"}:
+        formats.append("universal2")
+
+    if cpu_arch in {"x86_64", "i386", "ppc64", "ppc", "intel"}:
+        formats.append("universal")
+
+    return formats
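+
+# Examples of the fat/universal expansion above:
+#
+#     >>> _mac_binary_formats((10, 15), "x86_64")
+#     ['x86_64', 'intel', 'fat64', 'fat32', 'universal2', 'universal']
+#     >>> _mac_binary_formats((11, 0), "arm64")
+#     ['arm64', 'universal2']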
+
+
+def mac_platforms(
+    version: Optional[MacVersion] = None, arch: Optional[str] = None
+) -> Iterator[str]:
+    """
+    Yields the platform tags for a macOS system.
+
+    The `version` parameter is a two-item tuple specifying the macOS version to
+    generate platform tags for. The `arch` parameter is the CPU architecture to
+    generate platform tags for. Both parameters default to the appropriate value
+    for the current system.
+    """
+    version_str, _, cpu_arch = platform.mac_ver()
+    if version is None:
+        version = cast("MacVersion", tuple(map(int, version_str.split(".")[:2])))
+        if version == (10, 16):
+            # When built against an older macOS SDK, Python will report macOS 10.16
+            # instead of the real version.
+            version_str = subprocess.run(
+                [
+                    sys.executable,
+                    "-sS",
+                    "-c",
+                    "import platform; print(platform.mac_ver()[0])",
+                ],
+                check=True,
+                env={"SYSTEM_VERSION_COMPAT": "0"},
+                stdout=subprocess.PIPE,
+                text=True,
+            ).stdout
+            version = cast("MacVersion", tuple(map(int, version_str.split(".")[:2])))
+    if arch is None:
+        arch = _mac_arch(cpu_arch)
+
+    if (10, 0) <= version < (11, 0):
+        # Prior to Mac OS 11, each yearly release of Mac OS bumped the
+        # "minor" version number.  The major version was always 10.
+        for minor_version in range(version[1], -1, -1):
+            compat_version = 10, minor_version
+            binary_formats = _mac_binary_formats(compat_version, arch)
+            for binary_format in binary_formats:
+                yield "macosx_{major}_{minor}_{binary_format}".format(
+                    major=10, minor=minor_version, binary_format=binary_format
+                )
+
+    if version >= (11, 0):
+        # Starting with Mac OS 11, each yearly release bumps the major version
+        # number.   The minor versions are now the midyear updates.
+        for major_version in range(version[0], 10, -1):
+            compat_version = major_version, 0
+            binary_formats = _mac_binary_formats(compat_version, arch)
+            for binary_format in binary_formats:
+                yield "macosx_{major}_{minor}_{binary_format}".format(
+                    major=major_version, minor=0, binary_format=binary_format
+                )
+
+    if version >= (11, 0):
+        # Mac OS 11 on x86_64 is compatible with binaries from previous releases.
+        # Arm64 support was introduced in 11.0, so no Arm binaries from previous
+        # releases exist.
+        #
+        # However, the "universal2" binary format can have a
+        # macOS version earlier than 11.0 when the x86_64 part of the binary supports
+        # that version of macOS.
+        if arch == "x86_64":
+            for minor_version in range(16, 3, -1):
+                compat_version = 10, minor_version
+                binary_formats = _mac_binary_formats(compat_version, arch)
+                for binary_format in binary_formats:
+                    yield "macosx_{major}_{minor}_{binary_format}".format(
+                        major=compat_version[0],
+                        minor=compat_version[1],
+                        binary_format=binary_format,
+                    )
+        else:
+            for minor_version in range(16, 3, -1):
+                compat_version = 10, minor_version
+                binary_format = "universal2"
+                yield "macosx_{major}_{minor}_{binary_format}".format(
+                    major=compat_version[0],
+                    minor=compat_version[1],
+                    binary_format=binary_format,
+                )
+
+
+def _linux_platforms(is_32bit: bool = _32_BIT_INTERPRETER) -> Iterator[str]:
+    linux = _normalize_string(sysconfig.get_platform())
+    if not linux.startswith("linux_"):
+        # we should never be here, just yield the sysconfig one and return
+        yield linux
+        return
+    if is_32bit:
+        if linux == "linux_x86_64":
+            linux = "linux_i686"
+        elif linux == "linux_aarch64":
+            linux = "linux_armv8l"
+    _, arch = linux.split("_", 1)
+    archs = {"armv8l": ["armv8l", "armv7l"]}.get(arch, [arch])
+    yield from _manylinux.platform_tags(archs)
+    yield from _musllinux.platform_tags(archs)
+    for arch in archs:
+        yield f"linux_{arch}"
+
+
+def _generic_platforms() -> Iterator[str]:
+    yield _normalize_string(sysconfig.get_platform())
+
+
+def platform_tags() -> Iterator[str]:
+    """
+    Provides the platform tags for this installation.
+    """
+    if platform.system() == "Darwin":
+        return mac_platforms()
+    elif platform.system() == "Linux":
+        return _linux_platforms()
+    else:
+        return _generic_platforms()
+
+
+def interpreter_name() -> str:
+    """
+    Returns the name of the running interpreter.
+
+    Some implementations have a reserved, two-letter abbreviation which will
+    be returned when appropriate.
+    """
+    name = sys.implementation.name
+    return INTERPRETER_SHORT_NAMES.get(name) or name
+
+
+def interpreter_version(*, warn: bool = False) -> str:
+    """
+    Returns the version of the running interpreter.
+    """
+    version = _get_config_var("py_version_nodot", warn=warn)
+    if version:
+        version = str(version)
+    else:
+        version = _version_nodot(sys.version_info[:2])
+    return version
+
+
+def _version_nodot(version: PythonVersion) -> str:
+    return "".join(map(str, version))
+
+
+def sys_tags(*, warn: bool = False) -> Iterator[Tag]:
+    """
+    Returns the sequence of tag triples for the running interpreter.
+
+    The order of the sequence corresponds to priority order for the
+    interpreter, from most to least important.
+    """
+
+    interp_name = interpreter_name()
+    if interp_name == "cp":
+        yield from cpython_tags(warn=warn)
+    else:
+        yield from generic_tags()
+
+    if interp_name == "pp":
+        interp = "pp3"
+    elif interp_name == "cp":
+        interp = "cp" + interpreter_version(warn=warn)
+    else:
+        interp = None
+    yield from compatible_tags(interpreter=interp)
diff --git a/.venv/lib/python3.12/site-packages/setuptools/_vendor/wheel/vendored/packaging/utils.py b/.venv/lib/python3.12/site-packages/setuptools/_vendor/wheel/vendored/packaging/utils.py
new file mode 100644
index 00000000..c2c2f75a
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/setuptools/_vendor/wheel/vendored/packaging/utils.py
@@ -0,0 +1,172 @@
+# This file is dual licensed under the terms of the Apache License, Version
+# 2.0, and the BSD License. See the LICENSE file in the root of this repository
+# for complete details.
+
+import re
+from typing import FrozenSet, NewType, Tuple, Union, cast
+
+from .tags import Tag, parse_tag
+from .version import InvalidVersion, Version
+
+BuildTag = Union[Tuple[()], Tuple[int, str]]
+NormalizedName = NewType("NormalizedName", str)
+
+
+class InvalidName(ValueError):
+    """
+    An invalid distribution name; users should refer to the packaging user guide.
+    """
+
+
+class InvalidWheelFilename(ValueError):
+    """
+    An invalid wheel filename was found; users should refer to PEP 427.
+    """
+
+
+class InvalidSdistFilename(ValueError):
+    """
+    An invalid sdist filename was found; users should refer to the packaging user guide.
+    """
+
+
+# Core metadata spec for `Name`
+_validate_regex = re.compile(
+    r"^([A-Z0-9]|[A-Z0-9][A-Z0-9._-]*[A-Z0-9])$", re.IGNORECASE
+)
+_canonicalize_regex = re.compile(r"[-_.]+")
+_normalized_regex = re.compile(r"^([a-z0-9]|[a-z0-9]([a-z0-9-](?!--))*[a-z0-9])$")
+# PEP 427: The build number must start with a digit.
+_build_tag_regex = re.compile(r"(\d+)(.*)")
+
+
+def canonicalize_name(name: str, *, validate: bool = False) -> NormalizedName:
+    if validate and not _validate_regex.match(name):
+        raise InvalidName(f"name is invalid: {name!r}")
+    # This is taken from PEP 503.
+    value = _canonicalize_regex.sub("-", name).lower()
+    return cast(NormalizedName, value)
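+
+# PEP 503 normalization in practice: runs of ".", "_" and "-" collapse to a
+# single "-" and the result is lowercased, e.g.
+#
+#     >>> canonicalize_name("Foo.Bar_baz")
+#     'foo-bar-baz'
+#
+# With ``validate=True``, names violating the core-metadata ``Name`` rules
+# (for example a leading "-") raise :class:`InvalidName` instead.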
+
+
+def is_normalized_name(name: str) -> bool:
+    return _normalized_regex.match(name) is not None
+
+
+def canonicalize_version(
+    version: Union[Version, str], *, strip_trailing_zero: bool = True
+) -> str:
+    """
+    This is very similar to Version.__str__, but has one subtle difference
+    with the way it handles the release segment.
+    """
+    if isinstance(version, str):
+        try:
+            parsed = Version(version)
+        except InvalidVersion:
+            # Legacy versions cannot be normalized
+            return version
+    else:
+        parsed = version
+
+    parts = []
+
+    # Epoch
+    if parsed.epoch != 0:
+        parts.append(f"{parsed.epoch}!")
+
+    # Release segment
+    release_segment = ".".join(str(x) for x in parsed.release)
+    if strip_trailing_zero:
+        # NB: This strips trailing '.0's to normalize
+        release_segment = re.sub(r"(\.0)+$", "", release_segment)
+    parts.append(release_segment)
+
+    # Pre-release
+    if parsed.pre is not None:
+        parts.append("".join(str(x) for x in parsed.pre))
+
+    # Post-release
+    if parsed.post is not None:
+        parts.append(f".post{parsed.post}")
+
+    # Development release
+    if parsed.dev is not None:
+        parts.append(f".dev{parsed.dev}")
+
+    # Local version segment
+    if parsed.local is not None:
+        parts.append(f"+{parsed.local}")
+
+    return "".join(parts)
+
+
+def parse_wheel_filename(
+    filename: str,
+) -> Tuple[NormalizedName, Version, BuildTag, FrozenSet[Tag]]:
+    if not filename.endswith(".whl"):
+        raise InvalidWheelFilename(
+            f"Invalid wheel filename (extension must be '.whl'): {filename}"
+        )
+
+    filename = filename[:-4]
+    dashes = filename.count("-")
+    if dashes not in (4, 5):
+        raise InvalidWheelFilename(
+            f"Invalid wheel filename (wrong number of parts): {filename}"
+        )
+
+    parts = filename.split("-", dashes - 2)
+    name_part = parts[0]
+    # See PEP 427 for the rules on escaping the project name.
+    if "__" in name_part or re.match(r"^[\w\d._]*$", name_part, re.UNICODE) is None:
+        raise InvalidWheelFilename(f"Invalid project name: {filename}")
+    name = canonicalize_name(name_part)
+
+    try:
+        version = Version(parts[1])
+    except InvalidVersion as e:
+        raise InvalidWheelFilename(
+            f"Invalid wheel filename (invalid version): {filename}"
+        ) from e
+
+    if dashes == 5:
+        build_part = parts[2]
+        build_match = _build_tag_regex.match(build_part)
+        if build_match is None:
+            raise InvalidWheelFilename(
+                f"Invalid build number: {build_part} in '{filename}'"
+            )
+        build = cast(BuildTag, (int(build_match.group(1)), build_match.group(2)))
+    else:
+        build = ()
+    tags = parse_tag(parts[-1])
+    return (name, version, build, tags)
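+
+# For example, a wheel filename without a build tag parses as:
+#
+#     >>> name, version, build, tags = parse_wheel_filename("pip-24.0-py3-none-any.whl")
+#     >>> name, str(version), build, sorted(str(t) for t in tags)
+#     ('pip', '24.0', (), ['py3-none-any'])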
+
+
+def parse_sdist_filename(filename: str) -> Tuple[NormalizedName, Version]:
+    if filename.endswith(".tar.gz"):
+        file_stem = filename[: -len(".tar.gz")]
+    elif filename.endswith(".zip"):
+        file_stem = filename[: -len(".zip")]
+    else:
+        raise InvalidSdistFilename(
+            f"Invalid sdist filename (extension must be '.tar.gz' or '.zip'):"
+            f" {filename}"
+        )
+
+    # We are requiring a PEP 440 version, which cannot contain dashes,
+    # so we split on the last dash.
+    name_part, sep, version_part = file_stem.rpartition("-")
+    if not sep:
+        raise InvalidSdistFilename(f"Invalid sdist filename: {filename}")
+
+    name = canonicalize_name(name_part)
+
+    try:
+        version = Version(version_part)
+    except InvalidVersion as e:
+        raise InvalidSdistFilename(
+            f"Invalid sdist filename (invalid version): {filename}"
+        ) from e
+
+    return (name, version)
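+
+# For example:
+#
+#     >>> parse_sdist_filename("pip-24.0.tar.gz")
+#     ('pip', <Version('24.0')>)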
diff --git a/.venv/lib/python3.12/site-packages/setuptools/_vendor/wheel/vendored/packaging/version.py b/.venv/lib/python3.12/site-packages/setuptools/_vendor/wheel/vendored/packaging/version.py
new file mode 100644
index 00000000..cda8e999
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/setuptools/_vendor/wheel/vendored/packaging/version.py
@@ -0,0 +1,561 @@
+# This file is dual licensed under the terms of the Apache License, Version
+# 2.0, and the BSD License. See the LICENSE file in the root of this repository
+# for complete details.
+"""
+.. testsetup::
+
+    from packaging.version import parse, Version
+"""
+
+import itertools
+import re
+from typing import Any, Callable, NamedTuple, Optional, SupportsInt, Tuple, Union
+
+from ._structures import Infinity, InfinityType, NegativeInfinity, NegativeInfinityType
+
+__all__ = ["VERSION_PATTERN", "parse", "Version", "InvalidVersion"]
+
+LocalType = Tuple[Union[int, str], ...]
+
+CmpPrePostDevType = Union[InfinityType, NegativeInfinityType, Tuple[str, int]]
+CmpLocalType = Union[
+    NegativeInfinityType,
+    Tuple[Union[Tuple[int, str], Tuple[NegativeInfinityType, Union[int, str]]], ...],
+]
+CmpKey = Tuple[
+    int,
+    Tuple[int, ...],
+    CmpPrePostDevType,
+    CmpPrePostDevType,
+    CmpPrePostDevType,
+    CmpLocalType,
+]
+VersionComparisonMethod = Callable[[CmpKey, CmpKey], bool]
+
+
+class _Version(NamedTuple):
+    epoch: int
+    release: Tuple[int, ...]
+    dev: Optional[Tuple[str, int]]
+    pre: Optional[Tuple[str, int]]
+    post: Optional[Tuple[str, int]]
+    local: Optional[LocalType]
+
+
+def parse(version: str) -> "Version":
+    """Parse the given version string.
+
+    >>> parse('1.0.dev1')
+    <Version('1.0.dev1')>
+
+    :param version: The version string to parse.
+    :raises InvalidVersion: When the version string is not a valid version.
+    """
+    return Version(version)
+
+
+class InvalidVersion(ValueError):
+    """Raised when a version string is not a valid version.
+
+    >>> Version("invalid")
+    Traceback (most recent call last):
+        ...
+    packaging.version.InvalidVersion: Invalid version: 'invalid'
+    """
+
+
+class _BaseVersion:
+    _key: Tuple[Any, ...]
+
+    def __hash__(self) -> int:
+        return hash(self._key)
+
+    # Please keep the duplicated `isinstance` check
+    # in the six comparisons hereunder
+    # unless you find a way to avoid adding overhead function calls.
+    def __lt__(self, other: "_BaseVersion") -> bool:
+        if not isinstance(other, _BaseVersion):
+            return NotImplemented
+
+        return self._key < other._key
+
+    def __le__(self, other: "_BaseVersion") -> bool:
+        if not isinstance(other, _BaseVersion):
+            return NotImplemented
+
+        return self._key <= other._key
+
+    def __eq__(self, other: object) -> bool:
+        if not isinstance(other, _BaseVersion):
+            return NotImplemented
+
+        return self._key == other._key
+
+    def __ge__(self, other: "_BaseVersion") -> bool:
+        if not isinstance(other, _BaseVersion):
+            return NotImplemented
+
+        return self._key >= other._key
+
+    def __gt__(self, other: "_BaseVersion") -> bool:
+        if not isinstance(other, _BaseVersion):
+            return NotImplemented
+
+        return self._key > other._key
+
+    def __ne__(self, other: object) -> bool:
+        if not isinstance(other, _BaseVersion):
+            return NotImplemented
+
+        return self._key != other._key
+
+
+# Deliberately not anchored to the start and end of the string, to make it
+# easier for 3rd party code to reuse
+_VERSION_PATTERN = r"""
+    v?
+    (?:
+        (?:(?P<epoch>[0-9]+)!)?                           # epoch
+        (?P<release>[0-9]+(?:\.[0-9]+)*)                  # release segment
+        (?P<pre>                                          # pre-release
+            [-_\.]?
+            (?P<pre_l>alpha|a|beta|b|preview|pre|c|rc)
+            [-_\.]?
+            (?P<pre_n>[0-9]+)?
+        )?
+        (?P<post>                                         # post release
+            (?:-(?P<post_n1>[0-9]+))
+            |
+            (?:
+                [-_\.]?
+                (?P<post_l>post|rev|r)
+                [-_\.]?
+                (?P<post_n2>[0-9]+)?
+            )
+        )?
+        (?P<dev>                                          # dev release
+            [-_\.]?
+            (?P<dev_l>dev)
+            [-_\.]?
+            (?P<dev_n>[0-9]+)?
+        )?
+    )
+    (?:\+(?P<local>[a-z0-9]+(?:[-_\.][a-z0-9]+)*))?       # local version
+"""
+
+VERSION_PATTERN = _VERSION_PATTERN
+"""
+A string containing the regular expression used to match a valid version.
+
+The pattern is not anchored at either end, and is intended for embedding in larger
+expressions (for example, matching a version number as part of a file name). The
+regular expression should be compiled with the ``re.VERBOSE`` and ``re.IGNORECASE``
+flags set.
+
+:meta hide-value:
+"""
+
+
+class Version(_BaseVersion):
+    """This class abstracts handling of a project's versions.
+
+    A :class:`Version` instance is comparison aware and can be compared and
+    sorted using the standard Python interfaces.
+
+    >>> v1 = Version("1.0a5")
+    >>> v2 = Version("1.0")
+    >>> v1
+    <Version('1.0a5')>
+    >>> v2
+    <Version('1.0')>
+    >>> v1 < v2
+    True
+    >>> v1 == v2
+    False
+    >>> v1 > v2
+    False
+    >>> v1 >= v2
+    False
+    >>> v1 <= v2
+    True
+    """
+
+    _regex = re.compile(r"^\s*" + VERSION_PATTERN + r"\s*$", re.VERBOSE | re.IGNORECASE)
+    _key: CmpKey
+
+    def __init__(self, version: str) -> None:
+        """Initialize a Version object.
+
+        :param version:
+            The string representation of a version which will be parsed and normalized
+            before use.
+        :raises InvalidVersion:
+            If the ``version`` does not conform to PEP 440 in any way then this
+            exception will be raised.
+        """
+
+        # Validate the version and parse it into pieces
+        match = self._regex.search(version)
+        if not match:
+            raise InvalidVersion(f"Invalid version: '{version}'")
+
+        # Store the parsed out pieces of the version
+        self._version = _Version(
+            epoch=int(match.group("epoch")) if match.group("epoch") else 0,
+            release=tuple(int(i) for i in match.group("release").split(".")),
+            pre=_parse_letter_version(match.group("pre_l"), match.group("pre_n")),
+            post=_parse_letter_version(
+                match.group("post_l"), match.group("post_n1") or match.group("post_n2")
+            ),
+            dev=_parse_letter_version(match.group("dev_l"), match.group("dev_n")),
+            local=_parse_local_version(match.group("local")),
+        )
+
+        # Generate a key which will be used for sorting
+        self._key = _cmpkey(
+            self._version.epoch,
+            self._version.release,
+            self._version.pre,
+            self._version.post,
+            self._version.dev,
+            self._version.local,
+        )
+
+    def __repr__(self) -> str:
+        """A representation of the Version that shows all internal state.
+
+        >>> Version('1.0.0')
+        <Version('1.0.0')>
+        """
+        return f"<Version('{self}')>"
+
+    def __str__(self) -> str:
+        """A string representation of the version that can be rounded-tripped.
+
+        >>> str(Version("1.0a5"))
+        '1.0a5'
+        """
+        parts = []
+
+        # Epoch
+        if self.epoch != 0:
+            parts.append(f"{self.epoch}!")
+
+        # Release segment
+        parts.append(".".join(str(x) for x in self.release))
+
+        # Pre-release
+        if self.pre is not None:
+            parts.append("".join(str(x) for x in self.pre))
+
+        # Post-release
+        if self.post is not None:
+            parts.append(f".post{self.post}")
+
+        # Development release
+        if self.dev is not None:
+            parts.append(f".dev{self.dev}")
+
+        # Local version segment
+        if self.local is not None:
+            parts.append(f"+{self.local}")
+
+        return "".join(parts)
+
+    @property
+    def epoch(self) -> int:
+        """The epoch of the version.
+
+        >>> Version("2.0.0").epoch
+        0
+        >>> Version("1!2.0.0").epoch
+        1
+        """
+        return self._version.epoch
+
+    @property
+    def release(self) -> Tuple[int, ...]:
+        """The components of the "release" segment of the version.
+
+        >>> Version("1.2.3").release
+        (1, 2, 3)
+        >>> Version("2.0.0").release
+        (2, 0, 0)
+        >>> Version("1!2.0.0.post0").release
+        (2, 0, 0)
+
+        Includes trailing zeroes but not the epoch or any pre-release / development /
+        post-release suffixes.
+        """
+        return self._version.release
+
+    @property
+    def pre(self) -> Optional[Tuple[str, int]]:
+        """The pre-release segment of the version.
+
+        >>> print(Version("1.2.3").pre)
+        None
+        >>> Version("1.2.3a1").pre
+        ('a', 1)
+        >>> Version("1.2.3b1").pre
+        ('b', 1)
+        >>> Version("1.2.3rc1").pre
+        ('rc', 1)
+        """
+        return self._version.pre
+
+    @property
+    def post(self) -> Optional[int]:
+        """The post-release number of the version.
+
+        >>> print(Version("1.2.3").post)
+        None
+        >>> Version("1.2.3.post1").post
+        1
+        """
+        return self._version.post[1] if self._version.post else None
+
+    @property
+    def dev(self) -> Optional[int]:
+        """The development number of the version.
+
+        >>> print(Version("1.2.3").dev)
+        None
+        >>> Version("1.2.3.dev1").dev
+        1
+        """
+        return self._version.dev[1] if self._version.dev else None
+
+    @property
+    def local(self) -> Optional[str]:
+        """The local version segment of the version.
+
+        >>> print(Version("1.2.3").local)
+        None
+        >>> Version("1.2.3+abc").local
+        'abc'
+        """
+        if self._version.local:
+            return ".".join(str(x) for x in self._version.local)
+        else:
+            return None
+
+    @property
+    def public(self) -> str:
+        """The public portion of the version.
+
+        >>> Version("1.2.3").public
+        '1.2.3'
+        >>> Version("1.2.3+abc").public
+        '1.2.3'
+        >>> Version("1.2.3+abc.dev1").public
+        '1.2.3'
+        """
+        return str(self).split("+", 1)[0]
+
+    @property
+    def base_version(self) -> str:
+        """The "base version" of the version.
+
+        >>> Version("1.2.3").base_version
+        '1.2.3'
+        >>> Version("1.2.3+abc").base_version
+        '1.2.3'
+        >>> Version("1!1.2.3+abc.dev1").base_version
+        '1!1.2.3'
+
+        The "base version" is the public version of the project without any pre or post
+        release markers.
+        """
+        parts = []
+
+        # Epoch
+        if self.epoch != 0:
+            parts.append(f"{self.epoch}!")
+
+        # Release segment
+        parts.append(".".join(str(x) for x in self.release))
+
+        return "".join(parts)
+
+    @property
+    def is_prerelease(self) -> bool:
+        """Whether this version is a pre-release.
+
+        >>> Version("1.2.3").is_prerelease
+        False
+        >>> Version("1.2.3a1").is_prerelease
+        True
+        >>> Version("1.2.3b1").is_prerelease
+        True
+        >>> Version("1.2.3rc1").is_prerelease
+        True
+        >>> Version("1.2.3dev1").is_prerelease
+        True
+        """
+        return self.dev is not None or self.pre is not None
+
+    @property
+    def is_postrelease(self) -> bool:
+        """Whether this version is a post-release.
+
+        >>> Version("1.2.3").is_postrelease
+        False
+        >>> Version("1.2.3.post1").is_postrelease
+        True
+        """
+        return self.post is not None
+
+    @property
+    def is_devrelease(self) -> bool:
+        """Whether this version is a development release.
+
+        >>> Version("1.2.3").is_devrelease
+        False
+        >>> Version("1.2.3.dev1").is_devrelease
+        True
+        """
+        return self.dev is not None
+
+    @property
+    def major(self) -> int:
+        """The first item of :attr:`release` or ``0`` if unavailable.
+
+        >>> Version("1.2.3").major
+        1
+        """
+        return self.release[0] if len(self.release) >= 1 else 0
+
+    @property
+    def minor(self) -> int:
+        """The second item of :attr:`release` or ``0`` if unavailable.
+
+        >>> Version("1.2.3").minor
+        2
+        >>> Version("1").minor
+        0
+        """
+        return self.release[1] if len(self.release) >= 2 else 0
+
+    @property
+    def micro(self) -> int:
+        """The third item of :attr:`release` or ``0`` if unavailable.
+
+        >>> Version("1.2.3").micro
+        3
+        >>> Version("1").micro
+        0
+        """
+        return self.release[2] if len(self.release) >= 3 else 0
+
+
+def _parse_letter_version(
+    letter: Optional[str], number: Union[str, bytes, SupportsInt, None]
+) -> Optional[Tuple[str, int]]:
+    if letter:
+        # We consider there to be an implicit 0 in a pre-release if there is
+        # not a numeral associated with it.
+        if number is None:
+            number = 0
+
+        # We normalize any letters to their lower case form
+        letter = letter.lower()
+
+        # We consider some words to be alternate spellings of other words and
+        # in those cases we want to normalize the spellings to our preferred
+        # spelling.
+        if letter == "alpha":
+            letter = "a"
+        elif letter == "beta":
+            letter = "b"
+        elif letter in ["c", "pre", "preview"]:
+            letter = "rc"
+        elif letter in ["rev", "r"]:
+            letter = "post"
+
+        return letter, int(number)
+    if not letter and number:
+        # We assume if we are given a number, but we are not given a letter
+        # then this is using the implicit post release syntax (e.g. 1.0-1)
+        letter = "post"
+
+        return letter, int(number)
+
+    return None
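+
+# Normalization examples for the rules above:
+#
+#     >>> _parse_letter_version("alpha", None)
+#     ('a', 0)
+#     >>> _parse_letter_version("rev", "4")
+#     ('post', 4)
+#     >>> _parse_letter_version(None, "1")  # implicit post release, e.g. 1.0-1
+#     ('post', 1)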
+
+
+_local_version_separators = re.compile(r"[\._-]")
+
+
+def _parse_local_version(local: Optional[str]) -> Optional[LocalType]:
+    """
+    Takes a string like abc.1.twelve and turns it into ("abc", 1, "twelve").
+    """
+    if local is not None:
+        return tuple(
+            part.lower() if not part.isdigit() else int(part)
+            for part in _local_version_separators.split(local)
+        )
+    return None
+
+
+def _cmpkey(
+    epoch: int,
+    release: Tuple[int, ...],
+    pre: Optional[Tuple[str, int]],
+    post: Optional[Tuple[str, int]],
+    dev: Optional[Tuple[str, int]],
+    local: Optional[LocalType],
+) -> CmpKey:
+    # When we compare a release version, we want to compare it with all of the
+    # trailing zeros removed. So we'll reverse the list, drop all the now-leading
+    # zeros until we come to something non-zero, then re-reverse the rest back
+    # into the correct order, make it a tuple, and use that for our sorting key.
+    _release = tuple(
+        reversed(list(itertools.dropwhile(lambda x: x == 0, reversed(release))))
+    )
+
+    # We need to "trick" the sorting algorithm to put 1.0.dev0 before 1.0a0.
+    # We'll do this by abusing the pre segment, but we _only_ want to do this
+    # if there is not a pre or a post segment. If we have one of those then
+    # the normal sorting rules will handle this case correctly.
+    if pre is None and post is None and dev is not None:
+        _pre: CmpPrePostDevType = NegativeInfinity
+    # Versions without a pre-release (except as noted above) should sort after
+    # those with one.
+    elif pre is None:
+        _pre = Infinity
+    else:
+        _pre = pre
+
+    # Versions without a post segment should sort before those with one.
+    if post is None:
+        _post: CmpPrePostDevType = NegativeInfinity
+    else:
+        _post = post
+
+    # Versions without a development segment should sort after those with one.
+    if dev is None:
+        _dev: CmpPrePostDevType = Infinity
+    else:
+        _dev = dev
+
+    if local is None:
+        # Versions without a local segment should sort before those with one.
+        _local: CmpLocalType = NegativeInfinity
+    else:
+        # Versions with a local segment need that segment parsed to implement
+        # the sorting rules in PEP 440.
+        # - Alphanumeric segments sort before numeric segments
+        # - Alphanumeric segments sort lexicographically
+        # - Numeric segments sort numerically
+        # - Shorter versions sort before longer versions when the prefixes
+        #   match exactly
+        _local = tuple(
+            (i, "") if isinstance(i, int) else (NegativeInfinity, i) for i in local
+        )
+
+    return epoch, _release, _pre, _post, _dev, _local
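+
+
+# Taken together, these rules yield the PEP 440 total ordering. A few
+# illustrative consequences (examples added here, not in the upstream source):
+#   Version("1.0.0") == Version("1")        # trailing zeros are trimmed
+#   Version("1.0.dev0") < Version("1.0a0")  # dev sorts before pre-releases
+#   Version("1.0") < Version("1.0.post0")   # post sorts after the release
+#   Version("1.0") < Version("1.0+local")   # a local segment sorts later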
diff --git a/.venv/lib/python3.12/site-packages/setuptools/_vendor/wheel/vendored/vendor.txt b/.venv/lib/python3.12/site-packages/setuptools/_vendor/wheel/vendored/vendor.txt
new file mode 100644
index 00000000..14666103
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/setuptools/_vendor/wheel/vendored/vendor.txt
@@ -0,0 +1 @@
+packaging==24.0
diff --git a/.venv/lib/python3.12/site-packages/setuptools/_vendor/wheel/wheelfile.py b/.venv/lib/python3.12/site-packages/setuptools/_vendor/wheel/wheelfile.py
new file mode 100644
index 00000000..0a0f4596
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/setuptools/_vendor/wheel/wheelfile.py
@@ -0,0 +1,227 @@
+from __future__ import annotations
+
+import csv
+import hashlib
+import os.path
+import re
+import stat
+import time
+from io import StringIO, TextIOWrapper
+from typing import IO, TYPE_CHECKING, Literal
+from zipfile import ZIP_DEFLATED, ZipFile, ZipInfo
+
+from wheel.cli import WheelError
+from wheel.util import log, urlsafe_b64decode, urlsafe_b64encode
+
+if TYPE_CHECKING:
+    from typing import Protocol, Sized, Union
+
+    from typing_extensions import Buffer
+
+    StrPath = Union[str, os.PathLike[str]]
+
+    class SizedBuffer(Sized, Buffer, Protocol): ...
+
+
+# Non-greedy matching of an optional build number may be too clever (more
+# invalid wheel filenames will match). Separate regex for .dist-info?
+WHEEL_INFO_RE = re.compile(
+    r"""^(?P<namever>(?P<name>[^\s-]+?)-(?P<ver>[^\s-]+?))(-(?P<build>\d[^\s-]*))?
+     -(?P<pyver>[^\s-]+?)-(?P<abi>[^\s-]+?)-(?P<plat>\S+)\.whl$""",
+    re.VERBOSE,
+)
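+# For example (comment added, not in the upstream source): matching
+# "pip-24.0-py3-none-any.whl" yields name="pip", ver="24.0", build=None,
+# pyver="py3", abi="none", plat="any".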
+MINIMUM_TIMESTAMP = 315532800  # 1980-01-01 00:00:00 UTC
+
+
+def get_zipinfo_datetime(timestamp: float | None = None):
+    # Some applications need reproducible .whl files, but they can't do this without
+    # forcing the timestamp of the individual ZipInfo objects. See issue #143.
+    timestamp = int(os.environ.get("SOURCE_DATE_EPOCH", timestamp or time.time()))
+    timestamp = max(timestamp, MINIMUM_TIMESTAMP)
+    return time.gmtime(timestamp)[0:6]
+
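+# For example (comment added, not in the upstream source): with
+# SOURCE_DATE_EPOCH=1577836800 (2020-01-01T00:00:00Z), get_zipinfo_datetime()
+# returns (2020, 1, 1, 0, 0, 0) regardless of the current time.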
+
+class WheelFile(ZipFile):
+    """A ZipFile derivative class that also reads SHA-256 hashes from
+    .dist-info/RECORD and checks any read files against those.
+    """
+
+    _default_algorithm = hashlib.sha256
+
+    def __init__(
+        self,
+        file: StrPath,
+        mode: Literal["r", "w", "x", "a"] = "r",
+        compression: int = ZIP_DEFLATED,
+    ):
+        basename = os.path.basename(file)
+        self.parsed_filename = WHEEL_INFO_RE.match(basename)
+        if not basename.endswith(".whl") or self.parsed_filename is None:
+            raise WheelError(f"Bad wheel filename {basename!r}")
+
+        ZipFile.__init__(self, file, mode, compression=compression, allowZip64=True)
+
+        self.dist_info_path = "{}.dist-info".format(
+            self.parsed_filename.group("namever")
+        )
+        self.record_path = self.dist_info_path + "/RECORD"
+        self._file_hashes: dict[str, tuple[None, None] | tuple[int, bytes]] = {}
+        self._file_sizes = {}
+        if mode == "r":
+            # Ignore RECORD and any embedded wheel signatures
+            self._file_hashes[self.record_path] = None, None
+            self._file_hashes[self.record_path + ".jws"] = None, None
+            self._file_hashes[self.record_path + ".p7s"] = None, None
+
+            # Fill in the expected hashes by reading them from RECORD
+            try:
+                record = self.open(self.record_path)
+            except KeyError:
+                raise WheelError(f"Missing {self.record_path} file") from None
+
+            with record:
+                for line in csv.reader(
+                    TextIOWrapper(record, newline="", encoding="utf-8")
+                ):
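+                    # Each RECORD row is (path, hash, size), e.g. the
+                    # hypothetical entry "mypkg/__init__.py,sha256=p1N...,1042";
+                    # rows with an empty hash field are skipped below.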
+                    path, hash_sum, size = line
+                    if not hash_sum:
+                        continue
+
+                    algorithm, hash_sum = hash_sum.split("=")
+                    try:
+                        hashlib.new(algorithm)
+                    except ValueError:
+                        raise WheelError(
+                            f"Unsupported hash algorithm: {algorithm}"
+                        ) from None
+
+                    if algorithm.lower() in {"md5", "sha1"}:
+                        raise WheelError(
+                            f"Weak hash algorithm ({algorithm}) is not permitted by "
+                            f"PEP 427"
+                        )
+
+                    self._file_hashes[path] = (
+                        algorithm,
+                        urlsafe_b64decode(hash_sum.encode("ascii")),
+                    )
+
+    def open(
+        self,
+        name_or_info: str | ZipInfo,
+        mode: Literal["r", "w"] = "r",
+        pwd: bytes | None = None,
+    ) -> IO[bytes]:
+        def _update_crc(newdata: bytes) -> None:
+            eof = ef._eof
+            update_crc_orig(newdata)
+            running_hash.update(newdata)
+            if eof and running_hash.digest() != expected_hash:
+                raise WheelError(f"Hash mismatch for file '{ef_name}'")
+
+        ef_name = (
+            name_or_info.filename if isinstance(name_or_info, ZipInfo) else name_or_info
+        )
+        if (
+            mode == "r"
+            and not ef_name.endswith("/")
+            and ef_name not in self._file_hashes
+        ):
+            raise WheelError(f"No hash found for file '{ef_name}'")
+
+        ef = ZipFile.open(self, name_or_info, mode, pwd)
+        if mode == "r" and not ef_name.endswith("/"):
+            algorithm, expected_hash = self._file_hashes[ef_name]
+            if expected_hash is not None:
+                # Monkey patch the _update_crc method to also check for the hash from
+                # RECORD
+                running_hash = hashlib.new(algorithm)
+                update_crc_orig, ef._update_crc = ef._update_crc, _update_crc
+
+        return ef
+
+    def write_files(self, base_dir: str):
+        log.info(f"creating '{self.filename}' and adding '{base_dir}' to it")
+        deferred: list[tuple[str, str]] = []
+        for root, dirnames, filenames in os.walk(base_dir):
+            # Sort the directory names so that `os.walk` will walk them in a
+            # defined order on the next iteration.
+            dirnames.sort()
+            for name in sorted(filenames):
+                path = os.path.normpath(os.path.join(root, name))
+                if os.path.isfile(path):
+                    arcname = os.path.relpath(path, base_dir).replace(os.path.sep, "/")
+                    if arcname == self.record_path:
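+                        # RECORD is regenerated in close(), so skip any
+                        # existing copy here.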
+                        pass
+                    elif root.endswith(".dist-info"):
+                        deferred.append((path, arcname))
+                    else:
+                        self.write(path, arcname)
+
+        deferred.sort()
+        for path, arcname in deferred:
+            self.write(path, arcname)
+
+    def write(
+        self,
+        filename: str,
+        arcname: str | None = None,
+        compress_type: int | None = None,
+    ) -> None:
+        with open(filename, "rb") as f:
+            st = os.fstat(f.fileno())
+            data = f.read()
+
+        zinfo = ZipInfo(
+            arcname or filename, date_time=get_zipinfo_datetime(st.st_mtime)
+        )
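+        # Store the Unix permission and file-type bits in the upper 16 bits of
+        # external_attr so the original mode survives the zip round trip.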
+        zinfo.external_attr = (stat.S_IMODE(st.st_mode) | stat.S_IFMT(st.st_mode)) << 16
+        zinfo.compress_type = compress_type or self.compression
+        self.writestr(zinfo, data, compress_type)
+
+    def writestr(
+        self,
+        zinfo_or_arcname: str | ZipInfo,
+        data: SizedBuffer | str,
+        compress_type: int | None = None,
+    ):
+        if isinstance(zinfo_or_arcname, str):
+            zinfo_or_arcname = ZipInfo(
+                zinfo_or_arcname, date_time=get_zipinfo_datetime()
+            )
+            zinfo_or_arcname.compress_type = self.compression
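+            # Default to a regular file with rw-rw-r-- (0o664) permissions.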
+            zinfo_or_arcname.external_attr = (0o664 | stat.S_IFREG) << 16
+
+        if isinstance(data, str):
+            data = data.encode("utf-8")
+
+        ZipFile.writestr(self, zinfo_or_arcname, data, compress_type)
+        fname = (
+            zinfo_or_arcname.filename
+            if isinstance(zinfo_or_arcname, ZipInfo)
+            else zinfo_or_arcname
+        )
+        log.info(f"adding '{fname}'")
+        if fname != self.record_path:
+            hash_ = self._default_algorithm(data)
+            self._file_hashes[fname] = (
+                hash_.name,
+                urlsafe_b64encode(hash_.digest()).decode("ascii"),
+            )
+            self._file_sizes[fname] = len(data)
+
+    def close(self):
+        # Write RECORD
+        if self.fp is not None and self.mode == "w" and self._file_hashes:
+            data = StringIO()
+            writer = csv.writer(data, delimiter=",", quotechar='"', lineterminator="\n")
+            writer.writerows(
+                (
+                    (fname, algorithm + "=" + hash_, self._file_sizes[fname])
+                    for fname, (algorithm, hash_) in self._file_hashes.items()
+                )
+            )
+            writer.writerow((self.record_path, "", ""))
+            self.writestr(self.record_path, data.getvalue())
+
+        ZipFile.close(self)
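+
+
+# A minimal usage sketch (the wheel filename is hypothetical, comment not in
+# the upstream source): every member read from a WheelFile opened in "r" mode
+# is verified against RECORD, and a WheelError is raised on any hash mismatch:
+#
+#     with WheelFile("example_pkg-1.0-py3-none-any.whl") as wf:
+#         metadata = wf.read(wf.dist_info_path + "/METADATA")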